| column | type | stats |
|---|---|---|
| patch | string | lengths 17 to 31.2k |
| y | int64 | values 1 to 1 |
| oldf | string | lengths 0 to 2.21M |
| idx | int64 | values 1 to 1 |
| id | int64 | values 4.29k to 68.4k |
| msg | string | lengths 8 to 843 |
| proj | string | 212 distinct classes |
| lang | string | 9 distinct classes |

Row 1, column `patch`:
@@ -239,7 +239,7 @@ class RefactoringChecker(checkers.BaseTokenChecker):
"with statement assignment and exception handler assignment.",
),
"R1705": (
- 'Unnecessary "%s" after "return"',
+ 'Unnecessary "%s" after "return" remove it and de-indent all the code inside it',
"no-else-return",
"Used in order to highlight an unnecessary block of "
"code following an if containing a return statement. " | 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
import collections
import copy
import itertools
import tokenize
from functools import reduce
from typing import Dict, Iterator, List, NamedTuple, Optional, Tuple, Union
import astroid
from astroid import nodes
from astroid.util import Uninferable
from pylint import checkers, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import node_frame_class
KNOWN_INFINITE_ITERATORS = {"itertools.count"}
BUILTIN_EXIT_FUNCS = frozenset(("quit", "exit"))
CALLS_THAT_COULD_BE_REPLACED_BY_WITH = frozenset(
(
"threading.lock.acquire",
"threading._RLock.acquire",
"threading.Semaphore.acquire",
"multiprocessing.managers.BaseManager.start",
"multiprocessing.managers.SyncManager.start",
)
)
CALLS_RETURNING_CONTEXT_MANAGERS = frozenset(
(
"_io.open", # regular 'open()' call
"codecs.open",
"urllib.request.urlopen",
"tempfile.NamedTemporaryFile",
"tempfile.SpooledTemporaryFile",
"tempfile.TemporaryDirectory",
"tempfile.TemporaryFile",
"zipfile.ZipFile",
"zipfile.PyZipFile",
"zipfile.ZipFile.open",
"zipfile.PyZipFile.open",
"tarfile.TarFile",
"tarfile.TarFile.open",
"multiprocessing.context.BaseContext.Pool",
"subprocess.Popen",
)
)
def _if_statement_is_always_returning(if_node, returning_node_class) -> bool:
return any(isinstance(node, returning_node_class) for node in if_node.body)
def _is_trailing_comma(tokens: List[tokenize.TokenInfo], index: int) -> bool:
"""Check if the given token is a trailing comma
:param tokens: Sequence of the module's tokens
:type tokens: list[tokenize.TokenInfo]
:param int index: Index of token under check in tokens
:returns: True if the token is a comma which trails an expression
:rtype: bool
"""
token = tokens[index]
if token.exact_type != tokenize.COMMA:
return False
# Must have remaining tokens on the same line such as NEWLINE
left_tokens = list(itertools.islice(tokens, index + 1, None))
more_tokens_on_line = False
for remaining_token in left_tokens:
if remaining_token.start[0] == token.start[0]:
more_tokens_on_line = True
# If one of the remaining same line tokens is not NEWLINE or COMMENT
# the comma is not trailing.
if remaining_token.type not in (tokenize.NEWLINE, tokenize.COMMENT):
return False
if not more_tokens_on_line:
return False
def get_curline_index_start():
"""Get the index denoting the start of the current line"""
for subindex, token in enumerate(reversed(tokens[:index])):
# See Lib/tokenize.py and Lib/token.py in cpython for more info
if token.type == tokenize.NEWLINE:
return index - subindex
return 0
curline_start = get_curline_index_start()
expected_tokens = {"return", "yield"}
return any(
"=" in prevtoken.string or prevtoken.string in expected_tokens
for prevtoken in tokens[curline_start:index]
)
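# Illustrative note (added; not in the original source): for the module ``x = 1,``
# the stream tokenizes roughly as NAME('x') OP('=') NUMBER('1') OP(',') NEWLINE,
# so the only same-line tokens after the comma are NEWLINE/COMMENT and the
# current line contains '=', hence the comma is reported as trailing. In
# ``t = (1,)`` the comma is followed by OP(')') on the same line, so it is not flagged.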
def _is_inside_context_manager(node: nodes.Call) -> bool:
frame = node.frame(future=True)
if not isinstance(
frame, (nodes.FunctionDef, astroid.BoundMethod, astroid.UnboundMethod)
):
return False
return frame.name == "__enter__" or utils.decorated_with(
frame, "contextlib.contextmanager"
)
def _is_a_return_statement(node: nodes.Call) -> bool:
frame = node.frame(future=True)
for parent in node.node_ancestors():
if parent is frame:
break
if isinstance(parent, nodes.Return):
return True
return False
def _is_part_of_with_items(node: nodes.Call) -> bool:
"""Checks if one of the node's parents is a ``nodes.With`` node and that the node itself is located
somewhere under its ``items``.
"""
frame = node.frame(future=True)
current = node
while current != frame:
if isinstance(current, nodes.With):
items_start = current.items[0][0].lineno
items_end = current.items[-1][0].tolineno
return items_start <= node.lineno <= items_end
current = current.parent
return False
def _will_be_released_automatically(node: nodes.Call) -> bool:
"""Checks if a call that could be used in a ``with`` statement is used in an alternative
construct which would ensure that its __exit__ method is called.
"""
callables_taking_care_of_exit = frozenset(
(
"contextlib._BaseExitStack.enter_context",
"contextlib.ExitStack.enter_context", # necessary for Python 3.6 compatibility
)
)
if not isinstance(node.parent, nodes.Call):
return False
func = utils.safe_infer(node.parent.func)
if not func:
return False
return func.qname() in callables_taking_care_of_exit
class ConsiderUsingWithStack(NamedTuple):
"""Stack for objects that may potentially trigger a R1732 message
if they are not used in a ``with`` block later on.
"""
module_scope: Dict[str, nodes.NodeNG] = {}
class_scope: Dict[str, nodes.NodeNG] = {}
function_scope: Dict[str, nodes.NodeNG] = {}
def __iter__(self) -> Iterator[Dict[str, nodes.NodeNG]]:
yield from (self.function_scope, self.class_scope, self.module_scope)
def get_stack_for_frame(
self, frame: Union[nodes.FunctionDef, nodes.ClassDef, nodes.Module]
):
"""Get the stack corresponding to the scope of the given frame."""
if isinstance(frame, nodes.FunctionDef):
return self.function_scope
if isinstance(frame, nodes.ClassDef):
return self.class_scope
return self.module_scope
def clear_all(self) -> None:
"""Convenience method to clear all stacks"""
for stack in self:
stack.clear()
class RefactoringChecker(checkers.BaseTokenChecker):
"""Looks for code which can be refactored
This checker also mixes the astroid and the token approaches
in order to create knowledge about whether an "else if" node
is a true "else if" node, or an "elif" node.
"""
__implements__ = (interfaces.ITokenChecker, interfaces.IAstroidChecker)
name = "refactoring"
msgs = {
"R1701": (
"Consider merging these isinstance calls to isinstance(%s, (%s))",
"consider-merging-isinstance",
"Used when multiple consecutive isinstance calls can be merged into one.",
),
"R1706": (
"Consider using ternary (%s)",
"consider-using-ternary",
"Used when one of known pre-python 2.5 ternary syntax is used.",
),
"R1709": (
"Boolean expression may be simplified to %s",
"simplify-boolean-expression",
"Emitted when redundant pre-python 2.5 ternary syntax is used.",
),
"R1726": (
"Boolean condition '%s' may be simplified to '%s'",
"simplifiable-condition",
"Emitted when a boolean condition is able to be simplified.",
),
"R1727": (
"Boolean condition '%s' will always evaluate to '%s'",
"condition-evals-to-constant",
"Emitted when a boolean condition can be simplified to a constant value.",
),
"R1702": (
"Too many nested blocks (%s/%s)",
"too-many-nested-blocks",
"Used when a function or a method has too many nested "
"blocks. This makes the code less understandable and "
"maintainable.",
{"old_names": [("R0101", "old-too-many-nested-blocks")]},
),
"R1703": (
"The if statement can be replaced with %s",
"simplifiable-if-statement",
"Used when an if statement can be replaced with 'bool(test)'. ",
{"old_names": [("R0102", "old-simplifiable-if-statement")]},
),
"R1704": (
"Redefining argument with the local name %r",
"redefined-argument-from-local",
"Used when a local name is redefining an argument, which might "
"suggest a potential error. This is taken in account only for "
"a handful of name binding operations, such as for iteration, "
"with statement assignment and exception handler assignment.",
),
"R1705": (
'Unnecessary "%s" after "return"',
"no-else-return",
"Used in order to highlight an unnecessary block of "
"code following an if containing a return statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"return statement.",
),
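# Illustrative example (added; not in the original source):
# def sign(x):
#     if x >= 0:          # R1705: the "else" below is unnecessary
#         return 1
#     else:
#         return -1
# Suggested refactoring: drop the "else" and de-indent its body:
# def sign(x):
#     if x >= 0:
#         return 1
#     return -1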
"R1707": (
"Disallow trailing comma tuple",
"trailing-comma-tuple",
"In Python, a tuple is actually created by the comma symbol, "
"not by the parentheses. Unfortunately, one can actually create a "
"tuple by misplacing a trailing comma, which can lead to potential "
"weird bugs in your code. You should always use parentheses "
"explicitly for creating a tuple.",
),
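# Illustrative example (added; not in the original source):
# coords = 1,          # R1707: the trailing comma silently creates the tuple (1,)
# coords = (1,)        # explicit parentheses make the intent clear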
"R1708": (
"Do not raise StopIteration in generator, use return statement instead",
"stop-iteration-return",
"According to PEP479, the raise of StopIteration to end the loop of "
"a generator may lead to hard to find bugs. This PEP specify that "
"raise StopIteration has to be replaced by a simple return statement",
),
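# Illustrative example (added; not in the original source):
# def gen(items):
#     for item in items:
#         if item is None:
#             raise StopIteration  # R1708: under PEP 479 this becomes RuntimeError
#         yield item
# Preferred: replace the raise with a plain ``return``.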
"R1710": (
"Either all return statements in a function should return an expression, "
"or none of them should.",
"inconsistent-return-statements",
"According to PEP8, if any return statement returns an expression, "
"any return statements where no value is returned should explicitly "
"state this as return None, and an explicit return statement "
"should be present at the end of the function (if reachable)",
),
"R1711": (
"Useless return at end of function or method",
"useless-return",
'Emitted when a single "return" or "return None" statement is found '
"at the end of function or method definition. This statement can safely be "
"removed because Python will implicitly return None",
),
"R1712": (
"Consider using tuple unpacking for swapping variables",
"consider-swap-variables",
"You do not have to use a temporary variable in order to "
'swap variables. Using "tuple unpacking" to directly swap '
"variables makes the intention more clear.",
),
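# Illustrative example (added; not in the original source):
# temp = a          # R1712: three assignments through a temporary...
# a = b
# b = temp
# a, b = b, a       # ...can be a single tuple-unpacking swap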
"R1713": (
"Consider using str.join(sequence) for concatenating "
"strings from an iterable",
"consider-using-join",
"Using str.join(sequence) is faster, uses less memory "
"and increases readability compared to for-loop iteration.",
),
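# Illustrative example (added; not in the original source):
# result = ""
# for number in ["1", "2", "3"]:
#     result += number               # R1713
# result = "".join(["1", "2", "3"])  # suggested equivalent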
"R1714": (
'Consider merging these comparisons with "in" to %r',
"consider-using-in",
"To check if a variable is equal to one of many values,"
'combine the values into a tuple and check if the variable is contained "in" it '
"instead of checking for equality against each of the values."
"This is faster and less verbose.",
),
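# Illustrative example (added; not in the original source):
# if x == 1 or x == 2 or x == 3:   # R1714
# if x in (1, 2, 3):               # suggested equivalent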
"R1715": (
"Consider using dict.get for getting values from a dict "
"if a key is present or a default if not",
"consider-using-get",
"Using the builtin dict.get for getting a value from a dictionary "
"if a key is present or a default if not, is simpler and considered "
"more idiomatic, although sometimes a bit slower",
),
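# Illustrative example (added; not in the original source):
# if key in config:            # R1715
#     value = config[key]
# else:
#     value = default
# value = config.get(key, default)  # suggested equivalent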
"R1716": (
"Simplify chained comparison between the operands",
"chained-comparison",
"This message is emitted when pylint encounters boolean operation like"
'"a < b and b < c", suggesting instead to refactor it to "a < b < c"',
),
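# Illustrative example (added; not in the original source):
# if a < b and b < c:   # R1716
# if a < b < c:         # suggested chained form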
"R1717": (
"Consider using a dictionary comprehension",
"consider-using-dict-comprehension",
"Emitted when we detect the creation of a dictionary "
"using the dict() callable and a transient list. "
"Although there is nothing syntactically wrong with this code, "
"it is hard to read and can be simplified to a dict comprehension."
"Also it is faster since you don't need to create another "
"transient list",
),
"R1718": (
"Consider using a set comprehension",
"consider-using-set-comprehension",
"Although there is nothing syntactically wrong with this code, "
"it is hard to read and can be simplified to a set comprehension."
"Also it is faster since you don't need to create another "
"transient list",
),
"R1719": (
"The if expression can be replaced with %s",
"simplifiable-if-expression",
"Used when an if expression can be replaced with 'bool(test)'. ",
),
"R1720": (
'Unnecessary "%s" after "raise"',
"no-else-raise",
"Used in order to highlight an unnecessary block of "
"code following an if containing a raise statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"raise statement.",
),
"R1721": (
"Unnecessary use of a comprehension, use %s instead.",
"unnecessary-comprehension",
"Instead of using an identity comprehension, "
"consider using the list, dict or set constructor. "
"It is faster and simpler.",
),
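# Illustrative example (added; not in the original source):
# squares = [x for x in numbers]   # R1721: identity comprehension
# squares = list(numbers)          # suggested constructor call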
"R1722": (
"Consider using sys.exit()",
"consider-using-sys-exit",
"Instead of using exit() or quit(), consider using the sys.exit().",
),
"R1723": (
'Unnecessary "%s" after "break"',
"no-else-break",
"Used in order to highlight an unnecessary block of "
"code following an if containing a break statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"break statement.",
),
"R1724": (
'Unnecessary "%s" after "continue"',
"no-else-continue",
"Used in order to highlight an unnecessary block of "
"code following an if containing a continue statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"continue statement.",
),
"R1725": (
"Consider using Python 3 style super() without arguments",
"super-with-arguments",
"Emitted when calling the super() builtin with the current class "
"and instance. On Python 3 these arguments are the default and they can be omitted.",
),
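# Illustrative example (added; not in the original source):
# class Child(Base):
#     def __init__(self):
#         super(Child, self).__init__()   # R1725
#         super().__init__()              # Python 3 equivalent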
"R1728": (
"Consider using a generator instead '%s(%s)'",
"consider-using-generator",
"If your container can be large using "
"a generator will bring better performance.",
),
"R1729": (
"Use a generator instead '%s(%s)'",
"use-a-generator",
"Comprehension inside of 'any' or 'all' is unnecessary. "
"A generator would be sufficient and faster.",
),
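# Illustrative example (added; not in the original source):
# any([x > 0 for x in data])   # R1729: builds the whole list first
# any(x > 0 for x in data)     # generator short-circuits on the first match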
"R1730": (
"Consider using '%s' instead of unnecessary if block",
"consider-using-min-builtin",
"Using the min builtin instead of a conditional improves readability and conciseness.",
),
"R1731": (
"Consider using '%s' instead of unnecessary if block",
"consider-using-max-builtin",
"Using the max builtin instead of a conditional improves readability and conciseness.",
),
"R1732": (
"Consider using 'with' for resource-allocating operations",
"consider-using-with",
"Emitted if a resource-allocating assignment or call may be replaced by a 'with' block. "
"By using 'with' the release of the allocated resources is ensured even in the case of an exception.",
),
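# Illustrative example (added; not in the original source):
# handle = open("data.txt")        # R1732: never released on an exception
# with open("data.txt") as handle: # closed automatically, even on error
#     ...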
"R1733": (
"Unnecessary dictionary index lookup, use '%s' instead",
"unnecessary-dict-index-lookup",
"Emitted when iterating over the dictionary items (key-item pairs) and accessing the "
"value by index lookup. "
"The value can be accessed directly instead.",
),
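# Illustrative example (added; not in the original source):
# for key, value in d.items():
#     print(d[key])    # R1733: index lookup repeats work
#     print(value)     # the item is already unpacked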
"R1734": (
"Consider using [] instead of list()",
"use-list-literal",
"Emitted when using list() to create an empty list instead of the literal []. "
"The literal is faster as it avoids an additional function call.",
),
"R1735": (
"Consider using {} instead of dict()",
"use-dict-literal",
"Emitted when using dict() to create an empty dictionary instead of the literal {}. "
"The literal is faster as it avoids an additional function call.",
),
}
options = (
(
"max-nested-blocks",
{
"default": 5,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of nested blocks for function / method body",
},
),
(
"never-returning-functions",
{
"default": ("sys.exit", "argparse.parse_error"),
"type": "csv",
"help": "Complete name of functions that never returns. When checking "
"for inconsistent-return-statements if a never returning function is "
"called then it will be considered as an explicit return statement "
"and no message will be printed.",
},
),
)
priority = 0
def __init__(self, linter=None):
super().__init__(linter)
self._return_nodes = {}
self._consider_using_with_stack = ConsiderUsingWithStack()
self._init()
self._never_returning_functions = None
def _init(self):
self._nested_blocks = []
self._elifs = []
self._nested_blocks_msg = None
self._reported_swap_nodes = set()
self._can_simplify_bool_op = False
self._consider_using_with_stack.clear_all()
def open(self):
# do this in open since config not fully initialized in __init__
self._never_returning_functions = set(self.config.never_returning_functions)
@astroid.decorators.cachedproperty
def _dummy_rgx(self):
return lint_utils.get_global_option(self, "dummy-variables-rgx", default=None)
@staticmethod
def _is_bool_const(node):
return isinstance(node.value, nodes.Const) and isinstance(
node.value.value, bool
)
def _is_actual_elif(self, node):
"""Check if the given node is an actual elif
This is a problem we're having with the builtin ast module,
which splits `elif` branches into a separate if statement.
Unfortunately we need to know the exact type in certain
cases.
"""
if isinstance(node.parent, nodes.If):
orelse = node.parent.orelse
# current if node must directly follow an "else"
if orelse and orelse == [node]:
if (node.lineno, node.col_offset) in self._elifs:
return True
return False
def _check_simplifiable_if(self, node):
"""Check if the given if node can be simplified.
The if statement can be reduced to a boolean expression
in some cases. For instance, if there are two branches
and both of them return a boolean value that depends on
the result of the statement's test, then this can be reduced
to `bool(test)` without losing any functionality.
"""
if self._is_actual_elif(node):
# Not interested in if statements with multiple branches.
return
if len(node.orelse) != 1 or len(node.body) != 1:
return
# Check if both branches can be reduced.
first_branch = node.body[0]
else_branch = node.orelse[0]
if isinstance(first_branch, nodes.Return):
if not isinstance(else_branch, nodes.Return):
return
first_branch_is_bool = self._is_bool_const(first_branch)
else_branch_is_bool = self._is_bool_const(else_branch)
reduced_to = "'return bool(test)'"
elif isinstance(first_branch, nodes.Assign):
if not isinstance(else_branch, nodes.Assign):
return
# Check if we assign to the same value
first_branch_targets = [
target.name
for target in first_branch.targets
if isinstance(target, nodes.AssignName)
]
else_branch_targets = [
target.name
for target in else_branch.targets
if isinstance(target, nodes.AssignName)
]
if not first_branch_targets or not else_branch_targets:
return
if sorted(first_branch_targets) != sorted(else_branch_targets):
return
first_branch_is_bool = self._is_bool_const(first_branch)
else_branch_is_bool = self._is_bool_const(else_branch)
reduced_to = "'var = bool(test)'"
else:
return
if not first_branch_is_bool or not else_branch_is_bool:
return
if not first_branch.value.value:
# This is a case that can't be easily simplified and
# if it can be simplified, it will usually result in
# code that's harder to understand and comprehend.
# Let's take for instance `arg and arg <= 3`. This could theoretically be
# reduced to `not arg or arg > 3`, but the net result is that now the
# condition is harder to understand, because it requires understanding of
# an extra clause:
# * first, there is the negation of truthness with `not arg`
# * the second clause is `arg > 3`, which occurs when arg has a
# truth value, but it implies that `arg > 3` is equivalent
# with `arg and arg > 3`, which means that the user must
# think about this assumption when evaluating `arg > 3`.
# The original form is easier to grasp.
return
self.add_message("simplifiable-if-statement", node=node, args=(reduced_to,))
def process_tokens(self, tokens):
# Process tokens and look for 'if' or 'elif'
for index, token in enumerate(tokens):
token_string = token[1]
if token_string == "elif":
# AST exists by the time process_tokens is called, so
# it's safe to assume tokens[index+1]
# exists. tokens[index+1][2] is the elif's position as
# reported by CPython and PyPy,
# tokens[index][2] is the actual position and also is
# reported by IronPython.
self._elifs.extend([tokens[index][2], tokens[index + 1][2]])
elif _is_trailing_comma(tokens, index):
if self.linter.is_message_enabled("trailing-comma-tuple"):
self.add_message("trailing-comma-tuple", line=token.start[0])
@utils.check_messages("consider-using-with")
def leave_module(self, _: nodes.Module) -> None:
# check for context managers that have been created but not used
self._emit_consider_using_with_if_needed(
self._consider_using_with_stack.module_scope
)
self._init()
@utils.check_messages("too-many-nested-blocks")
def visit_tryexcept(self, node: nodes.TryExcept) -> None:
self._check_nested_blocks(node)
visit_tryfinally = visit_tryexcept
visit_while = visit_tryexcept
def _check_redefined_argument_from_local(self, name_node):
if self._dummy_rgx and self._dummy_rgx.match(name_node.name):
return
if not name_node.lineno:
# Unknown position, maybe it is a manually built AST?
return
scope = name_node.scope()
if not isinstance(scope, nodes.FunctionDef):
return
for defined_argument in scope.args.nodes_of_class(
nodes.AssignName, skip_klass=(nodes.Lambda,)
):
if defined_argument.name == name_node.name:
self.add_message(
"redefined-argument-from-local",
node=name_node,
args=(name_node.name,),
)
@utils.check_messages(
"redefined-argument-from-local",
"too-many-nested-blocks",
"unnecessary-dict-index-lookup",
)
def visit_for(self, node: nodes.For) -> None:
self._check_nested_blocks(node)
self._check_unnecessary_dict_index_lookup(node)
for name in node.target.nodes_of_class(nodes.AssignName):
self._check_redefined_argument_from_local(name)
@utils.check_messages("redefined-argument-from-local")
def visit_excepthandler(self, node: nodes.ExceptHandler) -> None:
if node.name and isinstance(node.name, nodes.AssignName):
self._check_redefined_argument_from_local(node.name)
@utils.check_messages("redefined-argument-from-local")
def visit_with(self, node: nodes.With) -> None:
for var, names in node.items:
if isinstance(var, nodes.Name):
for stack in self._consider_using_with_stack:
# We don't need to restrict the stacks we search to the current scope and outer scopes,
# as e.g. the function_scope stack will be empty when we check a ``with`` on the class level.
if var.name in stack:
del stack[var.name]
break
if not names:
continue
for name in names.nodes_of_class(nodes.AssignName):
self._check_redefined_argument_from_local(name)
def _check_superfluous_else(self, node, msg_id, returning_node_class):
if not node.orelse:
# Not interested in if statements without else.
return
if self._is_actual_elif(node):
# Not interested in elif nodes; only if
return
if _if_statement_is_always_returning(node, returning_node_class):
orelse = node.orelse[0]
followed_by_elif = (orelse.lineno, orelse.col_offset) in self._elifs
self.add_message(
msg_id, node=node, args="elif" if followed_by_elif else "else"
)
def _check_superfluous_else_return(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-return", returning_node_class=nodes.Return
)
def _check_superfluous_else_raise(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-raise", returning_node_class=nodes.Raise
)
def _check_superfluous_else_break(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-break", returning_node_class=nodes.Break
)
def _check_superfluous_else_continue(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-continue", returning_node_class=nodes.Continue
)
@staticmethod
def _type_and_name_are_equal(node_a, node_b):
for _type in (nodes.Name, nodes.AssignName):
if all(isinstance(_node, _type) for _node in (node_a, node_b)):
return node_a.name == node_b.name
if all(isinstance(_node, nodes.Const) for _node in (node_a, node_b)):
return node_a.value == node_b.value
return False
def _is_dict_get_block(self, node):
# "if <compare node>"
if not isinstance(node.test, nodes.Compare):
return False
# Does not have a single statement in the guard's body
if len(node.body) != 1:
return False
# Look for a single variable assignment on the LHS and a subscript on RHS
stmt = node.body[0]
if not (
isinstance(stmt, nodes.Assign)
and len(node.body[0].targets) == 1
and isinstance(node.body[0].targets[0], nodes.AssignName)
and isinstance(stmt.value, nodes.Subscript)
):
return False
# The subscript's slice needs to be the same as the test variable.
slice_value = stmt.value.slice
if not (
self._type_and_name_are_equal(stmt.value.value, node.test.ops[0][1])
and self._type_and_name_are_equal(slice_value, node.test.left)
):
return False
# The object needs to be a dictionary instance
return isinstance(utils.safe_infer(node.test.ops[0][1]), nodes.Dict)
def _check_consider_get(self, node):
if_block_ok = self._is_dict_get_block(node)
if if_block_ok and not node.orelse:
self.add_message("consider-using-get", node=node)
elif (
if_block_ok
and len(node.orelse) == 1
and isinstance(node.orelse[0], nodes.Assign)
and self._type_and_name_are_equal(
node.orelse[0].targets[0], node.body[0].targets[0]
)
and len(node.orelse[0].targets) == 1
):
self.add_message("consider-using-get", node=node)
@utils.check_messages(
"too-many-nested-blocks",
"simplifiable-if-statement",
"no-else-return",
"no-else-raise",
"no-else-break",
"no-else-continue",
"consider-using-get",
)
def visit_if(self, node: nodes.If) -> None:
self._check_simplifiable_if(node)
self._check_nested_blocks(node)
self._check_superfluous_else_return(node)
self._check_superfluous_else_raise(node)
self._check_superfluous_else_break(node)
self._check_superfluous_else_continue(node)
self._check_consider_get(node)
self._check_consider_using_min_max_builtin(node)
def _check_consider_using_min_max_builtin(self, node: nodes.If):
"""Check if the given if node can be refactored as a min/max python builtin."""
if self._is_actual_elif(node) or node.orelse:
# Not interested in if statements with multiple branches.
return
if len(node.body) != 1:
return
body = node.body[0]
# Check if condition can be reduced.
if not hasattr(body, "targets") or len(body.targets) != 1:
return
target = body.targets[0]
if not (
isinstance(node.test, nodes.Compare)
and not isinstance(target, nodes.Subscript)
and not isinstance(node.test.left, nodes.Subscript)
and isinstance(body, nodes.Assign)
):
return
# Check that the assignation is on the same variable.
if hasattr(node.test.left, "name"):
left_operand = node.test.left.name
elif hasattr(node.test.left, "attrname"):
left_operand = node.test.left.attrname
else:
return
if hasattr(target, "name"):
target_assignation = target.name
elif hasattr(target, "attrname"):
target_assignation = target.attrname
else:
return
if not (left_operand == target_assignation):
return
if len(node.test.ops) > 1:
return
if not isinstance(body.value, (nodes.Name, nodes.Const)):
return
operator, right_statement = node.test.ops[0]
if isinstance(body.value, nodes.Name):
body_value = body.value.name
else:
body_value = body.value.value
if isinstance(right_statement, nodes.Name):
right_statement_value = right_statement.name
elif isinstance(right_statement, nodes.Const):
right_statement_value = right_statement.value
else:
return
# Verify the right part of the statement is the same.
if right_statement_value != body_value:
return
if operator in {"<", "<="}:
reduced_to = "{target} = max({target}, {item})".format(
target=target_assignation, item=body_value
)
self.add_message(
"consider-using-max-builtin", node=node, args=(reduced_to,)
)
elif operator in {">", ">="}:
reduced_to = "{target} = min({target}, {item})".format(
target=target_assignation, item=body_value
)
self.add_message(
"consider-using-min-builtin", node=node, args=(reduced_to,)
)
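# Illustrative example of the pattern handled above (added; not in the original source):
# if value < threshold:        # R1731: value = max(value, threshold)
#     value = threshold
# if value > ceiling:          # R1730: value = min(value, ceiling)
#     value = ceiling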
@utils.check_messages("simplifiable-if-expression")
def visit_ifexp(self, node: nodes.IfExp) -> None:
self._check_simplifiable_ifexp(node)
def _check_simplifiable_ifexp(self, node):
if not isinstance(node.body, nodes.Const) or not isinstance(
node.orelse, nodes.Const
):
return
if not isinstance(node.body.value, bool) or not isinstance(
node.orelse.value, bool
):
return
if isinstance(node.test, nodes.Compare):
test_reduced_to = "test"
else:
test_reduced_to = "bool(test)"
if (node.body.value, node.orelse.value) == (True, False):
reduced_to = f"'{test_reduced_to}'"
elif (node.body.value, node.orelse.value) == (False, True):
reduced_to = "'not test'"
else:
return
self.add_message("simplifiable-if-expression", node=node, args=(reduced_to,))
@utils.check_messages(
"too-many-nested-blocks",
"inconsistent-return-statements",
"useless-return",
"consider-using-with",
)
def leave_functiondef(self, node: nodes.FunctionDef) -> None:
# check left-over nested blocks stack
self._emit_nested_blocks_message_if_needed(self._nested_blocks)
# new scope = reinitialize the stack of nested blocks
self._nested_blocks = []
# check consistent return statements
self._check_consistent_returns(node)
# check for single return or return None at the end
self._check_return_at_the_end(node)
self._return_nodes[node.name] = []
# check for context managers that have been created but not used
self._emit_consider_using_with_if_needed(
self._consider_using_with_stack.function_scope
)
self._consider_using_with_stack.function_scope.clear()
@utils.check_messages("consider-using-with")
def leave_classdef(self, _: nodes.ClassDef) -> None:
# check for context managers that have been created but not used
self._emit_consider_using_with_if_needed(
self._consider_using_with_stack.class_scope
)
self._consider_using_with_stack.class_scope.clear()
@utils.check_messages("stop-iteration-return")
def visit_raise(self, node: nodes.Raise) -> None:
self._check_stop_iteration_inside_generator(node)
def _check_stop_iteration_inside_generator(self, node):
"""Check if an exception of type StopIteration is raised inside a generator"""
frame = node.frame(future=True)
if not isinstance(frame, nodes.FunctionDef) or not frame.is_generator():
return
if utils.node_ignores_exception(node, StopIteration):
return
if not node.exc:
return
exc = utils.safe_infer(node.exc)
if not exc or not isinstance(exc, (astroid.Instance, nodes.ClassDef)):
return
if self._check_exception_inherit_from_stopiteration(exc):
self.add_message("stop-iteration-return", node=node)
@staticmethod
def _check_exception_inherit_from_stopiteration(exc):
"""Return True if the exception node in argument inherit from StopIteration"""
stopiteration_qname = f"{utils.EXCEPTIONS_MODULE}.StopIteration"
return any(_class.qname() == stopiteration_qname for _class in exc.mro())
def _check_consider_using_comprehension_constructor(self, node):
if (
isinstance(node.func, nodes.Name)
and node.args
and isinstance(node.args[0], nodes.ListComp)
):
if node.func.name == "dict":
element = node.args[0].elt
if isinstance(element, nodes.Call):
return
# If we have an `IfExp` here where both the key AND value
# are different, then don't raise the issue. See #5588
if (
isinstance(element, nodes.IfExp)
and isinstance(element.body, (nodes.Tuple, nodes.List))
and len(element.body.elts) == 2
and isinstance(element.orelse, (nodes.Tuple, nodes.List))
and len(element.orelse.elts) == 2
):
key1, value1 = element.body.elts
key2, value2 = element.orelse.elts
if (
key1.as_string() != key2.as_string()
and value1.as_string() != value2.as_string()
):
return
message_name = "consider-using-dict-comprehension"
self.add_message(message_name, node=node)
elif node.func.name == "set":
message_name = "consider-using-set-comprehension"
self.add_message(message_name, node=node)
def _check_consider_using_generator(self, node):
# 'any' and 'all' definitely should use generator, while 'list' and 'tuple' need to be considered first
# See https://github.com/PyCQA/pylint/pull/3309#discussion_r576683109
checked_call = ["any", "all", "list", "tuple"]
if (
isinstance(node, nodes.Call)
and node.func
and isinstance(node.func, nodes.Name)
and node.func.name in checked_call
):
# functions in checked_call take exactly one argument
# check whether the argument is list comprehension
if len(node.args) == 1 and isinstance(node.args[0], nodes.ListComp):
# remove square brackets '[]'
inside_comp = node.args[0].as_string()[1:-1]
call_name = node.func.name
if call_name in {"any", "all"}:
self.add_message(
"use-a-generator",
node=node,
args=(call_name, inside_comp),
)
else:
self.add_message(
"consider-using-generator",
node=node,
args=(call_name, inside_comp),
)
@utils.check_messages(
"stop-iteration-return",
"consider-using-dict-comprehension",
"consider-using-set-comprehension",
"consider-using-sys-exit",
"super-with-arguments",
"consider-using-generator",
"consider-using-with",
"use-list-literal",
"use-dict-literal",
)
def visit_call(self, node: nodes.Call) -> None:
self._check_raising_stopiteration_in_generator_next_call(node)
self._check_consider_using_comprehension_constructor(node)
self._check_quit_exit_call(node)
self._check_super_with_arguments(node)
self._check_consider_using_generator(node)
self._check_consider_using_with(node)
self._check_use_list_or_dict_literal(node)
@staticmethod
def _has_exit_in_scope(scope):
exit_func = scope.locals.get("exit")
return bool(
exit_func and isinstance(exit_func[0], (nodes.ImportFrom, nodes.Import))
)
def _check_quit_exit_call(self, node):
if isinstance(node.func, nodes.Name) and node.func.name in BUILTIN_EXIT_FUNCS:
# If we have `exit` imported from `sys` in the current or global scope, exempt this instance.
local_scope = node.scope()
if self._has_exit_in_scope(local_scope) or self._has_exit_in_scope(
node.root()
):
return
self.add_message("consider-using-sys-exit", node=node)
def _check_super_with_arguments(self, node):
if not isinstance(node.func, nodes.Name) or node.func.name != "super":
return
# pylint: disable=too-many-boolean-expressions
if (
len(node.args) != 2
or not isinstance(node.args[1], nodes.Name)
or node.args[1].name != "self"
or not isinstance(node.args[0], nodes.Name)
or not isinstance(node.args[1], nodes.Name)
or node_frame_class(node) is None
or node.args[0].name != node_frame_class(node).name
):
return
self.add_message("super-with-arguments", node=node)
def _check_raising_stopiteration_in_generator_next_call(self, node):
"""Check if a StopIteration exception is raised by the call to next function
If the next value has a default value, then do not add message.
:param node: Check to see if this Call node is a next function
:type node: :class:`nodes.Call`
"""
def _looks_like_infinite_iterator(param):
inferred = utils.safe_infer(param)
if inferred:
return inferred.qname() in KNOWN_INFINITE_ITERATORS
return False
if isinstance(node.func, nodes.Attribute):
# A next() method, which is not what we want.
return
inferred = utils.safe_infer(node.func)
if getattr(inferred, "name", "") == "next":
frame = node.frame(future=True)
# The next builtin can only have up to two
# positional arguments and no keyword arguments
has_sentinel_value = len(node.args) > 1
if (
isinstance(frame, nodes.FunctionDef)
and frame.is_generator()
and not has_sentinel_value
and not utils.node_ignores_exception(node, StopIteration)
and not _looks_like_infinite_iterator(node.args[0])
):
self.add_message("stop-iteration-return", node=node)
def _check_nested_blocks(self, node):
"""Update and check the number of nested blocks"""
# only check block levels inside functions or methods
if not isinstance(node.scope(), nodes.FunctionDef):
return
# messages are triggered on leaving the nested block. Here we save the
# stack in case the current node isn't nested in the previous one
nested_blocks = self._nested_blocks[:]
if node.parent == node.scope():
self._nested_blocks = [node]
else:
# go through ancestors from the most nested to the least nested
for ancestor_node in reversed(self._nested_blocks):
if ancestor_node == node.parent:
break
self._nested_blocks.pop()
# if the node is an elif, this should not be another nesting level
if isinstance(node, nodes.If) and self._is_actual_elif(node):
if self._nested_blocks:
self._nested_blocks.pop()
self._nested_blocks.append(node)
# send message only once per group of nested blocks
if len(nested_blocks) > len(self._nested_blocks):
self._emit_nested_blocks_message_if_needed(nested_blocks)
def _emit_nested_blocks_message_if_needed(self, nested_blocks):
if len(nested_blocks) > self.config.max_nested_blocks:
self.add_message(
"too-many-nested-blocks",
node=nested_blocks[0],
args=(len(nested_blocks), self.config.max_nested_blocks),
)
def _emit_consider_using_with_if_needed(self, stack: Dict[str, nodes.NodeNG]):
for node in stack.values():
self.add_message("consider-using-with", node=node)
@staticmethod
def _duplicated_isinstance_types(node):
"""Get the duplicated types from the underlying isinstance calls.
:param nodes.BoolOp node: Node which should contain a bunch of isinstance calls.
:returns: Dictionary of the comparison objects from the isinstance calls,
to duplicate values from consecutive calls.
:rtype: dict
"""
duplicated_objects = set()
all_types = collections.defaultdict(set)
for call in node.values:
if not isinstance(call, nodes.Call) or len(call.args) != 2:
continue
inferred = utils.safe_infer(call.func)
if not inferred or not utils.is_builtin_object(inferred):
continue
if inferred.name != "isinstance":
continue
isinstance_object = call.args[0].as_string()
isinstance_types = call.args[1]
if isinstance_object in all_types:
duplicated_objects.add(isinstance_object)
if isinstance(isinstance_types, nodes.Tuple):
elems = [
class_type.as_string() for class_type in isinstance_types.itered()
]
else:
elems = [isinstance_types.as_string()]
all_types[isinstance_object].update(elems)
# Remove all keys which are not duplicated
return {
key: value for key, value in all_types.items() if key in duplicated_objects
}
def _check_consider_merging_isinstance(self, node):
"""Check isinstance calls which can be merged together."""
if node.op != "or":
return
first_args = self._duplicated_isinstance_types(node)
for duplicated_name, class_names in first_args.items():
names = sorted(name for name in class_names)
self.add_message(
"consider-merging-isinstance",
node=node,
args=(duplicated_name, ", ".join(names)),
)
def _check_consider_using_in(self, node):
allowed_ops = {"or": "==", "and": "!="}
if node.op not in allowed_ops or len(node.values) < 2:
return
for value in node.values:
if (
not isinstance(value, nodes.Compare)
or len(value.ops) != 1
or value.ops[0][0] not in allowed_ops[node.op]
):
return
for comparable in value.left, value.ops[0][1]:
if isinstance(comparable, nodes.Call):
return
# Gather variables and values from comparisons
variables, values = [], []
for value in node.values:
variable_set = set()
for comparable in value.left, value.ops[0][1]:
if isinstance(comparable, (nodes.Name, nodes.Attribute)):
variable_set.add(comparable.as_string())
values.append(comparable.as_string())
variables.append(variable_set)
# Look for (common-)variables that occur in all comparisons
common_variables = reduce(lambda a, b: a.intersection(b), variables)
if not common_variables:
return
# Gather information for the suggestion
common_variable = sorted(list(common_variables))[0]
comprehension = "in" if node.op == "or" else "not in"
values = list(collections.OrderedDict.fromkeys(values))
values.remove(common_variable)
values_string = ", ".join(values) if len(values) != 1 else values[0] + ","
suggestion = f"{common_variable} {comprehension} ({values_string})"
self.add_message("consider-using-in", node=node, args=(suggestion,))
def _check_chained_comparison(self, node):
"""Check if there is any chained comparison in the expression.
Add a refactoring message if a boolOp contains comparison like a < b and b < c,
which can be chained as a < b < c.
Care is taken to avoid simplifying a < b < c and b < d.
"""
if node.op != "and" or len(node.values) < 2:
return
def _find_lower_upper_bounds(comparison_node, uses):
left_operand = comparison_node.left
for operator, right_operand in comparison_node.ops:
for operand in (left_operand, right_operand):
value = None
if isinstance(operand, nodes.Name):
value = operand.name
elif isinstance(operand, nodes.Const):
value = operand.value
if value is None:
continue
if operator in {"<", "<="}:
if operand is left_operand:
uses[value]["lower_bound"].add(comparison_node)
elif operand is right_operand:
uses[value]["upper_bound"].add(comparison_node)
elif operator in {">", ">="}:
if operand is left_operand:
uses[value]["upper_bound"].add(comparison_node)
elif operand is right_operand:
uses[value]["lower_bound"].add(comparison_node)
left_operand = right_operand
uses = collections.defaultdict(
lambda: {"lower_bound": set(), "upper_bound": set()}
)
for comparison_node in node.values:
if isinstance(comparison_node, nodes.Compare):
_find_lower_upper_bounds(comparison_node, uses)
for _, bounds in uses.items():
num_shared = len(bounds["lower_bound"].intersection(bounds["upper_bound"]))
num_lower_bounds = len(bounds["lower_bound"])
num_upper_bounds = len(bounds["upper_bound"])
if num_shared < num_lower_bounds and num_shared < num_upper_bounds:
self.add_message("chained-comparison", node=node)
break
@staticmethod
def _apply_boolean_simplification_rules(operator, values):
"""Removes irrelevant values or returns shortcircuiting values
This function applies the following two rules:
1) an OR expression with True in it will always be true, and the
reverse for AND
2) False values in OR expressions are only relevant if all values are
false, and the reverse for AND
"""
simplified_values = []
for subnode in values:
inferred_bool = None
if not next(subnode.nodes_of_class(nodes.Name), False):
inferred = utils.safe_infer(subnode)
if inferred:
inferred_bool = inferred.bool_value()
if not isinstance(inferred_bool, bool):
simplified_values.append(subnode)
elif (operator == "or") == inferred_bool:
return [subnode]
return simplified_values or [nodes.Const(operator == "and")]
def _simplify_boolean_operation(self, bool_op):
"""Attempts to simplify a boolean operation
Recursively applies simplification on the operator terms,
and keeps track of whether reductions have been made.
"""
children = list(bool_op.get_children())
intermediate = [
self._simplify_boolean_operation(child)
if isinstance(child, nodes.BoolOp)
else child
for child in children
]
result = self._apply_boolean_simplification_rules(bool_op.op, intermediate)
if len(result) < len(children):
self._can_simplify_bool_op = True
if len(result) == 1:
return result[0]
simplified_bool_op = copy.copy(bool_op)
simplified_bool_op.postinit(result)
return simplified_bool_op
def _check_simplifiable_condition(self, node):
"""Check if a boolean condition can be simplified.
Variables will not be simplified, even if the value can be inferred,
and expressions like '3 + 4' will remain expanded.
"""
if not utils.is_test_condition(node):
return
self._can_simplify_bool_op = False
simplified_expr = self._simplify_boolean_operation(node)
if not self._can_simplify_bool_op:
return
if not next(simplified_expr.nodes_of_class(nodes.Name), False):
self.add_message(
"condition-evals-to-constant",
node=node,
args=(node.as_string(), simplified_expr.as_string()),
)
else:
self.add_message(
"simplifiable-condition",
node=node,
args=(node.as_string(), simplified_expr.as_string()),
)
@utils.check_messages(
"consider-merging-isinstance",
"consider-using-in",
"chained-comparison",
"simplifiable-condition",
"condition-evals-to-constant",
)
def visit_boolop(self, node: nodes.BoolOp) -> None:
self._check_consider_merging_isinstance(node)
self._check_consider_using_in(node)
self._check_chained_comparison(node)
self._check_simplifiable_condition(node)
@staticmethod
def _is_simple_assignment(node):
return (
isinstance(node, nodes.Assign)
and len(node.targets) == 1
and isinstance(node.targets[0], nodes.AssignName)
and isinstance(node.value, nodes.Name)
)
def _check_swap_variables(self, node):
if not node.next_sibling() or not node.next_sibling().next_sibling():
return
assignments = [node, node.next_sibling(), node.next_sibling().next_sibling()]
if not all(self._is_simple_assignment(node) for node in assignments):
return
if any(node in self._reported_swap_nodes for node in assignments):
return
left = [node.targets[0].name for node in assignments]
right = [node.value.name for node in assignments]
if left[0] == right[-1] and left[1:] == right[:-1]:
self._reported_swap_nodes.update(assignments)
message = "consider-swap-variables"
self.add_message(message, node=node)
@utils.check_messages(
"simplify-boolean-expression",
"consider-using-ternary",
"consider-swap-variables",
"consider-using-with",
)
def visit_assign(self, node: nodes.Assign) -> None:
self._append_context_managers_to_stack(node)
self.visit_return(node) # remaining checks are identical as for return nodes
@utils.check_messages(
"simplify-boolean-expression",
"consider-using-ternary",
"consider-swap-variables",
)
def visit_return(self, node: nodes.Return) -> None:
self._check_swap_variables(node)
if self._is_and_or_ternary(node.value):
cond, truth_value, false_value = self._and_or_ternary_arguments(node.value)
else:
return
if all(
isinstance(value, nodes.Compare) for value in (truth_value, false_value)
):
return
inferred_truth_value = utils.safe_infer(truth_value)
if inferred_truth_value is None or inferred_truth_value == astroid.Uninferable:
truth_boolean_value = True
else:
truth_boolean_value = inferred_truth_value.bool_value()
if truth_boolean_value is False:
message = "simplify-boolean-expression"
suggestion = false_value.as_string()
else:
message = "consider-using-ternary"
suggestion = f"{truth_value.as_string()} if {cond.as_string()} else {false_value.as_string()}"
self.add_message(message, node=node, args=(suggestion,))
def _append_context_managers_to_stack(self, node: nodes.Assign) -> None:
if _is_inside_context_manager(node):
# if we are inside a context manager itself, we assume that it will handle the resource management itself.
return
if isinstance(node.targets[0], (nodes.Tuple, nodes.List, nodes.Set)):
assignees = node.targets[0].elts
value = utils.safe_infer(node.value)
if value is None or not hasattr(value, "elts"):
# We cannot deduce what values are assigned, so we have to skip this
return
values = value.elts
else:
assignees = [node.targets[0]]
values = [node.value]
if Uninferable in (assignees, values):
return
for assignee, value in zip(assignees, values):
if not isinstance(value, nodes.Call):
continue
inferred = utils.safe_infer(value.func)
if (
not inferred
or inferred.qname() not in CALLS_RETURNING_CONTEXT_MANAGERS
or not isinstance(assignee, (nodes.AssignName, nodes.AssignAttr))
):
continue
stack = self._consider_using_with_stack.get_stack_for_frame(
node.frame(future=True)
)
varname = (
assignee.name
if isinstance(assignee, nodes.AssignName)
else assignee.attrname
)
if varname in stack:
existing_node = stack[varname]
if astroid.are_exclusive(node, existing_node):
# only one of the two assignments can be executed at runtime, thus it is fine
stack[varname] = value
continue
# variable was redefined before it was used in a ``with`` block
self.add_message(
"consider-using-with",
node=existing_node,
)
stack[varname] = value
def _check_consider_using_with(self, node: nodes.Call):
if _is_inside_context_manager(node) or _is_a_return_statement(node):
# If we are inside a context manager itself, we assume that it will handle the resource management itself.
# If the node is a child of a return, we assume that the caller knows he is getting a context manager
# he should use properly (i.e. in a ``with``).
return
if (
node
in self._consider_using_with_stack.get_stack_for_frame(
node.frame(future=True)
).values()
):
# the result of this call was already assigned to a variable and will be checked when leaving the scope.
return
inferred = utils.safe_infer(node.func)
if not inferred:
return
could_be_used_in_with = (
# things like ``lock.acquire()``
inferred.qname() in CALLS_THAT_COULD_BE_REPLACED_BY_WITH
or (
# things like ``open("foo")`` which are not already inside a ``with`` statement
inferred.qname() in CALLS_RETURNING_CONTEXT_MANAGERS
and not _is_part_of_with_items(node)
)
)
if could_be_used_in_with and not _will_be_released_automatically(node):
self.add_message("consider-using-with", node=node)
def _check_use_list_or_dict_literal(self, node: nodes.Call) -> None:
"""Check if empty list or dict is created by using the literal [] or {}"""
if node.as_string() in {"list()", "dict()"}:
inferred = utils.safe_infer(node.func)
if isinstance(inferred, nodes.ClassDef) and not node.args:
if inferred.qname() == "builtins.list":
self.add_message("use-list-literal", node=node)
elif inferred.qname() == "builtins.dict" and not node.keywords:
self.add_message("use-dict-literal", node=node)
def _check_consider_using_join(self, aug_assign):
"""We start with the augmented assignment and work our way upwards.
Names of variables for nodes if match successful:
result = '' # assign
for number in ['1', '2', '3'] # for_loop
result += number # aug_assign
"""
for_loop = aug_assign.parent
if not isinstance(for_loop, nodes.For) or len(for_loop.body) > 1:
return
assign = for_loop.previous_sibling()
if not isinstance(assign, nodes.Assign):
return
result_assign_names = {
target.name
for target in assign.targets
if isinstance(target, nodes.AssignName)
}
is_concat_loop = (
aug_assign.op == "+="
and isinstance(aug_assign.target, nodes.AssignName)
and len(for_loop.body) == 1
and aug_assign.target.name in result_assign_names
and isinstance(assign.value, nodes.Const)
and isinstance(assign.value.value, str)
and isinstance(aug_assign.value, nodes.Name)
and aug_assign.value.name == for_loop.target.name
)
if is_concat_loop:
self.add_message("consider-using-join", node=aug_assign)
@utils.check_messages("consider-using-join")
def visit_augassign(self, node: nodes.AugAssign) -> None:
self._check_consider_using_join(node)
@utils.check_messages("unnecessary-comprehension", "unnecessary-dict-index-lookup")
def visit_comprehension(self, node: nodes.Comprehension) -> None:
self._check_unnecessary_comprehension(node)
self._check_unnecessary_dict_index_lookup(node)
def _check_unnecessary_comprehension(self, node: nodes.Comprehension) -> None:
if (
isinstance(node.parent, nodes.GeneratorExp)
or len(node.ifs) != 0
or len(node.parent.generators) != 1
or node.is_async
):
return
if (
isinstance(node.parent, nodes.DictComp)
and isinstance(node.parent.key, nodes.Name)
and isinstance(node.parent.value, nodes.Name)
and isinstance(node.target, nodes.Tuple)
and all(isinstance(elt, nodes.AssignName) for elt in node.target.elts)
):
expr_list = [node.parent.key.name, node.parent.value.name]
target_list = [elt.name for elt in node.target.elts]
elif isinstance(node.parent, (nodes.ListComp, nodes.SetComp)):
expr = node.parent.elt
if isinstance(expr, nodes.Name):
expr_list = expr.name
elif isinstance(expr, nodes.Tuple):
if any(not isinstance(elt, nodes.Name) for elt in expr.elts):
return
expr_list = [elt.name for elt in expr.elts]
else:
expr_list = []
target = node.parent.generators[0].target
target_list = (
target.name
if isinstance(target, nodes.AssignName)
else (
[
elt.name
for elt in target.elts
if isinstance(elt, nodes.AssignName)
]
if isinstance(target, nodes.Tuple)
else []
)
)
else:
return
if expr_list == target_list and expr_list:
args: Optional[Tuple[str]] = None
inferred = utils.safe_infer(node.iter)
if isinstance(node.parent, nodes.DictComp) and isinstance(
inferred, astroid.objects.DictItems
):
args = (f"{node.iter.func.expr.as_string()}",)
elif (
isinstance(node.parent, nodes.ListComp)
and isinstance(inferred, nodes.List)
) or (
isinstance(node.parent, nodes.SetComp)
and isinstance(inferred, nodes.Set)
):
args = (f"{node.iter.as_string()}",)
if args:
self.add_message(
"unnecessary-comprehension", node=node.parent, args=args
)
return
if isinstance(node.parent, nodes.DictComp):
func = "dict"
elif isinstance(node.parent, nodes.ListComp):
func = "list"
elif isinstance(node.parent, nodes.SetComp):
func = "set"
else:
return
self.add_message(
"unnecessary-comprehension",
node=node.parent,
args=(f"{func}({node.iter.as_string()})",),
)
@staticmethod
def _is_and_or_ternary(node):
"""Returns true if node is 'condition and true_value or false_value' form.
All of: condition, true_value and false_value should not be a complex boolean expression
"""
return (
isinstance(node, nodes.BoolOp)
and node.op == "or"
and len(node.values) == 2
and isinstance(node.values[0], nodes.BoolOp)
and not isinstance(node.values[1], nodes.BoolOp)
and node.values[0].op == "and"
and not isinstance(node.values[0].values[1], nodes.BoolOp)
and len(node.values[0].values) == 2
)
@staticmethod
def _and_or_ternary_arguments(node):
false_value = node.values[1]
condition, true_value = node.values[0].values
return condition, true_value, false_value
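# Illustrative example of the form recognized above (added; not in the original source):
# return x > 0 and "positive" or "non-positive"   # pre-2.5 ternary idiom
# return "positive" if x > 0 else "non-positive"  # suggested ternary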
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
self._return_nodes[node.name] = list(
node.nodes_of_class(nodes.Return, skip_klass=nodes.FunctionDef)
)
def _check_consistent_returns(self, node: nodes.FunctionDef) -> None:
"""Check that all return statements inside a function are consistent.
Return statements are consistent if:
- all returns are explicit and if there is no implicit return;
- all returns are empty and if there is, possibly, an implicit return.
Args:
node (nodes.FunctionDef): the function holding the return statements.
"""
# explicit return statements are those with a non-None value
explicit_returns = [
_node for _node in self._return_nodes[node.name] if _node.value is not None
]
if not explicit_returns:
return
if len(explicit_returns) == len(
self._return_nodes[node.name]
) and self._is_node_return_ended(node):
return
self.add_message("inconsistent-return-statements", node=node)
def _is_if_node_return_ended(self, node: nodes.If) -> bool:
"""Check if the If node ends with an explicit return statement.
Args:
node (nodes.If): If node to be checked.
Returns:
bool: True if the node ends with an explicit return statement, False otherwise.
"""
# Do not check if inner function definitions are return ended.
is_if_returning = any(
self._is_node_return_ended(_ifn)
for _ifn in node.body
if not isinstance(_ifn, nodes.FunctionDef)
)
if not node.orelse:
# If there is no orelse part, then the if statement is returning if:
# - there is at least one return statement in its siblings;
# - the if body is itself returning.
if not self._has_return_in_siblings(node):
return False
return is_if_returning
# If there is an orelse part then both if body and orelse part should return.
is_orelse_returning = any(
self._is_node_return_ended(_ore)
for _ore in node.orelse
if not isinstance(_ore, nodes.FunctionDef)
)
return is_if_returning and is_orelse_returning
def _is_raise_node_return_ended(self, node: nodes.Raise) -> bool:
"""Check if the Raise node ends with an explicit return statement.
Args:
node (nodes.Raise): Raise node to be checked.
Returns:
bool: True if the node ends with an explicit return statement, False otherwise.
"""
# a Raise statement doesn't need to end with a return statement,
# but if the exception raised is handled, then the handler has to
# end with a return statement
if not node.exc:
# Ignore bare raises
return True
if not utils.is_node_inside_try_except(node):
# If the raise statement is not inside a try/except statement
# then the exception is raised and cannot be caught. No need
# to infer it.
return True
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable or not hasattr(exc, "pytype"):
return False
exc_name = exc.pytype().split(".")[-1]
handlers = utils.get_exception_handlers(node, exc_name)
handlers = list(handlers) if handlers is not None else []
if handlers:
# among all the handlers handling the exception at least one
# must end with a return statement
return any(self._is_node_return_ended(_handler) for _handler in handlers)
# if no handlers handle the exception then it's ok
return True
def _is_node_return_ended(self, node: nodes.NodeNG) -> bool:
"""Check if the node ends with an explicit return statement.
Args:
node (nodes.NodeNG): node to be checked.
Returns:
bool: True if the node ends with an explicit return statement, False otherwise.
"""
# Recursion base case
if isinstance(node, nodes.Return):
return True
if isinstance(node, nodes.Call):
try:
funcdef_node = node.func.inferred()[0]
if self._is_function_def_never_returning(funcdef_node):
return True
except astroid.InferenceError:
pass
# Avoid the check inside while loop as we don't know
# if they will be completed
if isinstance(node, nodes.While):
return True
if isinstance(node, nodes.Raise):
return self._is_raise_node_return_ended(node)
if isinstance(node, nodes.If):
return self._is_if_node_return_ended(node)
if isinstance(node, nodes.TryExcept):
handlers = {
_child
for _child in node.get_children()
if isinstance(_child, nodes.ExceptHandler)
}
all_but_handler = set(node.get_children()) - handlers
return any(
self._is_node_return_ended(_child) for _child in all_but_handler
) and all(self._is_node_return_ended(_child) for _child in handlers)
if (
isinstance(node, nodes.Assert)
and isinstance(node.test, nodes.Const)
and not node.test.value
):
# consider assert False as a return node
return True
# recurses on the children of the node
return any(self._is_node_return_ended(_child) for _child in node.get_children())
@staticmethod
def _has_return_in_siblings(node: nodes.NodeNG) -> bool:
"""Returns True if there is at least one return in the node's siblings"""
next_sibling = node.next_sibling()
while next_sibling:
if isinstance(next_sibling, nodes.Return):
return True
next_sibling = next_sibling.next_sibling()
return False
def _is_function_def_never_returning(self, node: nodes.FunctionDef) -> bool:
"""Return True if the function never returns. False otherwise.
Args:
node (nodes.FunctionDef): function definition node to be analyzed.
Returns:
bool: True if the function never returns, False otherwise.
"""
if isinstance(node, nodes.FunctionDef) and node.returns:
return (
isinstance(node.returns, nodes.Attribute)
and node.returns.attrname == "NoReturn"
or isinstance(node.returns, nodes.Name)
and node.returns.name == "NoReturn"
)
try:
return node.qname() in self._never_returning_functions
except TypeError:
return False
def _check_return_at_the_end(self, node):
"""Check for presence of a *single* return statement at the end of a
function. "return" or "return None" are useless because None is the
default return type if they are missing.
NOTE: produces a message only if there is a single return statement
in the function body. Otherwise _check_consistent_returns() is called!
Per its implementation and PEP8 we can have a "return None" at the end
of the function body if there are other return statements before that!
"""
if len(self._return_nodes[node.name]) > 1:
return
if len(node.body) <= 1:
return
last = node.body[-1]
if isinstance(last, nodes.Return):
# e.g. "return"
if last.value is None:
self.add_message("useless-return", node=node)
            # e.g. "return None"
elif isinstance(last.value, nodes.Const) and (last.value.value is None):
self.add_message("useless-return", node=node)
def _check_unnecessary_dict_index_lookup(
self, node: Union[nodes.For, nodes.Comprehension]
) -> None:
"""Add message when accessing dict values by index lookup."""
# Verify that we have an .items() call and
# that the object which is iterated is used as a subscript in the
# body of the for.
# Is it a proper items call?
if (
isinstance(node.iter, nodes.Call)
and isinstance(node.iter.func, nodes.Attribute)
and node.iter.func.attrname == "items"
):
inferred = utils.safe_infer(node.iter.func)
if not isinstance(inferred, astroid.BoundMethod):
return
iterating_object_name = node.iter.func.expr.as_string()
# Verify that the body of the for loop uses a subscript
# with the object that was iterated. This uses some heuristics
# in order to make sure that the same object is used in the
# for body.
children = (
node.body if isinstance(node, nodes.For) else node.parent.get_children()
)
for child in children:
for subscript in child.nodes_of_class(nodes.Subscript):
if not isinstance(subscript.value, (nodes.Name, nodes.Attribute)):
continue
value = subscript.slice
if isinstance(node, nodes.For) and (
isinstance(subscript.parent, nodes.Assign)
and subscript in subscript.parent.targets
or isinstance(subscript.parent, nodes.AugAssign)
and subscript == subscript.parent.target
):
# Ignore this subscript if it is the target of an assignment
# Early termination; after reassignment dict index lookup will be necessary
return
if isinstance(subscript.parent, nodes.Delete):
# Ignore this subscript if it's used with the delete keyword
return
# Case where .items is assigned to k,v (i.e., for k, v in d.items())
if isinstance(value, nodes.Name):
if (
not isinstance(node.target, nodes.Tuple)
# Ignore 1-tuples: for k, in d.items()
or len(node.target.elts) < 2
or value.name != node.target.elts[0].name
or iterating_object_name != subscript.value.as_string()
):
continue
if (
isinstance(node, nodes.For)
and value.lookup(value.name)[1][-1].lineno > node.lineno
):
# Ignore this subscript if it has been redefined after
# the for loop. This checks for the line number using .lookup()
# to get the line number where the iterating object was last
# defined and compare that to the for loop's line number
continue
self.add_message(
"unnecessary-dict-index-lookup",
node=subscript,
args=(node.target.elts[1].as_string()),
)
# Case where .items is assigned to single var (i.e., for item in d.items())
elif isinstance(value, nodes.Subscript):
if (
not isinstance(node.target, nodes.AssignName)
or node.target.name != value.value.name
or iterating_object_name != subscript.value.as_string()
):
continue
if (
isinstance(node, nodes.For)
and value.value.lookup(value.value.name)[1][-1].lineno
> node.lineno
):
# Ignore this subscript if it has been redefined after
# the for loop. This checks for the line number using .lookup()
# to get the line number where the iterating object was last
# defined and compare that to the for loop's line number
continue
# check if subscripted by 0 (key)
inferred = utils.safe_infer(value.slice)
if not isinstance(inferred, nodes.Const) or inferred.value != 0:
continue
self.add_message(
"unnecessary-dict-index-lookup",
node=subscript,
args=("1".join(value.as_string().rsplit("0", maxsplit=1)),),
)
| 1 | 20,548 | I think there should be a comma before "remove", in all the cases | PyCQA-pylint | py
@@ -12,11 +12,16 @@ class OHEMSampler(BaseSampler):
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
+ stages=0,
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
- self.bbox_roi_extractor = context.bbox_roi_extractor
- self.bbox_head = context.bbox_head
+ if not hasattr(context, "num_stages"):
+ self.bbox_roi_extractor = context.bbox_roi_extractor
+ self.bbox_head = context.bbox_head
+ else:
+ self.bbox_roi_extractor = context.bbox_roi_extractor[stages]
+ self.bbox_head = context.bbox_head[stages]
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad(): | 1 | import torch
from .base_sampler import BaseSampler
from ..transforms import bbox2roi
class OHEMSampler(BaseSampler):
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.bbox_roi_extractor = context.bbox_roi_extractor
self.bbox_head = context.bbox_head
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
bbox_feats = self.bbox_roi_extractor(
feats[:self.bbox_roi_extractor.num_inputs], rois)
cls_score, _ = self.bbox_head(bbox_feats)
loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
labels=labels,
label_weights=cls_score.new_ones(cls_score.size(0)),
bbox_targets=None,
bbox_weights=None,
reduce=False)['loss_cls']
_, topk_loss_inds = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
# Sample some hard positive samples
pos_inds = torch.nonzero(assign_result.gt_inds > 0)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
assign_result.labels[pos_inds], feats)
def _sample_neg(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
# Sample some hard negative samples
neg_inds = torch.nonzero(assign_result.gt_inds == 0)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
assign_result.labels[neg_inds], feats)
| 1 | 17,165 | Single quotes are used by default in this project. | open-mmlab-mmdetection | py
@@ -177,10 +177,8 @@ func (c *Controller) updateSpc(oldSpc, newSpc interface{}) {
c.recorder.Event(spc, corev1.EventTypeWarning, "Update", message)
return
}
- // Enqueue spc only when there is a pending pool to be created.
- if c.isPoolPending(spc) {
- c.enqueueSpc(newSpc)
- }
+ // Don't reconcile on spc
+ return
}
// deleteSpc is the delete event handler for spc. | 1 | /*
Copyright 2017 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spc
import (
"fmt"
"github.com/golang/glog"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned"
openebsScheme "github.com/openebs/maya/pkg/client/generated/clientset/versioned/scheme"
informers "github.com/openebs/maya/pkg/client/generated/informers/externalversions"
listers "github.com/openebs/maya/pkg/client/generated/listers/openebs.io/v1alpha1"
ndmclientset "github.com/openebs/maya/pkg/client/generated/openebs.io/ndm/v1alpha1/clientset/internalclientset"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
const controllerAgentName = "spc-controller"
// Controller is the controller implementation for SPC resources
type Controller struct {
// kubeclientset is a standard kubernetes clientset
kubeclientset kubernetes.Interface
// clientset is a openebs custom resource package generated for custom API group.
clientset clientset.Interface
// ndmclientset is a ndm custom resource package generated for custom API group.
ndmclientset ndmclientset.Interface
spcLister listers.StoragePoolClaimLister
// spcSynced is used for caches sync to get populated
spcSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
}
// ControllerBuilder is the builder object for controller.
type ControllerBuilder struct {
Controller *Controller
}
// NewControllerBuilder returns an empty instance of controller builder.
func NewControllerBuilder() *ControllerBuilder {
return &ControllerBuilder{
Controller: &Controller{},
}
}
// withKubeClient fills kube client to controller object.
func (cb *ControllerBuilder) withKubeClient(ks kubernetes.Interface) *ControllerBuilder {
cb.Controller.kubeclientset = ks
return cb
}
// withOpenEBSClient fills openebs client to controller object.
func (cb *ControllerBuilder) withOpenEBSClient(cs clientset.Interface) *ControllerBuilder {
cb.Controller.clientset = cs
return cb
}
// withNDMClient fills ndm client to controller object.
func (cb *ControllerBuilder) withNDMClient(ndmcs ndmclientset.Interface) *ControllerBuilder {
cb.Controller.ndmclientset = ndmcs
return cb
}
// withSpcLister fills spc lister to controller object.
func (cb *ControllerBuilder) withSpcLister(sl informers.SharedInformerFactory) *ControllerBuilder {
spcInformer := sl.Openebs().V1alpha1().StoragePoolClaims()
cb.Controller.spcLister = spcInformer.Lister()
return cb
}
// withspcSynced adds object sync information in cache to controller object.
func (cb *ControllerBuilder) withspcSynced(sl informers.SharedInformerFactory) *ControllerBuilder {
spcInformer := sl.Openebs().V1alpha1().StoragePoolClaims()
cb.Controller.spcSynced = spcInformer.Informer().HasSynced
return cb
}
// withWorkqueue adds workqueue to controller object.
func (cb *ControllerBuilder) withWorkqueueRateLimiting() *ControllerBuilder {
cb.Controller.workqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "SPC")
return cb
}
// withRecorder adds recorder to controller object.
func (cb *ControllerBuilder) withRecorder(ks kubernetes.Interface) *ControllerBuilder {
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
cb.Controller.recorder = recorder
return cb
}
// withEventHandler adds event handlers controller object.
func (cb *ControllerBuilder) withEventHandler(spcInformerFactory informers.SharedInformerFactory) *ControllerBuilder {
spcInformer := spcInformerFactory.Openebs().V1alpha1().StoragePoolClaims()
// Set up an event handler for when SPC resources change
spcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cb.Controller.addSpc,
UpdateFunc: cb.Controller.updateSpc,
// This will enter the sync loop and no-op, because the spc has been deleted from the store.
DeleteFunc: cb.Controller.deleteSpc,
})
return cb
}
// Build returns a controller instance.
func (cb *ControllerBuilder) Build() (*Controller, error) {
err := openebsScheme.AddToScheme(scheme.Scheme)
if err != nil {
return nil, err
}
return cb.Controller, nil
}
// addSpc is the add event handler for spc
func (c *Controller) addSpc(obj interface{}) {
spc, ok := obj.(*apis.StoragePoolClaim)
if !ok {
runtime.HandleError(fmt.Errorf("Couldn't get spc object %#v", obj))
return
}
if spc.Annotations[string(apis.OpenEBSDisableReconcileKey)] == "true" {
message := fmt.Sprintf("reconcile is disabled via %q annotation", string(apis.OpenEBSDisableReconcileKey))
c.recorder.Event(spc, corev1.EventTypeWarning, "Create", message)
return
}
glog.V(4).Infof("Queuing SPC %s for add event", spc.Name)
c.enqueueSpc(spc)
}
// updateSpc is the update event handler for spc.
func (c *Controller) updateSpc(oldSpc, newSpc interface{}) {
spc, ok := newSpc.(*apis.StoragePoolClaim)
if !ok {
runtime.HandleError(fmt.Errorf("Couldn't get spc object %#v", newSpc))
return
}
if spc.Annotations[string(apis.OpenEBSDisableReconcileKey)] == "true" {
message := fmt.Sprintf("reconcile is disabled via %q annotation", string(apis.OpenEBSDisableReconcileKey))
c.recorder.Event(spc, corev1.EventTypeWarning, "Update", message)
return
}
// Enqueue spc only when there is a pending pool to be created.
if c.isPoolPending(spc) {
c.enqueueSpc(newSpc)
}
}
// deleteSpc is the delete event handler for spc.
func (c *Controller) deleteSpc(obj interface{}) {
spc, ok := obj.(*apis.StoragePoolClaim)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
runtime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
spc, ok = tombstone.Obj.(*apis.StoragePoolClaim)
if !ok {
runtime.HandleError(fmt.Errorf("Tombstone contained object that is not a storagepoolclaim %#v", obj))
return
}
}
if spc.Annotations[string(apis.OpenEBSDisableReconcileKey)] == "true" {
message := fmt.Sprintf("reconcile is disabled via %q annotation", string(apis.OpenEBSDisableReconcileKey))
c.recorder.Event(spc, corev1.EventTypeWarning, "Delete", message)
return
}
glog.V(4).Infof("Deleting storagepoolclaim %s", spc.Name)
c.enqueueSpc(spc)
}
| 1 | 16,826 | S1023: redundant `return` statement (from `gosimple`) | openebs-maya | go |
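A minimal standalone sketch of what S1023 flags — the function names are hypothetical, not the openebs code. gosimple reports this lint when a function with no result values ends in a bare `return`, because control falls off the end of the function either way; the patch above triggers it by replacing the conditional enqueue with a trailing `return`.

package main

import "fmt"

// updateWithRedundantReturn ends in a bare `return`; since the function
// has no result values and this is its final statement, gosimple
// reports S1023 ("redundant `return` statement").
func updateWithRedundantReturn(disabled bool) {
	if disabled {
		fmt.Println("reconcile is disabled")
		return // fine: an early return that skips the rest of the body
	}
	return // S1023: redundant, execution ends here regardless
}

// updateFixed drops the trailing `return`; behavior is identical.
func updateFixed(disabled bool) {
	if disabled {
		fmt.Println("reconcile is disabled")
		return
	}
	// Don't reconcile on spc update events.
}

func main() {
	updateWithRedundantReturn(false)
	updateFixed(false)
}

The fix for the patch above is simply to end the function with the comment alone, keeping only the early `return` inside the annotation check.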
@@ -43,3 +43,6 @@ func (s *webhook) Run(ctx context.Context) error {
func (s *webhook) Notify(event model.Event) {
}
+
+func (s *webhook) Close(ctx context.Context) {
+} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notifier
import (
"context"
"go.uber.org/zap"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/model"
)
type webhook struct {
name string
config config.NotificationReceiverWebhook
logger *zap.Logger
}
func newWebhookSender(name string, cfg config.NotificationReceiverWebhook, logger *zap.Logger) *webhook {
return &webhook{
name: name,
config: cfg,
logger: logger.Named("webhook"),
}
}
func (s *webhook) Run(ctx context.Context) error {
return nil
}
func (s *webhook) Notify(event model.Event) {
}
| 1 | 8,797 | `ctx` is unused in Close | pipe-cd-pipe | go |
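Go never complains about unused function parameters, so the cleanest way to record that `ctx` is deliberately ignored is to leave the parameter unnamed (or name it `_`) while still matching the interface. A short sketch under the assumption that Close exists to satisfy a sender-style interface — the interface shape here is hypothetical, not pipe-cd's actual definition:

package main

import "context"

// sender is a hypothetical stand-in for the interface that the
// webhook type is assumed to satisfy.
type sender interface {
	Run(ctx context.Context) error
	Close(ctx context.Context)
}

type webhook struct{}

func (s *webhook) Run(ctx context.Context) error { return nil }

// Leaving the context.Context parameter unnamed documents that the
// method ignores it, while the signature still matches the interface.
func (s *webhook) Close(context.Context) {}

func main() {
	// Compile-time check that *webhook still satisfies the interface.
	var _ sender = (*webhook)(nil)
}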
@@ -0,0 +1,9 @@
+<?php
+
+namespace Shopsys\ProductFeed\HeurekaBundle\Model\HeurekaCategory;
+
+use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
+
+class HeurekaCategoryNotFoundException extends NotFoundHttpException
+{
+} | 1 | 1 | 9,485 | I'm confused. This exception is thrown when `HeurekaCategory` is not found in database. But this exception extends Http exception. Why? What has database search common with http? If the only reason is that it is the same in the whole project, then fine. But then we have even bigger problem - we don't know how to use exceptions. | shopsys-shopsys | php |
|
@@ -0,0 +1,7 @@
+_base_ = './tood_r101_fpn_mstrain_2x_coco.py'
+
+model = dict(
+ backbone=dict(
+ dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
+ stage_with_dcn=(False, True, True, True)),
+ bbox_head=dict(num_dcn_on_head=2)) | 1 | 1 | 26,868 | Is it necessary to add the suffix `on_head`, because it belongs to` bbox_head`? | open-mmlab-mmdetection | py |
|
@@ -548,6 +548,14 @@ AM_COND_IF(BUILDOPT_SYSTEMD_AND_LIBMOUNT,
AC_DEFINE([BUILDOPT_LIBSYSTEMD_AND_LIBMOUNT], 1, [Define if systemd and libmount]))
if test x$with_libsystemd = xyes; then OSTREE_FEATURES="$OSTREE_FEATURES systemd"; fi
+AC_ARG_WITH(modern-grub,
+ AS_HELP_STRING([--with-modern-grub],
+ [Omit grub linux and initrd suffixes for EFI/BIOS booting on GRUB >2.02 (default: no)]),,
+ [with_modern_grub=no])
+AS_IF([ test x$with_modern_grub = xyes], [
+ AC_DEFINE([WITH_MODERN_GRUB], 1, [Define if we have a GRUB version newer than 2.02])
+])
+
AC_ARG_WITH(builtin-grub2-mkconfig,
AS_HELP_STRING([--with-builtin-grub2-mkconfig],
[Use a builtin minimal grub2-mkconfig to generate a GRUB2 configuration file (default: no)]),, | 1 | AC_PREREQ([2.63])
dnl To perform a release, follow the instructions in `docs/CONTRIBUTING.md`.
m4_define([year_version], [2020])
m4_define([release_version], [9])
m4_define([package_version], [year_version.release_version])
AC_INIT([libostree], [package_version], [[email protected]])
is_release_build=no
AC_CONFIG_HEADER([config.h])
AC_CONFIG_MACRO_DIR([buildutil])
AC_CONFIG_AUX_DIR([build-aux])
AM_INIT_AUTOMAKE([1.13 -Wno-portability foreign no-define tar-ustar no-dist-gzip dist-xz
color-tests subdir-objects])
AM_MAINTAINER_MODE([enable])
AM_SILENT_RULES([yes])
AC_USE_SYSTEM_EXTENSIONS
AC_SYS_LARGEFILE
AC_PROG_CC
AM_PROG_CC_C_O
AC_PROG_YACC
dnl Versioning information
AC_SUBST([YEAR_VERSION], [year_version])
AC_SUBST([RELEASE_VERSION], [release_version])
AC_SUBST([PACKAGE_VERSION], [package_version])
AS_IF([echo "$CFLAGS" | grep -q -E -e '-Werror($| )'], [], [
CC_CHECK_FLAGS_APPEND([WARN_CFLAGS], [CFLAGS], [\
-pipe \
-Wall \
-Werror=shadow \
-Werror=empty-body \
-Werror=strict-prototypes \
-Werror=missing-prototypes \
-Werror=implicit-function-declaration \
"-Werror=format=2 -Werror=format-security -Werror=format-nonliteral" \
-Werror=pointer-arith -Werror=init-self \
-Werror=missing-declarations \
-Werror=return-type \
-Werror=switch \
-Werror=overflow \
-Werror=int-conversion \
-Werror=parentheses \
-Werror=undef \
-Werror=incompatible-pointer-types \
-Werror=misleading-indentation \
-Werror=missing-include-dirs -Werror=aggregate-return \
-Wstrict-aliasing=2 \
-Werror=unused-result \
])])
AC_SUBST(WARN_CFLAGS)
AC_ARG_ENABLE(sanitizers,
AS_HELP_STRING([--enable-sanitizers],
[Enable ASAN and UBSAN (default: no)]),,
[enable_sanitizers=no])
AM_CONDITIONAL(BUILDOPT_ASAN, [test x$enable_sanitizers != xno])
AM_COND_IF([BUILDOPT_ASAN], [
sanitizer_flags="-fsanitize=address -fsanitize=undefined -fsanitize-undefined-trap-on-error"
CFLAGS="$CFLAGS ${sanitizer_flags}"
CXXFLAGS="$CXXFLAGS ${sanitizer_flags}"
AC_DEFINE([BUILDOPT_ASAN], 1, [Define if we are building with asan and ubsan])
])
AC_MSG_CHECKING([for -fsanitize=thread in CFLAGS])
if echo $CFLAGS | grep -q -e -fsanitize=thread; then
AC_MSG_RESULT([yes])
using_tsan=yes
else
AC_MSG_RESULT([no])
fi
AM_CONDITIONAL(BUILDOPT_TSAN, [test x$using_tsan = xyes])
AM_COND_IF([BUILDOPT_TSAN],
[AC_DEFINE([BUILDOPT_TSAN], 1, [Define if we are building with -fsanitize=thread])])
# Initialize libtool
LT_PREREQ([2.2.4])
LT_INIT([disable-static])
OSTREE_FEATURES=""
AC_SUBST([OSTREE_FEATURES])
GLIB_TESTS
LIBGLNX_CONFIGURE
dnl These bits attempt to mirror https://github.com/coreutils/gnulib/blob/e369b04cca4da1534c98628b8ee4648bfca2bb3a/m4/parse-datetime.m4#L27
AC_CHECK_FUNCS([nanotime clock_gettime])
AC_STRUCT_TIMEZONE
AC_CHECK_HEADER([sys/xattr.h],,[AC_MSG_ERROR([You must have sys/xattr.h from glibc])])
AS_IF([test "$YACC" != "bison -y"], [AC_MSG_ERROR([bison not found but required])])
PKG_PROG_PKG_CONFIG
# PKG_CHECK_VAR added to pkg-config 0.28
m4_define_default(
[PKG_CHECK_VAR],
[AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])
AS_IF([test -z "$$1"], [$1=`$PKG_CONFIG --variable="$3" "$2"`])
AS_IF([test -n "$$1"], [$4], [$5])])
PKG_CHECK_VAR(BASH_COMPLETIONSDIR, [bash-completion], [completionsdir], ,
BASH_COMPLETIONSDIR="${datadir}/bash-completion/completions")
AC_SUBST(BASH_COMPLETIONSDIR)
AM_PATH_GLIB_2_0(,,AC_MSG_ERROR([GLib not found]))
dnl When bumping the gio-unix-2.0 dependency (or glib-2.0 in general),
dnl remember to bump GLIB_VERSION_MIN_REQUIRED and
dnl GLIB_VERSION_MAX_ALLOWED in Makefile.am
GIO_DEPENDENCY="gio-unix-2.0 >= 2.40.0"
PKG_CHECK_MODULES(OT_DEP_GIO_UNIX, $GIO_DEPENDENCY)
dnl 5.1.0 is an arbitrary version here
PKG_CHECK_MODULES(OT_DEP_LZMA, liblzma >= 5.0.5)
dnl Needed for rollsum
PKG_CHECK_MODULES(OT_DEP_ZLIB, zlib)
dnl We're not actually linking to this, just using the header
PKG_CHECK_MODULES(OT_DEP_E2P, e2p)
dnl Arbitrary version that's in CentOS7.2 now
CURL_DEPENDENCY=7.29.0
AC_ARG_WITH(curl,
AS_HELP_STRING([--with-curl], [Use libcurl @<:@default=no@:>@]),
[], [with_curl=no])
AS_IF([test x$with_curl != xno ], [
PKG_CHECK_MODULES(OT_DEP_CURL, libcurl >= $CURL_DEPENDENCY)
with_curl=yes
AC_DEFINE([HAVE_LIBCURL], 1, [Define if we have libcurl.pc])
dnl Currently using libcurl requires soup for trivial-httpd for tests
with_soup_default=yes
], [with_soup_default=check])
AM_CONDITIONAL(USE_CURL, test x$with_curl != xno)
if test x$with_curl = xyes; then OSTREE_FEATURES="$OSTREE_FEATURES libcurl"; fi
AC_ARG_ENABLE(http2,
AS_HELP_STRING([--disable-http2],
[Disable use of http2 (default: no)]),,
[enable_http2=yes])
AS_IF([test x$enable_http2 != xno ], [
AC_DEFINE([BUILDOPT_HTTP2], 1, [Define if we enable http2 by default])
], [
OSTREE_FEATURES="$OSTREE_FEATURES no-http2"
])
dnl When bumping the libsoup-2.4 dependency, remember to bump
dnl SOUP_VERSION_MIN_REQUIRED and SOUP_VERSION_MAX_ALLOWED in
dnl Makefile.am
SOUP_DEPENDENCY="libsoup-2.4 >= 2.39.1"
AC_ARG_WITH(soup,
AS_HELP_STRING([--with-soup], [Use libsoup @<:@default=yes@:>@]),
[], [with_soup=$with_soup_default])
AS_IF([test x$with_soup != xno], [
AC_ARG_ENABLE(libsoup_client_certs,
AS_HELP_STRING([--enable-libsoup-client-certs],
[Require availability of new enough libsoup TLS client cert API (default: auto)]),,
[enable_libsoup_client_certs=auto])
AC_MSG_CHECKING([for $SOUP_DEPENDENCY])
PKG_CHECK_EXISTS($SOUP_DEPENDENCY, have_soup=yes, have_soup=no)
AC_MSG_RESULT([$have_soup])
AS_IF([ test x$have_soup = xno && test x$with_soup != xcheck], [
AC_MSG_ERROR([libsoup is enabled but could not be found])
])
AS_IF([test x$have_soup = xyes], [
PKG_CHECK_MODULES(OT_DEP_SOUP, $SOUP_DEPENDENCY)
AC_DEFINE([HAVE_LIBSOUP], 1, [Define if we have libsoup.pc])
with_soup=yes
save_CFLAGS=$CFLAGS
CFLAGS=$OT_DEP_SOUP_CFLAGS
have_libsoup_client_certs=no
AC_CHECK_DECL([SOUP_SESSION_TLS_INTERACTION], [
AC_DEFINE([HAVE_LIBSOUP_CLIENT_CERTS], 1, [Define if we have libsoup client certs])
have_libsoup_client_certs=yes
], [], [#include <libsoup/soup.h>])
AS_IF([test x$enable_libsoup_client_certs = xyes && test x$have_libsoup_client_certs != xyes], [
AC_MSG_ERROR([libsoup client certs explicitly requested but not found])
])
CFLAGS=$save_CFLAGS
], [
with_soup=no
])
], [ with_soup=no ])
if test x$with_soup != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libsoup"; fi
AM_CONDITIONAL(USE_LIBSOUP, test x$with_soup != xno)
AM_CONDITIONAL(HAVE_LIBSOUP_CLIENT_CERTS, test x$have_libsoup_client_certs = xyes)
AC_ARG_ENABLE(trivial-httpd-cmdline,
[AS_HELP_STRING([--enable-trivial-httpd-cmdline],
[Continue to support "ostree trivial-httpd" [default=no]])],,
enable_trivial_httpd_cmdline=no)
AS_IF([test x$enable_trivial_httpd_cmdline = xyes],
[AC_DEFINE([BUILDOPT_ENABLE_TRIVIAL_HTTPD_CMDLINE], 1, [Define if we are enabling ostree trivial-httpd entrypoint])]
)
AS_IF([test x$with_curl = xyes && test x$with_soup = xno], [
AC_MSG_WARN([Curl enabled, but libsoup is not; libsoup is needed for tests (make check, etc.)])
])
AM_CONDITIONAL(USE_CURL_OR_SOUP, test x$with_curl != xno || test x$with_soup != xno)
AS_IF([test x$with_curl != xno || test x$with_soup != xno],
[AC_DEFINE([HAVE_LIBCURL_OR_LIBSOUP], 1, [Define if we have soup or curl])])
AS_IF([test x$with_curl = xyes], [fetcher_backend=curl], [test x$with_soup = xyes], [fetcher_backend=libsoup], [fetcher_backend=none])
m4_ifdef([GOBJECT_INTROSPECTION_CHECK], [
GOBJECT_INTROSPECTION_CHECK([1.34.0])
])
AM_CONDITIONAL(BUILDOPT_INTROSPECTION, test "x$found_introspection" = xyes)
LIBGPGME_DEPENDENCY="1.1.8"
AC_ARG_WITH(gpgme,
AS_HELP_STRING([--with-gpgme], [Use gpgme @<:@default=yes@:>@]),
[], [with_gpgme=yes])
AS_IF([test x$with_gpgme != xno], [
PKG_CHECK_MODULES(OT_DEP_GPGME, gpgme-pthread >= $LIBGPGME_DEPENDENCY, have_gpgme=yes, [
m4_ifdef([AM_PATH_GPGME_PTHREAD], [
AM_PATH_GPGME_PTHREAD($LIBGPGME_DEPENDENCY, have_gpgme=yes, have_gpgme=no)
],[ have_gpgme=no ])
])
AS_IF([ test x$have_gpgme = xno ], [
AC_MSG_ERROR([Need GPGME_PTHREAD version $LIBGPGME_DEPENDENCY or later])
])
OSTREE_FEATURES="$OSTREE_FEATURES gpgme"
PKG_CHECK_MODULES(OT_DEP_GPG_ERROR, [gpg-error], [], [
dnl This apparently doesn't ship a pkg-config file either, and we need
dnl to link to it directly.
AC_PATH_PROG(GPG_ERROR_CONFIG, [gpg-error-config], [AC_MSG_ERROR([Missing gpg-error-config])])
OT_DEP_GPG_ERROR_CFLAGS="$( $GPG_ERROR_CONFIG --cflags )"
OT_DEP_GPG_ERROR_LIBS="$( $GPG_ERROR_CONFIG --libs )"
])
OT_DEP_GPGME_CFLAGS="${OT_DEP_GPGME_CFLAGS} ${OT_DEP_GPG_ERROR_CFLAGS}"
OT_DEP_GPGME_LIBS="${OT_DEP_GPGME_LIBS} ${OT_DEP_GPG_ERROR_LIBS}"
],
[
AC_DEFINE([OSTREE_DISABLE_GPGME], 1, [Define to disable internal GPGME support])
with_gpgme=no
]
)
AM_CONDITIONAL(USE_GPGME, test "x$have_gpgme" = xyes)
LIBSODIUM_DEPENDENCY="1.0.14"
AC_ARG_WITH(ed25519_libsodium,
AS_HELP_STRING([--with-ed25519-libsodium], [Use libsodium for ed25519 @<:@default=no@:>@]),
[], [with_ed25519_libsodium=no])
AS_IF([test x$with_ed25519_libsodium != xno], [
AC_DEFINE([HAVE_LIBSODIUM], 1, [Define if using libsodium])
PKG_CHECK_MODULES(OT_DEP_LIBSODIUM, libsodium >= $LIBSODIUM_DEPENDENCY, have_libsodium=yes, have_libsodium=no)
AS_IF([ test x$have_libsodium = xno ], [
AC_MSG_ERROR([Need LIBSODIUM version $LIBSODIUM_DEPENDENCY or later])
])
OSTREE_FEATURES="$OSTREE_FEATURES sign-ed25519"
], with_ed25519_libsodium=no )
AM_CONDITIONAL(USE_LIBSODIUM, test "x$have_libsodium" = xyes)
LIBARCHIVE_DEPENDENCY="libarchive >= 2.8.0"
# What's in RHEL7.2.
FUSE_DEPENDENCY="fuse >= 2.9.2"
AC_CHECK_HEADERS([linux/fsverity.h])
AS_IF([test x$ac_cv_header_linux_fsverity_h = xyes ],
[OSTREE_FEATURES="$OSTREE_FEATURES ex-fsverity"])
# check for gtk-doc
m4_ifdef([GTK_DOC_CHECK], [
GTK_DOC_CHECK([1.15], [--flavour no-tmpl])
],[
enable_gtk_doc=no
AM_CONDITIONAL([ENABLE_GTK_DOC], false)
])
AC_ARG_ENABLE(man,
[AS_HELP_STRING([--enable-man],
[generate man pages [default=auto]])],,
enable_man=maybe)
AS_IF([test "$enable_man" != no], [
AC_PATH_PROG([XSLTPROC], [xsltproc])
AS_IF([test -z "$XSLTPROC"], [
AS_IF([test "$enable_man" = yes], [
AC_MSG_ERROR([xsltproc is required for --enable-man])
])
enable_man=no
],[
enable_man=yes
])
])
AM_CONDITIONAL(ENABLE_MAN, test "$enable_man" != no)
AC_ARG_ENABLE(rust,
[AS_HELP_STRING([--enable-rust],
[Compile Rust code instead of C [default=no]])],,
[enable_rust=no; rust_debug_release=no])
AS_IF([test "$enable_rust" = yes], [
AC_PATH_PROG([cargo], [cargo])
AS_IF([test -z "$cargo"], [AC_MSG_ERROR([cargo is required for --enable-rust])])
AC_PATH_PROG([rustc], [rustc])
AS_IF([test -z "$rustc"], [AC_MSG_ERROR([rustc is required for --enable-rust])])
dnl These bits based on gnome:librsvg/configure.ac
dnl By default, we build in public release mode.
AC_ARG_ENABLE(rust-debug,
AC_HELP_STRING([--enable-rust-debug],
[Build Rust code with debugging information [default=no]]),
[rust_debug_release=$enableval],
[rust_debug_release=release])
AC_MSG_CHECKING(whether to build Rust code with debugging information)
if test "x$rust_debug_release" = "xyes" ; then
rust_debug_release=debug
AC_MSG_RESULT(yes)
else
AC_MSG_RESULT(no)
fi
RUST_TARGET_SUBDIR=${rust_debug_release}
AC_SUBST([RUST_TARGET_SUBDIR])
])
AM_CONDITIONAL(RUST_DEBUG, [test "x$rust_debug_release" = "xdebug"])
AM_CONDITIONAL(ENABLE_RUST, [test "$enable_rust" != no])
AC_ARG_WITH(libarchive,
AS_HELP_STRING([--without-libarchive], [Do not use libarchive]),
:, with_libarchive=maybe)
AS_IF([ test x$with_libarchive != xno ], [
AC_MSG_CHECKING([for $LIBARCHIVE_DEPENDENCY])
PKG_CHECK_EXISTS($LIBARCHIVE_DEPENDENCY, have_libarchive=yes, have_libarchive=no)
AC_MSG_RESULT([$have_libarchive])
AS_IF([ test x$have_libarchive = xno && test x$with_libarchive != xmaybe ], [
AC_MSG_ERROR([libarchive is enabled but could not be found])
])
AS_IF([ test x$have_libarchive = xyes], [
AC_DEFINE([HAVE_LIBARCHIVE], 1, [Define if we have libarchive.pc])
PKG_CHECK_MODULES(OT_DEP_LIBARCHIVE, $LIBARCHIVE_DEPENDENCY)
save_LIBS=$LIBS
LIBS=$OT_DEP_LIBARCHIVE_LIBS
AC_CHECK_FUNCS(archive_read_support_filter_all)
LIBS=$save_LIBS
with_libarchive=yes
], [
with_libarchive=no
])
], [ with_libarchive=no ])
if test x$with_libarchive != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libarchive"; fi
AM_CONDITIONAL(USE_LIBARCHIVE, test $with_libarchive != no)
dnl This is what is in RHEL7 anyways
SELINUX_DEPENDENCY="libselinux >= 2.1.13"
AC_ARG_WITH(selinux,
AS_HELP_STRING([--without-selinux], [Do not use SELinux]),
:, with_selinux=maybe)
AS_IF([ test x$with_selinux != xno ], [
AC_MSG_CHECKING([for $SELINUX_DEPENDENCY])
PKG_CHECK_EXISTS($SELINUX_DEPENDENCY, have_selinux=yes, have_selinux=no)
AC_MSG_RESULT([$have_selinux])
AS_IF([ test x$have_selinux = xno && test x$with_selinux != xmaybe ], [
AC_MSG_ERROR([SELinux is enabled but could not be found])
])
AS_IF([ test x$have_selinux = xyes], [
AC_DEFINE([HAVE_SELINUX], 1, [Define if we have libselinux.pc])
PKG_CHECK_MODULES(OT_DEP_SELINUX, $SELINUX_DEPENDENCY)
with_selinux=yes
], [
with_selinux=no
])
], [ with_selinux=no ])
if test x$with_selinux != xno; then OSTREE_FEATURES="$OSTREE_FEATURES selinux"; fi
AM_CONDITIONAL(USE_SELINUX, test $with_selinux != no)
AC_ARG_WITH(smack,
AS_HELP_STRING([--with-smack], [Enable smack]),
:, with_smack=no)
AS_IF([ test x$with_smack = xyes], [
AC_DEFINE([WITH_SMACK], 1, [Define if we have smack.pc])
])
AM_CONDITIONAL(USE_SMACK, test $with_smack != no)
dnl crypto
AC_ARG_WITH(crypto,
AS_HELP_STRING([--with-crypto], [Choose library for checksums, one of glib, openssl, gnutls (default: glib)]),
:, with_crypto=glib)
AS_IF([test $with_crypto = glib],
[],
[test $with_crypto = openssl],
[with_openssl=yes],
[test $with_crypto = gnutls],
[],
[AC_MSG_ERROR([Invalid --with-crypto $with_crypto])]
)
dnl begin openssl (really just libcrypto right now)
dnl Note this option is now deprecated in favor of --with-crypto=openssl
OPENSSL_DEPENDENCY="libcrypto >= 1.0.1"
AC_ARG_WITH(openssl,
AS_HELP_STRING([--with-openssl], [Enable use of OpenSSL libcrypto (checksums)]),with_openssl=$withval,with_openssl=no)
AS_IF([ test x$with_openssl != xno ], [
PKG_CHECK_MODULES(OT_DEP_CRYPTO, $OPENSSL_DEPENDENCY)
AC_DEFINE([HAVE_OPENSSL], 1, [Define if we have openssl])
with_crypto=openssl
with_openssl=yes
], [
with_openssl=no
])
if test x$with_openssl != xno; then OSTREE_FEATURES="$OSTREE_FEATURES openssl"; fi
AM_CONDITIONAL(USE_OPENSSL, test $with_openssl != no)
dnl end openssl
dnl begin gnutls; in contrast to openssl this one only
dnl supports --with-crypto=gnutls
GNUTLS_DEPENDENCY="gnutls >= 3.5.0"
AS_IF([ test $with_crypto = gnutls ], [
PKG_CHECK_MODULES(OT_DEP_CRYPTO, $GNUTLS_DEPENDENCY)
AC_DEFINE([HAVE_GNUTLS], 1, [Define if we have gnutls])
OSTREE_FEATURES="$OSTREE_FEATURES gnutls"
])
AM_CONDITIONAL(USE_GNUTLS, test $with_crypto = gnutls)
dnl end gnutls
dnl Avahi dependency for finding repos
AVAHI_DEPENDENCY="avahi-client >= 0.6.31 avahi-glib >= 0.6.31"
AC_ARG_WITH(avahi,
AS_HELP_STRING([--without-avahi], [Do not use Avahi]),
:, with_avahi=maybe)
AS_IF([ test x$with_avahi != xno ], [
AC_MSG_CHECKING([for $AVAHI_DEPENDENCY])
PKG_CHECK_EXISTS($AVAHI_DEPENDENCY, have_avahi=yes, have_avahi=no)
AC_MSG_RESULT([$have_avahi])
AS_IF([ test x$have_avahi = xno && test x$with_avahi != xmaybe ], [
AC_MSG_ERROR([Avahi is enabled but could not be found])
])
AS_IF([ test x$have_avahi = xyes], [
AC_DEFINE([HAVE_AVAHI], 1, [Define if we have avahi-client.pc and avahi-glib.pc])
PKG_CHECK_MODULES(OT_DEP_AVAHI, $AVAHI_DEPENDENCY)
with_avahi=yes
], [
with_avahi=no
])
], [ with_avahi=no ])
if test x$with_avahi != xno; then OSTREE_FEATURES="$OSTREE_FEATURES avahi"; fi
AM_CONDITIONAL(USE_AVAHI, test $with_avahi != no)
dnl This is what is in RHEL7.2 right now, picking it arbitrarily
LIBMOUNT_DEPENDENCY="mount >= 2.23.0"
AC_ARG_WITH(libmount,
AS_HELP_STRING([--without-libmount], [Do not use libmount]),
:, with_libmount=maybe)
AS_IF([ test x$with_libmount != xno ], [
AC_MSG_CHECKING([for $LIBMOUNT_DEPENDENCY])
PKG_CHECK_EXISTS($LIBMOUNT_DEPENDENCY, have_libmount=yes, have_libmount=no)
AC_MSG_RESULT([$have_libmount])
AS_IF([ test x$have_libmount = xno && test x$with_libmount != xmaybe ], [
AC_MSG_ERROR([libmount is enabled but could not be found])
])
AS_IF([ test x$have_libmount = xyes], [
AC_DEFINE([HAVE_LIBMOUNT], 1, [Define if we have libmount.pc])
PKG_CHECK_MODULES(OT_DEP_LIBMOUNT, $LIBMOUNT_DEPENDENCY)
with_libmount=yes
save_LIBS=$LIBS
LIBS=$OT_DEP_LIBMOUNT_LIBS
AC_CHECK_FUNCS(mnt_unref_cache)
LIBS=$save_LIBS
], [
with_libmount=no
])
], [ with_libmount=no ])
if test x$with_libmount != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libmount"; fi
AM_CONDITIONAL(USE_LIBMOUNT, test $with_libmount != no)
# Enabled by default because I think people should use it.
AC_ARG_ENABLE(rofiles-fuse,
[AS_HELP_STRING([--enable-rofiles-fuse],
[generate rofiles-fuse helper [default=yes]])],,
enable_rofiles_fuse=yes)
AS_IF([ test x$enable_rofiles_fuse != xno ], [
PKG_CHECK_MODULES(BUILDOPT_FUSE, $FUSE_DEPENDENCY)
], [enable_rofiles_fuse=no])
AM_CONDITIONAL(BUILDOPT_FUSE, test x$enable_rofiles_fuse = xyes)
AC_ARG_WITH(dracut,
AS_HELP_STRING([--with-dracut],
[Install dracut module (default: no)]),,
[with_dracut=no])
case x$with_dracut in
xno) ;;
xyes) ;;
xyesbutnoconf) ;;
*) AC_MSG_ERROR([Unknown --with-dracut value $with_dracut])
esac
AM_CONDITIONAL(BUILDOPT_DRACUT, test x$with_dracut = xyes || test x$with_dracut = xyesbutnoconf)
AM_CONDITIONAL(BUILDOPT_DRACUT_CONF, test x$with_dracut = xyes)
AC_ARG_WITH(mkinitcpio,
AS_HELP_STRING([--with-mkinitcpio],
[Install mkinitcpio module (default: no)]),,
[with_mkinitcpio=no])
AM_CONDITIONAL(BUILDOPT_MKINITCPIO, test x$with_mkinitcpio = xyes)
dnl We have separate checks for libsystemd and the unit dir for historical reasons
AC_ARG_WITH(libsystemd,
AS_HELP_STRING([--without-libsystemd], [Do not use libsystemd]),
:, with_libsystemd=maybe)
AS_IF([ test x$with_libsystemd != xno ], [
AC_MSG_CHECKING([for libsystemd])
PKG_CHECK_EXISTS(libsystemd, have_libsystemd=yes, have_libsystemd=no)
AC_MSG_RESULT([$have_libsystemd])
AS_IF([ test x$have_libsystemd = xno && test x$with_libsystemd != xmaybe ], [
AC_MSG_ERROR([libsystemd is enabled but could not be found])
])
AS_IF([ test x$have_libsystemd = xyes], [
AC_DEFINE([HAVE_LIBSYSTEMD], 1, [Define if we have libsystemd.pc])
PKG_CHECK_MODULES([LIBSYSTEMD], [libsystemd])
with_libsystemd=yes
], [
with_libsystemd=no
])
], [ with_libsystemd=no ])
AS_IF([test "x$with_libsystemd" = "xyes"], [
AC_ARG_WITH([systemdsystemunitdir],
AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [Directory for systemd service files]),
[],
[with_systemdsystemunitdir=$($PKG_CONFIG --variable=systemdsystemunitdir systemd)])
AS_IF([test "x$with_systemdsystemunitdir" != "xno"], [
AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])
])
AC_ARG_WITH([systemdsystemgeneratordir],
AS_HELP_STRING([--with-systemdsystemgeneratordir=DIR], [Directory for systemd generators]),
[],
[with_systemdsystemgeneratordir=$($PKG_CONFIG --variable=systemdsystemgeneratordir systemd)])
AS_IF([test "x$with_systemdsystemgeneratordir" != "xno"], [
AC_SUBST([systemdsystemgeneratordir], [$with_systemdsystemgeneratordir])
])
])
AM_CONDITIONAL(BUILDOPT_SYSTEMD, test x$with_libsystemd = xyes)
dnl If we have both, we use the "new /var" model with ostree-system-generator
AM_CONDITIONAL(BUILDOPT_SYSTEMD_AND_LIBMOUNT,[test x$with_libsystemd = xyes && test x$with_libmount = xyes])
AM_COND_IF(BUILDOPT_SYSTEMD_AND_LIBMOUNT,
AC_DEFINE([BUILDOPT_LIBSYSTEMD_AND_LIBMOUNT], 1, [Define if systemd and libmount]))
if test x$with_libsystemd = xyes; then OSTREE_FEATURES="$OSTREE_FEATURES systemd"; fi
AC_ARG_WITH(builtin-grub2-mkconfig,
AS_HELP_STRING([--with-builtin-grub2-mkconfig],
[Use a builtin minimal grub2-mkconfig to generate a GRUB2 configuration file (default: no)]),,
[with_builtin_grub2_mkconfig=no])
AM_CONDITIONAL(BUILDOPT_BUILTIN_GRUB2_MKCONFIG, test x$with_builtin_grub2_mkconfig = xyes)
AM_COND_IF(BUILDOPT_BUILTIN_GRUB2_MKCONFIG,
AC_DEFINE([USE_BUILTIN_GRUB2_MKCONFIG], 1, [Define if using internal ostree-grub-generator]))
AC_ARG_WITH(grub2-mkconfig-path,
AS_HELP_STRING([--with-grub2-mkconfig-path],
[Path to grub2-mkconfig]))
AS_IF([test x$with_grub2_mkconfig_path = x], [
dnl Otherwise, look for the path to the system generator. On some
dnl distributions GRUB2 *-mkconfig executable has 'grub2' prefix and
dnl on some 'grub'. We default to grub2-mkconfig.
AC_CHECK_PROGS(GRUB2_MKCONFIG, [grub2-mkconfig grub-mkconfig], [grub2-mkconfig])
],[GRUB2_MKCONFIG=$with_grub2_mkconfig_path])
AC_DEFINE_UNQUOTED([GRUB2_MKCONFIG_PATH], ["$GRUB2_MKCONFIG"], [The system grub2-mkconfig executable name])
AC_ARG_WITH(static-compiler,
AS_HELP_STRING([--with-static-compiler],
[Use the given compiler to build ostree-prepare-root statically linked (default: no)]),,
[with_static_compiler=no])
AM_CONDITIONAL(BUILDOPT_USE_STATIC_COMPILER, test "x$with_static_compiler" != xno)
AC_SUBST(STATIC_COMPILER, $with_static_compiler)
dnl for tests (but we can't use asan with gjs or any introspection,
dnl see https://github.com/google/sanitizers/wiki/AddressSanitizerAsDso for more info)
AS_IF([test "x$found_introspection" = xyes && test x$using_asan != xyes], [
AC_PATH_PROG(GJS, [gjs])
if test -n "$GJS"; then
have_gjs=yes
else
have_gjs=no
fi
], [have_gjs=no])
AM_CONDITIONAL(BUILDOPT_GJS, test x$have_gjs = xyes)
# Do we enable building experimental (non-stable) API?
# The OSTREE_ENABLE_EXPERIMENTAL_API #define is used internally and in public
# headers, so any consumer of libostree who wants to use experimental API must
# #define OSTREE_ENABLE_EXPERIMENTAL_API 1
# before including libostree headers. This means the name in the AC_DEFINE below
# is public API.
AC_ARG_ENABLE([experimental-api],
[AS_HELP_STRING([--enable-experimental-api],
[Enable unstable experimental API in libostree [default=no]])],,
[enable_experimental_api=no])
AS_IF([test x$enable_experimental_api = xyes],
[AC_DEFINE([OSTREE_ENABLE_EXPERIMENTAL_API],[1],[Define if experimental API should be enabled])
OSTREE_FEATURES="$OSTREE_FEATURES experimental"]
)
AM_CONDITIONAL([ENABLE_EXPERIMENTAL_API],[test x$enable_experimental_api = xyes])
AM_CONDITIONAL([BUILDOPT_IS_DEVEL_BUILD],[test x$is_release_build != xyes])
AM_COND_IF([BUILDOPT_IS_DEVEL_BUILD],
AC_DEFINE([BUILDOPT_IS_DEVEL_BUILD], [1], [Define if doing a development build])
release_build_type=devel,
release_build_type=release)
OSTREE_FEATURES="$OSTREE_FEATURES $release_build_type"
# P2P API is public in OSTree >= 2018.6
OSTREE_FEATURES="$OSTREE_FEATURES p2p"
# Strip leading whitespace
OSTREE_FEATURES=$(echo ${OSTREE_FEATURES})
AC_CONFIG_FILES([
Makefile
apidoc/Makefile
src/libostree/ostree-1.pc
src/libostree/ostree-version.h
])
AC_OUTPUT
echo "
libostree $VERSION ($release_build_type)
features: $OSTREE_FEATURES
===============
introspection: $found_introspection
Rust (internal oxidation): $rust_debug_release
rofiles-fuse: $enable_rofiles_fuse
HTTP backend: $fetcher_backend
\"ostree trivial-httpd\": $enable_trivial_httpd_cmdline
SELinux: $with_selinux
fs-verity: $ac_cv_header_linux_fsverity_h
cryptographic checksums: $with_crypto
systemd: $with_libsystemd
libmount: $with_libmount
libsodium (ed25519 signatures): $with_ed25519_libsodium
libarchive (parse tar files directly): $with_libarchive
static deltas: yes (always enabled now)
O_TMPFILE: $enable_otmpfile
wrpseudo-compat: $enable_wrpseudo_compat
man pages (xsltproc): $enable_man
api docs (gtk-doc): $enable_gtk_doc
installed tests: $enable_installed_tests
gjs-based tests: $have_gjs
dracut: $with_dracut
mkinitcpio: $with_mkinitcpio
Static compiler for ostree-prepare-root: $with_static_compiler
Experimental API $enable_experimental_api"
AS_IF([test x$with_builtin_grub2_mkconfig = xyes], [
echo " builtin grub2-mkconfig (instead of system): $with_builtin_grub2_mkconfig"
], [
echo " grub2-mkconfig path: $GRUB2_MKCONFIG"
])
echo ""
| 1 | 19,147 | Hmm, I wonder if this should just be e.g. `--with-grub-2.02` instead. (Not sure Autoconf supports periods in these switches.) Today's modern GRUB is tomorrow's ancient GRUB. :) Or maybe we should be specific about the feature this is enabling, which might be safer given that each distro carries so many patches. E.g. `--without-grub-efi-16-suffixes`? | ostreedev-ostree | c |
@@ -27,13 +27,13 @@ import (
// Instance is a compute instance.
type Instance struct {
*api.Instance
- client daisyCompute.Client
+ Client daisyCompute.Client
Project, Zone string
}
// Cleanup deletes the Instance.
func (i *Instance) Cleanup() {
- if err := i.client.DeleteInstance(i.Project, i.Zone, i.Name); err != nil {
+ if err := i.Client.DeleteInstance(i.Project, i.Zone, i.Name); err != nil {
fmt.Printf("Error deleting instance: %v\n", err)
}
} | 1 | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package compute contains wrappers around the GCE compute API.
package compute
import (
"fmt"
"strings"
"time"
daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
api "google.golang.org/api/compute/v1"
)
// Instance is a compute instance.
type Instance struct {
*api.Instance
client daisyCompute.Client
Project, Zone string
}
// Cleanup deletes the Instance.
func (i *Instance) Cleanup() {
if err := i.client.DeleteInstance(i.Project, i.Zone, i.Name); err != nil {
fmt.Printf("Error deleting instance: %v\n", err)
}
}
// WaitForSerialOutput waits for a string match on a serial port.
func (i *Instance) WaitForSerialOutput(match string, port int64, interval, timeout time.Duration) error {
var start int64
var errs int
tick := time.Tick(interval)
timedout := time.Tick(timeout)
for {
select {
case <-timedout:
return fmt.Errorf("timed out waiting for %q", match)
case <-tick:
resp, err := i.client.GetSerialPortOutput(i.Project, i.Zone, i.Name, port, start)
if err != nil {
status, sErr := i.client.InstanceStatus(i.Project, i.Zone, i.Name)
if sErr != nil {
err = fmt.Errorf("%v, error getting InstanceStatus: %v", err, sErr)
} else {
err = fmt.Errorf("%v, InstanceStatus: %q", err, status)
}
// Wait until machine restarts to evaluate SerialOutput.
if status == "TERMINATED" || status == "STOPPED" || status == "STOPPING" {
continue
}
// Retry up to 3 times in a row on any error if we successfully got InstanceStatus.
if errs < 3 {
errs++
continue
}
return err
}
start = resp.Next
for _, ln := range strings.Split(resp.Contents, "\n") {
if i := strings.Index(ln, match); i != -1 {
return nil
}
}
errs = 0
}
}
}
// CreateInstance creates a compute instance.
func CreateInstance(client daisyCompute.Client, project, zone string, i *api.Instance) (*Instance, error) {
if err := client.CreateInstance(project, zone, i); err != nil {
return nil, err
}
return &Instance{Instance: i, client: client, Project: project, Zone: zone}, nil
}
// BuildInstanceMetadataItem creates a metadata item
func BuildInstanceMetadataItem(key, value string) *api.MetadataItems {
return &api.MetadataItems{
Key: key,
Value: func() *string { v := value; return &v }(),
}
}
| 1 | 8,674 | Add a GetSerialPortOutput method to Instance; that way you don't need to access the client, and it makes the call cleaner since you don't have the odd i.Client and path.Base(i.Project), path.Base(i.Zone) | GoogleCloudPlatform-compute-image-tools | go
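A rough sketch of the suggested shape: keep the client unexported and give Instance a thin forwarding method, so callers never reach into an exported Client field. The client is narrowed to an assumed one-method interface here; the real daisyCompute.Client exposes the same GetSerialPortOutput call with this signature, but the trimmed types are illustrative only.

package compute

import (
	api "google.golang.org/api/compute/v1"
)

// serialClient is the slice of the compute client this sketch needs.
type serialClient interface {
	GetSerialPortOutput(project, zone, name string, port, start int64) (*api.SerialPortOutput, error)
}

// Instance keeps the client unexported, as in the original file; Name
// stands in for the embedded *api.Instance's Name field.
type Instance struct {
	client        serialClient
	Name          string
	Project, Zone string
}

// GetSerialPortOutput forwards to the client with the instance's own
// project/zone/name, so callers never need the client itself.
func (i *Instance) GetSerialPortOutput(port, start int64) (*api.SerialPortOutput, error) {
	return i.client.GetSerialPortOutput(i.Project, i.Zone, i.Name, port, start)
}

External callers can then write inst.GetSerialPortOutput(1, start) instead of inst.Client.GetSerialPortOutput(inst.Project, inst.Zone, inst.Name, 1, start).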
@@ -138,9 +138,11 @@ Workshops::Application.routes.draw do
get '/group-training' => "pages#show", as: :group_training, id: "group-training"
get '/humans-present/oss' => redirect('https://www.youtube.com/watch?v=VMBhumlUP-A')
get '/backbone.js' => redirect('/backbone')
- get '/backbone-js-on-rails' => redirect('/products/1-backbone-js-on-rails')
- get '/geocoding-on-rails' => redirect('/products/22-geocoding-on-rails')
+ get "/backbone-js-on-rails" => redirect("/products/1-backbone-js-on-rails"), as: :backbone_js_on_rails
+ get "/geocoding-on-rails" => redirect("/products/22-geocoding-on-rails"), as: :geocoding_on_rails
get '/geocodingonrails' => redirect('/products/22-geocoding-on-rails')
+ get "/ios-on-rails" => redirect("/products/25-ios-on-rails-beta"), as: :ios_on_rails
+ get "/ruby-science" => redirect("/products/13-ruby-science"), as: :ruby_science
get '/gettingstartedwithios' => redirect('/workshops/24-getting-started-with-ios-development?utm_source=podcast')
get '/5by5' => redirect('/workshops/19-design-for-developers?utm_source=5by5')
get '/rubyist-booster-shot' => "pages#show", as: :rubyist_booster_shot, id: "rubyist-booster-shot" | 1 | Workshops::Application.routes.draw do
use_doorkeeper
mount RailsAdmin::Engine => '/admin', :as => 'admin'
root to: 'homes#show'
get '/api/v1/me.json' => 'api/v1/users#show', as: :resource_owner
namespace :api do
namespace :v1 do
resources :completions, only: [:index, :show, :create, :destroy]
end
end
namespace :teams do
resources :invitations, only: [:create] do
resources :acceptances, only: [:new, :create]
end
resource :team, only: :edit
end
get '/pages/tmux' => redirect('https://www.youtube.com/watch?v=CKC8Ph-s2F4')
if Rails.env.staging? || Rails.env.production?
get '/products/:id' => redirect('/workshops/18-test-driven-rails'),
constraints: { id: /(10|12).*/ }
get '/products/:id' => redirect('/workshops/19-design-for-developers'),
constraints: { id: /(9|11).*/ }
get '/products/:id' => redirect('https://www.youtube.com/watch?v=CKC8Ph-s2F4'),
constraints: { id: /(4).*/ }
get '/products/14' => redirect('/prime')
get '/products/14-prime' => redirect('/prime')
end
resource :session, controller: 'sessions'
get '/courses.json' => redirect('/workshops.json')
get '/courses/:id' => redirect('/workshops/%{id}')
resources :workshops, only: [:show] do
resources :purchases, only: [:new, :create]
resources :redemptions, only: [:new]
end
resources :products, only: [:index, :show] do
resources :redemptions, only: [:new]
resources :purchases, only: [:new, :create]
end
get '/products/:id/purchases/:lookup' => redirect("/purchases/%{lookup}")
resources :books, only: :show, controller: 'products' do
resources :redemptions, only: [:new]
resources :purchases, only: [:create]
end
resources :screencasts, only: :show, controller: 'products' do
resources :redemptions, only: [:new]
resources :purchases, only: [:create]
end
resources :shows, only: :show, controller: 'products' do
resources :redemptions, only: [:new]
resources :purchases, only: [:show]
end
get '/the-weekly-iteration' => 'weekly_iterations#show', as: :weekly_iteration
get '/videos/:id' => 'episodes#show', as: :public_video
resources :purchases, only: [:show, :index] do
resources :videos, only: [:show]
member do
get 'paypal'
end
end
namespace :subscriber do
resources :books, only: [] do
resources :purchases, only: [:new, :create]
end
resources :screencasts, only: [] do
resources :purchases, only: [:new, :create]
end
resources :shows, only: [] do
resources :purchases, only: [:new, :create]
end
resources :workshops, only: [] do
resources :purchases, only: [:new, :create]
end
resources :invoices, only: [:index, :show]
resource :cancellation, only: [:new, :create]
resource :downgrade, only: :create
resource :refund, only: [:new, :create]
end
resource :subscription, only: [:new, :edit, :update]
resource :credit_card, only: [:update]
resources :individual_plans, only: [] do
resources :purchases, only: [:new, :create]
resources :stripe_redemptions, only: [:new]
end
resources :teams_team_plans, only: [] do
resources :purchases, only: [:new, :create]
resources :stripe_redemptions, only: [:new]
end
get '/podcast.xml' => redirect('http://podcasts.thoughtbot.com/giantrobots.xml')
get '/podcast' => redirect('http://podcasts.thoughtbot.com/giantrobots')
get '/podcast/articles' => 'articles#index', id: 'podcast'
get '/podcast/:id' => redirect("http://podcasts.thoughtbot.com/giantrobots/%{id}")
get '/podcasts' => redirect('http://podcasts.thoughtbot.com/giantrobots')
get '/podcasts/:id' => redirect("http://podcasts.thoughtbot.com/giantrobots/%{id}")
get '/giantrobots.xml' => redirect('http://podcasts.thoughtbot.com/giantrobots.xml')
get '/giantrobots' => redirect('http://podcasts.thoughtbot.com/giantrobots')
get '/giantrobots/:id.mp3' => redirect("http://podcasts.thoughtbot.com/giantrobots/%{id}.mp3")
get '/giantrobots/:id' => redirect("http://podcasts.thoughtbot.com/giantrobots/%{id}")
get '/buildphase.xml' => redirect('http://podcasts.thoughtbot.com/buildphase.xml')
get '/buildphase' => redirect('http://podcasts.thoughtbot.com/buildphase')
get '/buildphase/:id.mp3' => redirect("http://podcasts.thoughtbot.com/buildphase/%{id}.mp3")
get '/buildphase/:id' => redirect("http://podcasts.thoughtbot.com/buildphase/%{id}")
resources :design_for_developers_resources, path: 'design-for-developers-resources', only: [:index, :show]
resources :test_driven_rails_resources, path: 'test-driven-rails-resources', only: [:index]
get '/d4d-resources' => redirect('/design-for-developers-resources')
resources :topics, only: :index, path: 'trails'
get '/auth/:provider/callback', to: 'auth_callbacks#create'
resource :timeline, only: :show
get "/pages/*id" => 'pages#show', format: false
get '/prime' => 'promoted_catalogs#show', as: :prime
get '/privacy' => 'pages#show', as: :privacy, id: 'privacy'
get '/terms' => 'pages#show', as: :terms, id: 'terms'
get '/directions' => "pages#show", as: :directions, id: "directions"
get '/group-training' => "pages#show", as: :group_training, id: "group-training"
get '/humans-present/oss' => redirect('https://www.youtube.com/watch?v=VMBhumlUP-A')
get '/backbone.js' => redirect('/backbone')
get '/backbone-js-on-rails' => redirect('/products/1-backbone-js-on-rails')
get '/geocoding-on-rails' => redirect('/products/22-geocoding-on-rails')
get '/geocodingonrails' => redirect('/products/22-geocoding-on-rails')
get '/gettingstartedwithios' => redirect('/workshops/24-getting-started-with-ios-development?utm_source=podcast')
get '/5by5' => redirect('/workshops/19-design-for-developers?utm_source=5by5')
get '/rubyist-booster-shot' => "pages#show", as: :rubyist_booster_shot, id: "rubyist-booster-shot"
get '/live' => redirect(OfficeHours.url)
patch '/my_account' => 'users#update', as: 'edit_my_account'
get '/my_account' => 'users#edit', as: 'my_account'
resources :users, controller: 'users' do
resources :notes, only: [:create, :edit, :update]
resource :timeline, only: :show
resource :password, :controller => 'passwords', :only => [:create, :edit, :update]
end
get '/sign_up' => 'users#new', as: 'sign_up_app'
get '/sign_in' => 'sessions#new', as: 'sign_in_app'
resources :passwords, controller: 'passwords', :only => [:create, :new]
resource :dashboard, only: :show
mount StripeEvent::Engine, at: 'stripe-webhook'
get ':id' => 'topics#show', as: :topic
get '/:id/articles' => redirect('http://robots.thoughtbot.com/tags/%{id}')
end
| 1 | 10,109 | Line is too long. [104/80] | thoughtbot-upcase | rb |
@@ -93,6 +93,8 @@ func TestReconcileClusterPool(t *testing.T) {
expectedMissingDependenciesMessage string
expectedAssignedClaims int
expectedUnassignedClaims int
+ expectedAssignedCDs int
+ expectedRunning int
expectedLabels map[string]string // Tested on all clusters, so will not work if your test has pre-existing cds in the pool.
// Map, keyed by claim name, of expected Status.Conditions['Pending'].Reason.
// (The clusterpool controller always sets this condition's Status to True.) | 1 | package clusterpool
import (
"context"
"sort"
"testing"
"time"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
hivev1 "github.com/openshift/hive/apis/hive/v1"
"github.com/openshift/hive/pkg/constants"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
testclaim "github.com/openshift/hive/pkg/test/clusterclaim"
testcd "github.com/openshift/hive/pkg/test/clusterdeployment"
testcp "github.com/openshift/hive/pkg/test/clusterpool"
"github.com/openshift/hive/pkg/test/generic"
testgeneric "github.com/openshift/hive/pkg/test/generic"
testsecret "github.com/openshift/hive/pkg/test/secret"
)
const (
testNamespace = "test-namespace"
testLeasePoolName = "aws-us-east-1"
credsSecretName = "aws-creds"
imageSetName = "test-image-set"
)
func TestReconcileClusterPool(t *testing.T) {
scheme := runtime.NewScheme()
hivev1.AddToScheme(scheme)
corev1.AddToScheme(scheme)
rbacv1.AddToScheme(scheme)
poolBuilder := testcp.FullBuilder(testNamespace, testLeasePoolName, scheme).
GenericOptions(
testgeneric.WithFinalizer(finalizer),
).
Options(
testcp.ForAWS(credsSecretName, "us-east-1"),
testcp.WithBaseDomain("test-domain"),
testcp.WithImageSet(imageSetName),
)
initializedPoolBuilder := poolBuilder.Options(testcp.WithCondition(hivev1.ClusterPoolCondition{
Status: corev1.ConditionUnknown,
Type: hivev1.ClusterPoolMissingDependenciesCondition,
}),
testcp.WithCondition(hivev1.ClusterPoolCondition{
Status: corev1.ConditionUnknown,
Type: hivev1.ClusterPoolCapacityAvailableCondition,
}),
)
cdBuilder := func(name string) testcd.Builder {
return testcd.FullBuilder(name, name, scheme).Options(
testcd.WithPowerState(hivev1.HibernatingClusterPowerState),
)
}
unclaimedCDBuilder := func(name string) testcd.Builder {
return cdBuilder(name).Options(
testcd.WithUnclaimedClusterPoolReference(testNamespace, testLeasePoolName),
)
}
nowish := time.Now()
tests := []struct {
name string
existing []runtime.Object
noClusterImageSet bool
noCredsSecret bool
expectError bool
expectedTotalClusters int
expectedObservedSize int32
expectedObservedReady int32
expectedDeletedClusters []string
expectFinalizerRemoved bool
expectedMissingDependenciesStatus corev1.ConditionStatus
expectedCapacityStatus corev1.ConditionStatus
expectedMissingDependenciesMessage string
expectedAssignedClaims int
expectedUnassignedClaims int
expectedLabels map[string]string // Tested on all clusters, so will not work if your test has pre-existing cds in the pool.
// Map, keyed by claim name, of expected Status.Conditions['Pending'].Reason.
// (The clusterpool controller always sets this condition's Status to True.)
// Not checked if nil.
expectedClaimPendingReasons map[string]string
}{
{
name: "initialize conditions",
existing: []runtime.Object{
poolBuilder.Build(testcp.WithSize(1)),
},
expectedMissingDependenciesStatus: corev1.ConditionUnknown,
expectedCapacityStatus: corev1.ConditionUnknown,
},
{
name: "create all clusters",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithClusterDeploymentLabels(map[string]string{"foo": "bar"})),
},
expectedTotalClusters: 5,
expectedObservedSize: 0,
expectedObservedReady: 0,
expectedLabels: map[string]string{"foo": "bar"},
},
{
name: "scale up",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 5,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "scale up with no more capacity",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedCapacityStatus: corev1.ConditionFalse,
},
{
name: "scale up with some capacity",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxSize(4)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedCapacityStatus: corev1.ConditionTrue,
},
{
name: "scale up with no more capacity including claimed",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxSize(3)),
cdBuilder("c1").Build(testcd.Installed(), testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test")),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 3,
expectedObservedSize: 2,
expectedObservedReady: 1,
expectedCapacityStatus: corev1.ConditionFalse,
},
{
name: "scale up with some capacity including claimed",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxSize(4)),
cdBuilder("c1").Build(testcd.Installed(), testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test")),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 4,
expectedObservedSize: 2,
expectedObservedReady: 1,
expectedCapacityStatus: corev1.ConditionTrue,
},
{
name: "scale up with no more max concurrent",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxConcurrent(1)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "scale up with one more max concurrent",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "scale up with max concurrent and max size",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(6), testcp.WithMaxSize(5), testcp.WithMaxConcurrent(1)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedCapacityStatus: corev1.ConditionTrue,
},
{
name: "scale up with max concurrent and max size 2",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(6), testcp.WithMaxSize(5), testcp.WithMaxConcurrent(1)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(testcd.Installed()),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 3,
expectedCapacityStatus: corev1.ConditionTrue,
},
{
name: "scale up with max concurrent and max size 3",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(6), testcp.WithMaxSize(4), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedCapacityStatus: corev1.ConditionTrue,
},
{
name: "no scale up with max concurrent and some deleting",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").GenericOptions(generic.Deleted()).Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 3,
expectedObservedSize: 2,
expectedObservedReady: 1,
},
{
name: "no scale up with max concurrent and some deleting claimed clusters",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxConcurrent(2)),
cdBuilder("c1").GenericOptions(generic.Deleted()).Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 3,
expectedObservedSize: 2,
expectedObservedReady: 1,
},
{
name: "scale up with max concurrent and some deleting",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(5), testcp.WithMaxConcurrent(3)),
unclaimedCDBuilder("c1").GenericOptions(generic.Deleted()).Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 4,
expectedObservedSize: 2,
expectedObservedReady: 1,
},
{
name: "scale down",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(testcd.Installed()),
unclaimedCDBuilder("c4").Build(testcd.Installed()),
unclaimedCDBuilder("c5").Build(testcd.Installed()),
unclaimedCDBuilder("c6").Build(testcd.Installed()),
},
expectedTotalClusters: 3,
expectedObservedSize: 6,
expectedObservedReady: 6,
},
{
name: "scale down with max concurrent enough",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3), testcp.WithMaxConcurrent(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(testcd.Installed()),
unclaimedCDBuilder("c4").Build(testcd.Installed()),
unclaimedCDBuilder("c5").Build(testcd.Installed()),
unclaimedCDBuilder("c6").Build(testcd.Installed()),
},
expectedTotalClusters: 3,
expectedObservedSize: 6,
expectedObservedReady: 6,
},
{
name: "scale down with max concurrent not enough",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(testcd.Installed()),
unclaimedCDBuilder("c4").Build(testcd.Installed()),
unclaimedCDBuilder("c5").Build(testcd.Installed()),
unclaimedCDBuilder("c6").Build(testcd.Installed()),
},
expectedTotalClusters: 4,
expectedObservedSize: 6,
expectedObservedReady: 6,
},
{
name: "delete installing clusters first",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(1)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(),
},
expectedTotalClusters: 1,
expectedObservedSize: 2,
expectedObservedReady: 1,
expectedDeletedClusters: []string{"c2"},
},
{
name: "delete most recent installing clusters first",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(1)),
unclaimedCDBuilder("c1").GenericOptions(
testgeneric.WithCreationTimestamp(time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC)),
).Build(),
unclaimedCDBuilder("c2").GenericOptions(
testgeneric.WithCreationTimestamp(time.Date(2020, 2, 2, 3, 4, 5, 6, time.UTC)),
).Build(),
},
expectedTotalClusters: 1,
expectedObservedSize: 2,
expectedObservedReady: 0,
expectedDeletedClusters: []string{"c2"},
},
{
name: "delete installed clusters when there are not enough installing to delete",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
unclaimedCDBuilder("c4").Build(testcd.Installed()),
unclaimedCDBuilder("c5").Build(testcd.Installed()),
unclaimedCDBuilder("c6").Build(),
},
expectedTotalClusters: 3,
expectedObservedSize: 6,
expectedObservedReady: 4,
expectedDeletedClusters: []string{"c3", "c6"},
},
{
name: "clusters deleted when clusterpool deleted",
existing: []runtime.Object{
initializedPoolBuilder.GenericOptions(testgeneric.Deleted()).Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(),
unclaimedCDBuilder("c2").Build(),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 0,
expectFinalizerRemoved: true,
},
{
name: "finalizer added to clusterpool",
existing: []runtime.Object{
initializedPoolBuilder.GenericOptions(testgeneric.WithoutFinalizer(finalizer)).Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "clusters not part of pool are not counted against pool size",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").Build(),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "claimed clusters are not counted against pool size",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "clusters in different pool are not counted against pool size",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").Build(
testcd.WithClusterPoolReference(testNamespace, "other-pool", "test-claim"),
),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "deleting clusters are not counted against pool size",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").GenericOptions(testgeneric.Deleted()).Build(testcd.Installed()),
cdBuilder("c5").GenericOptions(testgeneric.Deleted()).Build(),
},
expectedTotalClusters: 5,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "missing ClusterImageSet",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(1)),
},
noClusterImageSet: true,
expectError: true,
expectedMissingDependenciesStatus: corev1.ConditionTrue,
expectedMissingDependenciesMessage: `cluster image set: clusterimagesets.hive.openshift.io "test-image-set" not found`,
},
{
name: "missing creds secret",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(1)),
},
noCredsSecret: true,
expectError: true,
expectedMissingDependenciesStatus: corev1.ConditionTrue,
expectedMissingDependenciesMessage: `credentials secret: secrets "aws-creds" not found`,
},
{
name: "multiple missing dependents",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(1)),
},
noClusterImageSet: true,
noCredsSecret: true,
expectError: true,
expectedMissingDependenciesStatus: corev1.ConditionTrue,
expectedMissingDependenciesMessage: `[cluster image set: clusterimagesets.hive.openshift.io "test-image-set" not found, credentials secret: secrets "aws-creds" not found]`,
},
{
name: "missing dependents resolved",
existing: []runtime.Object{
initializedPoolBuilder.Build(
testcp.WithSize(1),
testcp.WithCondition(hivev1.ClusterPoolCondition{
Type: hivev1.ClusterPoolMissingDependenciesCondition,
Status: corev1.ConditionTrue,
}),
),
},
expectedMissingDependenciesStatus: corev1.ConditionFalse,
expectedMissingDependenciesMessage: "Dependencies verified",
expectedTotalClusters: 1,
expectedObservedSize: 0,
expectedObservedReady: 0,
},
{
name: "max size should include the deleting unclaimed clusters",
existing: []runtime.Object{
initializedPoolBuilder.Build(
testcp.WithSize(2),
testcp.WithMaxSize(3),
testcp.WithCondition(hivev1.ClusterPoolCondition{
Type: hivev1.ClusterPoolCapacityAvailableCondition,
Status: corev1.ConditionFalse,
}),
),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").GenericOptions(generic.Deleted()).Build(testcd.Installed()),
},
expectedTotalClusters: 3,
expectedObservedSize: 2,
expectedObservedReady: 2,
expectedCapacityStatus: corev1.ConditionFalse,
},
{
name: "max capacity resolved",
existing: []runtime.Object{
initializedPoolBuilder.Build(
testcp.WithSize(2),
testcp.WithMaxSize(3),
testcp.WithCondition(hivev1.ClusterPoolCondition{
Type: hivev1.ClusterPoolCapacityAvailableCondition,
Status: corev1.ConditionFalse,
}),
),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
},
expectedTotalClusters: 2,
expectedObservedSize: 2,
expectedObservedReady: 2,
expectedCapacityStatus: corev1.ConditionTrue,
},
{
name: "with pull secret",
existing: []runtime.Object{
initializedPoolBuilder.Build(
testcp.WithSize(1),
testcp.WithPullSecret("test-pull-secret"),
),
testsecret.FullBuilder(testNamespace, "test-pull-secret", scheme).
Build(testsecret.WithDataKeyValue(".dockerconfigjson", []byte("test docker config data"))),
},
expectedTotalClusters: 1,
expectedObservedSize: 0,
expectedObservedReady: 0,
},
{
name: "missing pull secret",
existing: []runtime.Object{
initializedPoolBuilder.Build(
testcp.WithSize(1),
testcp.WithPullSecret("test-pull-secret"),
),
},
expectError: true,
expectedMissingDependenciesStatus: corev1.ConditionTrue,
expectedMissingDependenciesMessage: `pull secret: secrets "test-pull-secret" not found`,
},
{
name: "pull secret missing docker config",
existing: []runtime.Object{
initializedPoolBuilder.Build(
testcp.WithSize(1),
testcp.WithPullSecret("test-pull-secret"),
),
testsecret.FullBuilder(testNamespace, "test-pull-secret", scheme).Build(),
},
expectError: true,
expectedMissingDependenciesStatus: corev1.ConditionTrue,
expectedMissingDependenciesMessage: `pull secret: pull secret does not contain .dockerconfigjson data`,
},
{
name: "assign to claim",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
testclaim.FullBuilder(testNamespace, "test-claim", scheme).Build(testclaim.WithPool(testLeasePoolName)),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedAssignedClaims: 1,
expectedUnassignedClaims: 0,
expectedClaimPendingReasons: map[string]string{"test-claim": "ClusterAssigned"},
},
{
name: "no ready clusters to assign to claim",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(),
unclaimedCDBuilder("c2").Build(),
unclaimedCDBuilder("c3").Build(),
testclaim.FullBuilder(testNamespace, "test-claim", scheme).Build(testclaim.WithPool(testLeasePoolName)),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 0,
expectedAssignedClaims: 0,
expectedUnassignedClaims: 1,
expectedClaimPendingReasons: map[string]string{"test-claim": "NoClusters"},
},
{
name: "assign to multiple claims",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
// Claims are assigned in FIFO order by creationTimestamp
testclaim.FullBuilder(testNamespace, "test-claim-1", scheme).Build(
testclaim.WithPool(testLeasePoolName),
testclaim.Generic(testgeneric.WithCreationTimestamp(nowish.Add(-time.Second*2))),
),
testclaim.FullBuilder(testNamespace, "test-claim-2", scheme).Build(
testclaim.WithPool(testLeasePoolName),
testclaim.Generic(testgeneric.WithCreationTimestamp(nowish.Add(-time.Second))),
),
testclaim.FullBuilder(testNamespace, "test-claim-3", scheme).Build(
testclaim.WithPool(testLeasePoolName),
testclaim.Generic(testgeneric.WithCreationTimestamp(nowish)),
),
},
expectedTotalClusters: 6,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedAssignedClaims: 2,
expectedUnassignedClaims: 1,
expectedClaimPendingReasons: map[string]string{
"test-claim-1": "ClusterAssigned",
"test-claim-2": "ClusterAssigned",
"test-claim-3": "NoClusters",
},
},
{
name: "do not assign to claims for other pools",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
testclaim.FullBuilder(testNamespace, "test-claim", scheme).Build(
testclaim.WithPool("other-pool"),
testclaim.WithCondition(hivev1.ClusterClaimCondition{
Type: hivev1.ClusterClaimPendingCondition,
Status: corev1.ConditionFalse,
Reason: "ThisShouldNotChange",
Message: "Claim ignored because not in the pool",
}),
),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedAssignedClaims: 0,
expectedUnassignedClaims: 1,
expectedClaimPendingReasons: map[string]string{"test-claim": "ThisShouldNotChange"},
},
{
name: "do not assign to claims in other namespaces",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
testclaim.FullBuilder("other-namespace", "test-claim", scheme).Build(
testclaim.WithPool(testLeasePoolName),
testclaim.WithCondition(hivev1.ClusterClaimCondition{
Type: hivev1.ClusterClaimPendingCondition,
Status: corev1.ConditionFalse,
Reason: "ThisShouldNotChange",
Message: "Claim ignored because not in the namespace",
}),
),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedAssignedClaims: 0,
expectedUnassignedClaims: 1,
expectedClaimPendingReasons: map[string]string{"test-claim": "ThisShouldNotChange"},
},
{
name: "do not delete previously claimed clusters",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "do not delete previously claimed clusters 2",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.Deleted(), testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
},
{
name: "delete previously claimed clusters",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedDeletedClusters: []string{"c4"},
},
{
name: "deleting previously claimed clusters should use max concurrent",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 1,
},
{
name: "deleting previously claimed clusters should use max concurrent 2",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").GenericOptions(testgeneric.Deleted()).Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 4,
expectedObservedSize: 2,
expectedObservedReady: 1,
},
{
name: "deleting previously claimed clusters should use max concurrent 3",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(3), testcp.WithMaxConcurrent(3)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").GenericOptions(testgeneric.Deleted()).Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 3,
expectedObservedSize: 2,
expectedObservedReady: 1,
expectedDeletedClusters: []string{"c4"},
},
{
name: "scale up should include previouly deleted in max concurrent",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(4), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 2,
expectedObservedSize: 2,
expectedObservedReady: 1,
expectedDeletedClusters: []string{"c4"},
},
{
name: "scale up should include previouly deleted in max concurrent all used by deleting previously claimed",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(4), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(testcd.Installed()),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
cdBuilder("c5").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 2,
expectedObservedSize: 2,
expectedObservedReady: 2,
expectedDeletedClusters: []string{"c4", "c5"},
},
{
name: "scale up should include previouly deleted in max concurrent all used by installing one cluster and deleting one previously claimed cluster",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(4), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
cdBuilder("c5").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 3,
expectedObservedSize: 2,
expectedObservedReady: 1,
expectedDeletedClusters: []string{"c4"},
},
{
name: "scale down should include previouly deleted in max concurrent",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(1), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedDeletedClusters: []string{"c4"},
},
{
name: "scale down should include previouly deleted in max concurrent all used by deleting previously claimed clusters",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(1), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(testcd.Installed()),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
cdBuilder("c5").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 3,
expectedObservedSize: 3,
expectedObservedReady: 3,
expectedDeletedClusters: []string{"c4", "c5"},
},
{
name: "scale down should include previouly deleted in max concurrent all used by one installing cluster and deleting one previously claimed cluster",
existing: []runtime.Object{
initializedPoolBuilder.Build(testcp.WithSize(1), testcp.WithMaxConcurrent(2)),
unclaimedCDBuilder("c1").Build(testcd.Installed()),
unclaimedCDBuilder("c2").Build(testcd.Installed()),
unclaimedCDBuilder("c3").Build(),
cdBuilder("c4").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
cdBuilder("c5").
GenericOptions(testgeneric.WithAnnotation(constants.ClusterClaimRemoveClusterAnnotation, "true")).
Build(
testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "test-claim"),
),
},
expectedTotalClusters: 4,
expectedObservedSize: 3,
expectedObservedReady: 2,
expectedDeletedClusters: []string{"c4"},
},
}
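// Each case runs against a fake client: reconcile once, then verify cluster
// counts, pool status and conditions, and claim assignment.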
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if !test.noClusterImageSet {
test.existing = append(
test.existing,
&hivev1.ClusterImageSet{
ObjectMeta: metav1.ObjectMeta{Name: imageSetName},
Spec: hivev1.ClusterImageSetSpec{ReleaseImage: "test-release-image"},
},
)
}
if !test.noCredsSecret {
test.existing = append(
test.existing, testsecret.FullBuilder(testNamespace, credsSecretName, scheme).
Build(testsecret.WithDataKeyValue("dummykey", []byte("dummyval"))),
)
}
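// Seed the fake client with the case's objects plus, unless disabled above,
// the shared ClusterImageSet and credentials secret.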
fakeClient := fake.NewFakeClientWithScheme(scheme, test.existing...)
logger := log.New()
logger.SetLevel(log.DebugLevel)
controllerExpectations := controllerutils.NewExpectations(logger)
rcp := &ReconcileClusterPool{
Client: fakeClient,
logger: logger,
expectations: controllerExpectations,
}
reconcileRequest := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: testLeasePoolName,
Namespace: testNamespace,
},
}
_, err := rcp.Reconcile(context.TODO(), reconcileRequest)
if test.expectError {
assert.Error(t, err, "expected error from reconcile")
} else {
assert.NoError(t, err, "expected no error from reconcile")
}
cds := &hivev1.ClusterDeploymentList{}
err = fakeClient.List(context.Background(), cds)
require.NoError(t, err)
assert.Len(t, cds.Items, test.expectedTotalClusters, "unexpected number of total clusters")
for _, expectedDeletedName := range test.expectedDeletedClusters {
for _, cd := range cds.Items {
assert.NotEqual(t, expectedDeletedName, cd.Name, "expected cluster to have been deleted")
}
}
for _, cd := range cds.Items {
assert.Equal(t, hivev1.HibernatingClusterPowerState, cd.Spec.PowerState, "expected cluster to be hibernating")
if test.expectedLabels != nil {
for k, v := range test.expectedLabels {
assert.Equal(t, v, cd.Labels[k])
}
}
}
pool := &hivev1.ClusterPool{}
err = fakeClient.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeasePoolName}, pool)
if test.expectFinalizerRemoved {
assert.True(t, apierrors.IsNotFound(err), "expected pool to be deleted")
} else {
assert.NoError(t, err, "unexpected error getting clusterpool")
assert.Contains(t, pool.Finalizers, finalizer, "expect finalizer on clusterpool")
assert.Equal(t, test.expectedObservedSize, pool.Status.Size, "unexpected observed size")
assert.Equal(t, test.expectedObservedReady, pool.Status.Ready, "unexpected observed ready count")
}
if test.expectedMissingDependenciesStatus != "" {
missingDependenciesCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolMissingDependenciesCondition)
if assert.NotNil(t, missingDependenciesCondition, "did not find MissingDependencies condition") {
assert.Equal(t, test.expectedMissingDependenciesStatus, missingDependenciesCondition.Status,
"unexpected MissingDependencies conditon status")
if test.expectedMissingDependenciesMessage != "" {
assert.Equal(t, test.expectedMissingDependenciesMessage, missingDependenciesCondition.Message,
"unexpected MissingDependencies conditon message")
}
}
}
if test.expectedCapacityStatus != "" {
capacityAvailableCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolCapacityAvailableCondition)
if assert.NotNil(t, capacityAvailableCondition, "did not find CapacityAvailable condition") {
assert.Equal(t, test.expectedCapacityStatus, capacityAvailableCondition.Status,
"unexpected CapacityAvailable conditon status")
}
}
claims := &hivev1.ClusterClaimList{}
err = fakeClient.List(context.Background(), claims)
require.NoError(t, err)
actualAssignedClaims := 0
actualUnassignedClaims := 0
for _, claim := range claims.Items {
if test.expectedClaimPendingReasons != nil {
if reason, ok := test.expectedClaimPendingReasons[claim.Name]; ok {
actualCond := controllerutils.FindClusterClaimCondition(claim.Status.Conditions, hivev1.ClusterClaimPendingCondition)
if assert.NotNil(t, actualCond, "did not find Pending condition on claim %s", claim.Name) {
assert.Equal(t, reason, actualCond.Reason, "wrong reason on Pending condition for claim %s", claim.Name)
}
}
}
if claim.Spec.Namespace == "" {
actualUnassignedClaims++
} else {
actualAssignedClaims++
}
}
assert.Equal(t, test.expectedAssignedClaims, actualAssignedClaims, "unexpected number of assigned claims")
assert.Equal(t, test.expectedUnassignedClaims, actualUnassignedClaims, "unexpected number of unassigned claims")
})
}
}
func TestReconcileRBAC(t *testing.T) {
scheme := runtime.NewScheme()
hivev1.AddToScheme(scheme)
corev1.AddToScheme(scheme)
rbacv1.AddToScheme(scheme)
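// Each case seeds pre-existing RoleBindings and Namespaces and asserts the
// exact set of bindings left after reconcileRBAC, including the aggregated
// "hive-cluster-pool-admin-binding" expected in every namespace labeled for
// the pool.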
tests := []struct {
name string
existing []runtime.Object
expectedBindings []rbacv1.RoleBinding
expectedErr string
}{{
name: "no binding referring to cluster role 1",
existing: []runtime.Object{},
}, {
name: "no binding referring to cluster role 2",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "admin"},
},
},
}, {
name: "binding referring to cluster role but no namespace for pool 1",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "binding referring to cluster role but no namespace for pool 2",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "some-namespace-1",
},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "binding referring to cluster role but no namespace for pool 3",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "some-namespace-1",
},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "some-namespace-2",
Labels: map[string]string{constants.ClusterPoolNameLabel: "some-other-pool"},
},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "binding referring to cluster role with one namespace for pool 4",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "binding referring to cluster role with multiple namespace for pool",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-2",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-2",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "multiple binding referring to cluster role with one namespace for pool",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "multiple binding referring to cluster role with multiple namespace for pool",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-2",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-2",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "pre existing role binding that is same",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "pre existing role bindings that are same",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "pre existing role binding that are different 1",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "pre existing role binding that are different 2",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "pre existing role bindings that are different 3",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "pre existing role bindings that are different 4",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}, {Kind: "Group", Name: "test-admin-group-another"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}, {
name: "pre existing role bindings that are different 5",
existing: []runtime.Object{
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-1",
Labels: map[string]string{constants.ClusterPoolNameLabel: testLeasePoolName},
},
},
&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-1"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
expectedBindings: []rbacv1.RoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-cluster-1",
Name: "hive-cluster-pool-admin-binding",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}, {Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test",
},
Subjects: []rbacv1.Subject{{Kind: "User", Name: "test-admin-another"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: "test-2",
},
Subjects: []rbacv1.Subject{{Kind: "Group", Name: "test-admin-group-1"}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "hive-cluster-pool-admin"},
},
},
}}
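// Reconcile RBAC for the pool and compare the resulting RoleBindings
// (sorted, with volatile fields cleared) against expectations.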
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeClient := fake.NewFakeClientWithScheme(scheme, test.existing...)
logger := log.New()
logger.SetLevel(log.DebugLevel)
controllerExpectations := controllerutils.NewExpectations(logger)
rcp := &ReconcileClusterPool{
Client: fakeClient,
logger: logger,
expectations: controllerExpectations,
}
err := rcp.reconcileRBAC(&hivev1.ClusterPool{
ObjectMeta: metav1.ObjectMeta{
Name: testLeasePoolName,
Namespace: testNamespace,
},
}, logger)
if test.expectedErr == "" {
require.NoError(t, err)
rbs := &rbacv1.RoleBindingList{}
err = fakeClient.List(context.Background(), rbs)
require.NoError(t, err)
sort.Slice(rbs.Items, func(i, j int) bool {
// Sort by namespace, then by name, so the comparison below is deterministic.
if rbs.Items[i].Namespace != rbs.Items[j].Namespace {
return rbs.Items[i].Namespace < rbs.Items[j].Namespace
}
return rbs.Items[i].Name < rbs.Items[j].Name
})
for idx := range rbs.Items {
rbs.Items[idx].TypeMeta = metav1.TypeMeta{}
rbs.Items[idx].ResourceVersion = ""
}
assert.Equal(t, test.expectedBindings, rbs.Items)
} else {
require.Regexp(t, test.expectedErr, err)
}
})
}
}
| 1 | 18,860 | Since CD updates (assignment & power state) are now done in this controller... | openshift-hive | go |
@@ -13,10 +13,18 @@ const MongooseError = require('./mongooseError');
* @api private
*/
-function ParallelValidateError(doc) {
+function ParallelValidateError(doc, opts) {
const msg = 'Can\'t validate() the same doc multiple times in parallel. Document: ';
MongooseError.call(this, msg + doc._id);
this.name = 'ParallelValidateError';
+ if (opts && opts.parentStack) {
+ // Provide a full async stack, most recent first
+ this.stack = this.stack + '\n\n' + opts.parentStack.join('\n\n');
+ }
+ // You need to know to look for this, but having it can be very helpful
+ // for tracking down issues when combined with the deepStackTrace schema
+ // option
+ this.conflictStack = opts && opts.conflictStack;
}
/*! | 1 | 'use strict';
/*!
* Module dependencies.
*/
const MongooseError = require('./mongooseError');
/**
* ParallelValidate Error constructor.
*
* @inherits MongooseError
* @api private
*/
function ParallelValidateError(doc) {
const msg = 'Can\'t validate() the same doc multiple times in parallel. Document: ';
MongooseError.call(this, msg + doc._id);
this.name = 'ParallelValidateError';
}
/*!
* Inherits from MongooseError.
*/
ParallelValidateError.prototype = Object.create(MongooseError.prototype);
ParallelValidateError.prototype.constructor = ParallelValidateError;
/*!
* exports
*/
module.exports = ParallelValidateError; | 1 | 14,130 | Hmm I'd rather not support this option going forward - it seems like a one-off just to work around this particular issue. Would it be fine to just remove the `deepStackTrace` option? The rest of the PR looks great - I love the idea of switching to sets. | Automattic-mongoose | js |
@@ -0,0 +1,17 @@
+class CommentCreator
+ def initialize(parsed_email)
+ @parsed_email = parsed_email
+ end
+
+ def run
+ Comment.create(
+ comment_text: parsed_email.comment_text,
+ user: parsed_email.comment_user,
+ proposal: parsed_email.proposal
+ )
+ end
+
+ private
+
+ attr_reader :parsed_email
+end | 1 | 1 | 14,971 | on the one hand I like how little this is doing. On the other hand, `inbound_mail_parser` is doing most of the work here so maybe it's not as helpful as I originally thought | 18F-C2 | rb |
|
@@ -238,7 +238,7 @@ TEST_F(BlackBoxPersistence, AsyncRTPSAsReliableWithPersistence)
ASSERT_TRUE(reader.isInitialized());
- writer.make_persistent(db_file_name(), guid_prefix()).asynchronously(eprosima::fastrtps::rtps::RTPSWriterPublishMode::ASYNCHRONOUS_WRITER).init();
+ writer.make_persistent(db_file_name(), guid_prefix()).history_depth(10).asynchronously(eprosima::fastrtps::rtps::RTPSWriterPublishMode::ASYNCHRONOUS_WRITER).init();
ASSERT_TRUE(writer.isInitialized());
| 1 | // Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "BlackboxTests.hpp"
#include "RTPSAsSocketReader.hpp"
#include "RTPSAsSocketWriter.hpp"
#include "RTPSWithRegistrationReader.hpp"
#include "RTPSWithRegistrationWriter.hpp"
#include <thread>
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
class BlackBoxPersistence : public ::testing::Test
{
public:
const std::string& db_file_name() const
{
return db_file_name_;
}
const eprosima::fastrtps::rtps::GuidPrefix_t& guid_prefix() const
{
return guid_prefix_;
}
std::list<HelloWorld> not_received_data;
void run_one_send_recv_test(RTPSWithRegistrationReader<HelloWorldType>& reader, RTPSWithRegistrationWriter<HelloWorldType>& writer, uint32_t seq_check = 0, bool reliable = false)
{
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
std::cout << "Discovery finished." << std::endl;
auto data = default_helloworld_data_generator();
size_t n_samples = data.size();
not_received_data.insert(not_received_data.end(), data.begin(), data.end());
reader.expected_data(not_received_data);
reader.startReception();
// Send data
writer.send(data);
// In this test all data should be sent.
ASSERT_TRUE(data.empty());
// Block reader until reception finished or timeout.
if (seq_check > 0)
{
std::cout << "Reader waiting for sequence " << seq_check << "." << std::endl;
reader.block_until_seq_number_greater_or_equal({ 0,seq_check });
}
else
{
if (reliable)
{
std::cout << "Reader waiting for " << n_samples << " samples." << std::endl;
reader.block_for_all();
}
else
{
std::cout << "Reader waiting for 2 samples." << std::endl;
reader.block_for_at_least(2);
}
}
std::cout << "Last received sequence was " << reader.get_last_received_sequence_number() << std::endl;
std::cout << "Destroying reader..." << std::endl;
reader.destroy();
std::cout << "Destroying writer..." << std::endl;
writer.destroy();
data = reader.not_received_data();
print_non_received_messages(data, default_helloworld_print);
not_received_data = data;
}
protected:
std::string db_file_name_;
eprosima::fastrtps::rtps::GuidPrefix_t guid_prefix_;
virtual void SetUp()
{
// Get info about current test
auto info = ::testing::UnitTest::GetInstance()->current_test_info();
// Create DB file name from test name and PID
std::ostringstream ss;
ss << info->test_case_name() << "_" << info->name() << "_" << GET_PID() << ".db";
db_file_name_ = ss.str();
// Fill guid prefix
int32_t* p_value = (int32_t*)guid_prefix_.value;
*p_value++ = info->line();
*p_value = GET_PID();
guid_prefix_.value[8] = HAVE_SECURITY;
guid_prefix_.value[9] = 3; //PREALLOCATED_MEMORY_MODE
eprosima::fastrtps::rtps::LocatorList_t loc;
eprosima::fastrtps::rtps::IPFinder::getIP4Address(&loc);
if (loc.size() > 0)
{
guid_prefix_.value[10] = loc.begin()->address[14];
guid_prefix_.value[11] = loc.begin()->address[15];
}
else
{
guid_prefix_.value[10] = 127;
guid_prefix_.value[11] = 1;
}
}
virtual void TearDown()
{
std::remove(db_file_name_.c_str());
}
};
TEST_F(BlackBoxPersistence, RTPSAsNonReliableWithPersistence)
{
RTPSWithRegistrationReader<HelloWorldType> reader(TEST_TOPIC_NAME);
RTPSWithRegistrationWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
std::string ip("239.255.1.4");
reader.make_persistent(db_file_name(), guid_prefix()).add_to_multicast_locator_list(ip, global_port).init();
ASSERT_TRUE(reader.isInitialized());
writer.make_persistent(db_file_name(), guid_prefix()).reliability(eprosima::fastrtps::rtps::ReliabilityKind_t::BEST_EFFORT).init();
ASSERT_TRUE(writer.isInitialized());
// Discover, send and receive
run_one_send_recv_test(reader, writer, 0, false);
// Stop and start reader and writer
std::this_thread::sleep_for(std::chrono::seconds(1));
std::cout << "First round finished." << std::endl;
reader.init();
writer.init();
// Discover, send and receive
run_one_send_recv_test(reader, writer, 13, false);
reader.destroy();
writer.destroy();
std::cout << "Second round finished." << std::endl;
}
TEST_F(BlackBoxPersistence, AsyncRTPSAsNonReliableWithPersistence)
{
RTPSWithRegistrationReader<HelloWorldType> reader(TEST_TOPIC_NAME);
RTPSWithRegistrationWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
std::string ip("239.255.1.4");
reader.make_persistent(db_file_name(), guid_prefix()).add_to_multicast_locator_list(ip, global_port).init();
ASSERT_TRUE(reader.isInitialized());
writer.make_persistent(db_file_name(), guid_prefix()).reliability(eprosima::fastrtps::rtps::ReliabilityKind_t::BEST_EFFORT).
asynchronously(eprosima::fastrtps::rtps::RTPSWriterPublishMode::ASYNCHRONOUS_WRITER).init();
ASSERT_TRUE(writer.isInitialized());
// Discover, send and receive
run_one_send_recv_test(reader, writer, 0, false);
// Stop and start reader and writer
std::this_thread::sleep_for(std::chrono::seconds(1));
std::cout << "First round finished." << std::endl;
reader.init();
writer.init();
// Discover, send and receive
run_one_send_recv_test(reader, writer, 13, false);
std::cout << "Second round finished." << std::endl;
}
TEST_F(BlackBoxPersistence, RTPSAsReliableWithPersistence)
{
RTPSWithRegistrationReader<HelloWorldType> reader(TEST_TOPIC_NAME);
RTPSWithRegistrationWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
std::string ip("239.255.1.4");
reader.make_persistent(db_file_name(), guid_prefix()).add_to_multicast_locator_list(ip, global_port).
reliability(eprosima::fastrtps::rtps::ReliabilityKind_t::RELIABLE).init();
ASSERT_TRUE(reader.isInitialized());
writer.make_persistent(db_file_name(), guid_prefix()).init();
ASSERT_TRUE(writer.isInitialized());
// Discover, send and receive
run_one_send_recv_test(reader, writer, 0, true);
// Stop and start reader and writer
std::this_thread::sleep_for(std::chrono::seconds(1));
std::cout << "First round finished." << std::endl;
reader.init();
writer.init();
// Discover, send and receive
run_one_send_recv_test(reader, writer, 20, true);
std::cout << "Second round finished." << std::endl;
}
TEST_F(BlackBoxPersistence, AsyncRTPSAsReliableWithPersistence)
{
RTPSWithRegistrationReader<HelloWorldType> reader(TEST_TOPIC_NAME);
RTPSWithRegistrationWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
std::string ip("239.255.1.4");
reader.make_persistent(db_file_name(), guid_prefix()).add_to_multicast_locator_list(ip, global_port).
reliability(eprosima::fastrtps::rtps::ReliabilityKind_t::RELIABLE).init();
ASSERT_TRUE(reader.isInitialized());
writer.make_persistent(db_file_name(), guid_prefix()).asynchronously(eprosima::fastrtps::rtps::RTPSWriterPublishMode::ASYNCHRONOUS_WRITER).init();
ASSERT_TRUE(writer.isInitialized());
// Discover, send and receive
run_one_send_recv_test(reader, writer, 0, true);
// Stop and start reader and writer
std::this_thread::sleep_for(std::chrono::seconds(1));
std::cout << "First round finished." << std::endl;
reader.init();
writer.init();
// Discover, send and receive
run_one_send_recv_test(reader, writer, 20, true);
reader.destroy();
writer.destroy();
std::cout << "Second round finished." << std::endl;
}
| 1 | 16,071 | This line is too long | eProsima-Fast-DDS | cpp |
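The tests above all follow one persistence round-trip skeleton: write, destroy reader and writer, re-open against the same database file, and assert that sequence numbers resume rather than reset. The same shape in miniature Go, with a file-backed counter standing in for the persistence plugin (paths and helpers are hypothetical):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// load returns the persisted counter, or 0 on a fresh database file.
func load(path string) int {
	b, err := os.ReadFile(path)
	if err != nil {
		return 0
	}
	n, _ := strconv.Atoi(string(b))
	return n
}

func store(path string, n int) {
	_ = os.WriteFile(path, []byte(strconv.Itoa(n)), 0o644)
}

func main() {
	path := "counter.db"
	defer os.Remove(path) // mirrors the fixture's TearDown

	// First round: start from scratch and persist progress.
	seq := load(path) + 10
	store(path, seq)

	// Second round: "restart" and check that we resume, not reset.
	fmt.Println(load(path) == 10) // true
}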
@@ -54,7 +54,7 @@ func run(ctx context.Context) {
ticker := time.NewTicker(config.SvcPollInterval())
for {
if err := config.SetConfig(); err != nil {
- logger.Errorf(err.Error())
+ logger.Fatalf(err.Error())
}
// This sets up the patching system to run in the background. | 1 | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// osconfig_agent interacts with the osconfig api.
package main
import (
"context"
"flag"
"log"
"time"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/config"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/inventory"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/logger"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/ospackage"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/ospatch"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/tasker"
"github.com/GoogleCloudPlatform/compute-image-tools/go/packages"
"github.com/GoogleCloudPlatform/compute-image-tools/go/service"
)
var version string
func init() {
// We do this here so the -X value doesn't need the full path.
config.SetVersion(version)
}
type logWriter struct{}
func (l *logWriter) Write(b []byte) (int, error) {
logger.Debug(logger.LogEntry{CallDepth: 3, Message: string(b)})
return len(b), nil
}
func run(ctx context.Context) {
res, err := config.Instance()
if err != nil {
logger.Fatalf("get instance error: %v", err)
}
ticker := time.NewTicker(config.SvcPollInterval())
for {
if err := config.SetConfig(); err != nil {
logger.Errorf(err.Error())
}
// This sets up the patching system to run in the background.
ospatch.Configure(ctx)
if config.OSPackageEnabled() {
ospackage.Run(ctx, res)
}
if config.OSInventoryEnabled() {
// This should always run after ospackage.SetConfig.
inventory.Run()
}
select {
case <-ticker.C:
continue
case <-ctx.Done():
logger.Close()
return
}
}
}
func main() {
flag.Parse()
ctx := context.Background()
if config.Debug() {
		packages.DebugLogger = log.New(&logWriter{}, "", 0)
}
proj, err := config.Project()
if err != nil {
logger.Fatalf(err.Error())
}
logger.Init(ctx, proj)
defer logger.Close()
switch action := flag.Arg(0); action {
case "":
if err := service.Register(ctx, "google_osconfig_agent", "Google OSConfig Agent", "", run, "run"); err != nil {
logger.Fatalf("service.Register error: %v", err)
}
case "noservice":
run(ctx)
return
case "inventory":
inventory.Run()
tasker.Close()
return
case "ospackage":
res, err := config.Instance()
if err != nil {
logger.Fatalf("get instance error: %v", err)
}
ospackage.Run(ctx, res)
tasker.Close()
return
case "ospatch":
ospatch.Run(ctx, make(chan struct{}))
return
default:
logger.Fatalf("Unknown arg %q", action)
}
}
| 1 | 8,550 | This isn't a fatal error, we don't want to crash just because we can't set configs, we have sane defaults set | GoogleCloudPlatform-compute-image-tools | go |
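The review point here — a failed config refresh inside the polling loop should be logged, not fatal — comes down to falling through to the previous (or default) settings and trying again on the next tick. A stripped-down sketch with hypothetical stand-ins:

package main

import (
	"context"
	"log"
	"time"
)

// refreshConfig is a hypothetical stand-in for config.SetConfig().
func refreshConfig() error { return nil }

func run(ctx context.Context) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		// Log and keep going: the last good (or default) config stays
		// in effect, so a transient failure must not crash the agent.
		if err := refreshConfig(); err != nil {
			log.Printf("config refresh failed, keeping previous settings: %v", err)
		}
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	run(ctx)
}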
@@ -93,7 +93,7 @@ func (t *V4Trie) Get(cidr V4CIDR) interface{} {
}
func (t *V4Trie) LookupPath(buffer []V4TrieEntry, cidr V4CIDR) []V4TrieEntry {
- return t.root.lookupPath(buffer, cidr)
+ return t.root.lookupPath(buffer[:0], cidr)
}
// LPM does a longest prefix match on the trie | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ip
import (
"encoding/binary"
"log"
"math/bits"
)
type V4Trie struct {
root *V4Node
}
type V4Node struct {
cidr V4CIDR
children [2]*V4Node
data interface{}
}
func (t *V4Trie) Delete(cidr V4CIDR) {
if t.root == nil {
// Trie is empty.
return
}
if V4CommonPrefix(t.root.cidr, cidr) != t.root.cidr {
// Trie does not contain prefix.
return
}
t.root = deleteInternal(t.root, cidr)
}
func deleteInternal(n *V4Node, cidr V4CIDR) *V4Node {
if !n.cidr.ContainsV4(cidr.addr) {
// Not in trie.
return n
}
if cidr == n.cidr {
// Found the node. If either child is nil then this was just an intermediate node
// and it no longer has any data in it so we replace it by its remaining child.
if n.children[0] == nil {
// 0th child is nil, return the other child (or nil if both children were nil)
return n.children[1]
} else if n.children[1] == nil {
			// 0th child is non-nil but 1st child is nil, return the 0th child.
return n.children[0]
} else {
// Intermediate node but it has two children so it is still required.
n.data = nil
return n
}
}
// If we get here, then this node is a parent of the CIDR we're looking for.
// Figure out which child to recurse on.
childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1))
oldChild := n.children[childIdx]
if oldChild == nil {
return n
}
newChild := deleteInternal(oldChild, cidr)
n.children[childIdx] = newChild
if newChild == nil {
// One of our children has been deleted completely, check if this node is an intermediate node
// that needs to be cleaned up.
if n.data == nil {
return n.children[1-childIdx]
}
}
return n
}
type V4TrieEntry struct {
CIDR V4CIDR
Data interface{}
}
func (t *V4Trie) Get(cidr V4CIDR) interface{} {
return t.root.get(cidr)
}
func (t *V4Trie) LookupPath(buffer []V4TrieEntry, cidr V4CIDR) []V4TrieEntry {
return t.root.lookupPath(buffer, cidr)
}
// LPM does a longest prefix match on the trie
func (t *V4Trie) LPM(cidr V4CIDR) (V4CIDR, interface{}) {
n := t.root
var match *V4Node
for {
if n == nil {
break
}
if !n.cidr.ContainsV4(cidr.addr) {
break
}
if n.data != nil {
match = n
}
if cidr == n.cidr {
break
}
// If we get here, then this node is a parent of the CIDR we're looking for.
// Figure out which child to recurse on.
childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1))
n = n.children[childIdx]
}
if match == nil || match.data == nil {
return V4CIDR{}, nil
}
return match.cidr, match.data
}
func (n *V4Node) lookupPath(buffer []V4TrieEntry, cidr V4CIDR) []V4TrieEntry {
if n == nil {
return nil
}
if !n.cidr.ContainsV4(cidr.addr) {
// Not in trie.
return nil
}
if n.data != nil {
buffer = append(buffer, V4TrieEntry{CIDR: n.cidr, Data: n.data})
}
if cidr == n.cidr {
if n.data == nil {
// CIDR is an intermediate node with no data so CIDR isn't actually in the trie.
return nil
}
return buffer
}
// If we get here, then this node is a parent of the CIDR we're looking for.
// Figure out which child to recurse on.
childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1))
child := n.children[childIdx]
return child.lookupPath(buffer, cidr)
}
func (n *V4Node) get(cidr V4CIDR) interface{} {
if n == nil {
return nil
}
if !n.cidr.ContainsV4(cidr.addr) {
// Not in trie.
return nil
}
if cidr == n.cidr {
if n.data == nil {
// CIDR is an intermediate node with no data so CIDR isn't actually in the trie.
return nil
}
return n.data
}
// If we get here, then this node is a parent of the CIDR we're looking for.
// Figure out which child to recurse on.
childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1))
child := n.children[childIdx]
return child.get(cidr)
}
func (t *V4Trie) CoveredBy(cidr V4CIDR) bool {
return V4CommonPrefix(t.root.cidr, cidr) == cidr
}
func (t *V4Trie) Covers(cidr V4CIDR) bool {
return t.root.covers(cidr)
}
func (n *V4Node) covers(cidr V4CIDR) bool {
if n == nil {
return false
}
if V4CommonPrefix(n.cidr, cidr) != n.cidr {
// Not in trie.
return false
}
if n.data != nil {
return true
}
// If we get here, then this node is a parent of the CIDR we're looking for.
// Figure out which child to recurse on.
childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1))
child := n.children[childIdx]
return child.covers(cidr)
}
func (t *V4Trie) Intersects(cidr V4CIDR) bool {
return t.root.intersects(cidr)
}
func (n *V4Node) intersects(cidr V4CIDR) bool {
if n == nil {
return false
}
common := V4CommonPrefix(n.cidr, cidr)
if common == cidr {
// This node's CIDR is contained within the target CIDR so we must have
// some value that is inside the target CIDR.
return true
}
if common != n.cidr {
// The CIDRs are disjoint.
return false
}
// If we get here, then this node is a parent of the CIDR we're looking for.
// Figure out which child to recurse on.
childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1))
child := n.children[childIdx]
return child.intersects(cidr)
}
func (n *V4Node) appendTo(s []V4TrieEntry) []V4TrieEntry {
if n == nil {
return s
}
if n.data != nil {
s = append(s, V4TrieEntry{
CIDR: n.cidr,
Data: n.data,
})
}
s = n.children[0].appendTo(s)
s = n.children[1].appendTo(s)
return s
}
func (n *V4Node) visit(f func(cidr V4CIDR, data interface{}) bool) bool {
if n == nil {
return true
}
if n.data != nil {
keepGoing := f(n.cidr, n.data)
if !keepGoing {
return false
}
}
keepGoing := n.children[0].visit(f)
if !keepGoing {
return false
}
return n.children[1].visit(f)
}
func (t *V4Trie) ToSlice() []V4TrieEntry {
return t.root.appendTo(nil)
}
func (t *V4Trie) Visit(f func(cidr V4CIDR, data interface{}) bool) {
t.root.visit(f)
}
func (t *V4Trie) Update(cidr V4CIDR, value interface{}) {
if value == nil {
log.Panic("Can't store nil in a V4Trie")
}
parentsPtr := &t.root
thisNode := t.root
for {
if thisNode == nil {
// We've run off the end of the tree, create new child to hold this data.
newNode := &V4Node{
cidr: cidr,
data: value,
}
*parentsPtr = newNode
return
}
if thisNode.cidr == cidr {
// Found a node with exactly this CIDR, just update the data.
thisNode.data = value
return
}
// If we get here, there are three cases:
// - CIDR of this node contains the new CIDR, in which case we need look for matching child
// - The new CIDR contains this node, in which case we need to insert a new node as the parent of this one.
// - The two CIDRs are disjoint, in which case we need to insert a new intermediate node as the parent of
// thisNode and the new CIDR.
commonPrefix := V4CommonPrefix(cidr, thisNode.cidr)
if commonPrefix.prefix == thisNode.cidr.prefix {
// Common is this node's CIDR so this node is parent of the new CIDR. Figure out which child to recurse on.
childIdx := cidr.addr.NthBit(uint(commonPrefix.prefix + 1))
parentsPtr = &thisNode.children[childIdx]
thisNode = thisNode.children[childIdx]
continue
}
if commonPrefix.prefix == cidr.prefix {
// Common is new CIDR so this node is a child of the new CIDR. Insert new node.
newNode := &V4Node{
cidr: cidr,
data: value,
}
childIdx := thisNode.cidr.addr.NthBit(uint(commonPrefix.prefix + 1))
newNode.children[childIdx] = thisNode
*parentsPtr = newNode
return
}
// Neither CIDR contains the other. Create an internal node with this node and new CIDR as children.
newInternalNode := &V4Node{
cidr: commonPrefix,
}
childIdx := thisNode.cidr.addr.NthBit(uint(commonPrefix.prefix + 1))
newInternalNode.children[childIdx] = thisNode
newInternalNode.children[1-childIdx] = &V4Node{
cidr: cidr,
data: value,
}
*parentsPtr = newInternalNode
return
}
}
func V4CommonPrefix(a, b V4CIDR) V4CIDR {
var result V4CIDR
var maxLen uint8
if b.prefix < a.prefix {
maxLen = b.prefix
} else {
maxLen = a.prefix
}
a32 := a.addr.AsUint32()
b32 := b.addr.AsUint32()
xored := a32 ^ b32 // Has a zero bit wherever the two values are the same.
commonPrefixLen := uint8(bits.LeadingZeros32(xored))
if commonPrefixLen > maxLen {
result.prefix = maxLen
} else {
result.prefix = commonPrefixLen
}
mask := uint32(0xffffffff) << (32 - result.prefix)
commonPrefix32 := mask & a32
binary.BigEndian.PutUint32(result.addr[:], commonPrefix32)
return result
}
| 1 | 17,500 | I wondered why `buffer` was passed into `LookupPath`. What is happening here? Is `buffer[:0]` equivalent to `[]V4TrieEntry{}`, and hence `buffer` isn't needed any more? | projectcalico-felix | c |
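On the reviewer's question about buffer[:0]: it is not the same as a fresh []V4TrieEntry{}. Re-slicing to zero length keeps the caller's backing array, so lookupPath can append without allocating — which is exactly why the buffer is passed in. A quick standalone demonstration with ints:

package main

import "fmt"

func main() {
	buf := make([]int, 0, 8)
	a := append(buf, 1, 2, 3) // fills buf's backing array in place

	// buf[:0] resets the visible length but keeps the same backing
	// array, so the next append writes over index 0 with no allocation.
	b := append(buf[:0], 9)

	fmt.Println(a)                // [9 2 3] -- a[0] was overwritten: shared array
	fmt.Println(b)                // [9]
	fmt.Println(cap(buf), cap(b)) // 8 8 -- same capacity, same array
}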
@@ -0,0 +1,18 @@
+<table style="font-size: 15px; font-style: italic; margin: 15px; background-color: #eee; width: 520px">
+ <tr>
+ <td style="width: 50px; vertical-align: top; padding: 15px">
+ <%= link_to(
+ image_tag(
+ attachments["avatar.png"].url,
+ alt: @user_message_author,
+ title: @user_message_author
+ ),
+ user_url(@user_message_author, :host => SERVER_URL),
+ :target => "_blank"
+ ) %>
+ </td>
+ <td style="text-align: left; vertical-align: top; padding-right: 10px">
+ <%= captured %>
+ </td>
+ </tr>
+</table> | 1 | 1 | 10,315 | I don't really like the name of this file, especially the encoding of an implementation detail (the fact that it's a table) in the name. Can we just use `_message_body.html.erb` instead maybe? | openstreetmap-openstreetmap-website | rb |
|
@@ -220,6 +220,9 @@ type NetworkPolicyRule struct {
// action “nil” defaults to Allow action, which would be the case for rules created for
// K8s Network Policy.
Action *secv1alpha1.RuleAction `json:"action,omitempty" protobuf:"bytes,6,opt,name=action,casttype=github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1.RuleAction"`
+ // EnableLogging is used to indicate if the agent should generate logs
+ // when rules are matched. Defaults to false.
+ EnableLogging bool `json:"enableLogging" protobuf:"varint,7,opt,name=enableLogging"`
}
// Protocol defines network protocols supported for things like container ports. | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
statsv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/stats/v1alpha1"
)
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroup is the message format of antrea/pkg/controller/types.AppliedToGroup in an API response.
type AppliedToGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Pods is a list of Pods selected by this group.
Pods []GroupMemberPod `json:"pods,omitempty" protobuf:"bytes,2,rep,name=pods"`
	// GroupMembers is a list of resources selected by this group. It will eventually replace Pods.
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,3,rep,name=groupMembers"`
}
// PodReference represents a Pod Reference.
type PodReference struct {
// The name of this pod.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The namespace of this pod.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// NamedPort represents a Port with a name on Pod.
type NamedPort struct {
// Port represents the Port number.
Port int32 `json:"port,omitempty" protobuf:"varint,1,opt,name=port"`
// Name represents the associated name with this Port number.
Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
// Protocol for port. Must be UDP, TCP, or SCTP.
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol"`
}
// GroupMemberPod represents a GroupMember related to Pods.
type GroupMemberPod struct {
// Pod maintains the reference to the Pod.
Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,1,opt,name=pod"`
// IP maintains the IPAddress associated with the Pod.
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,2,opt,name=ip"`
// Ports maintain the named port mapping of this Pod.
Ports []NamedPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// ExternalEntityReference represents a ExternalEntity Reference.
type ExternalEntityReference struct {
// The name of this ExternalEntity.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The namespace of this ExternalEntity.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// Endpoint represents an external endpoint.
type Endpoint struct {
// IP is the IP address of the Endpoint.
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Ports is the list of NamedPorts of the Endpoint.
Ports []NamedPort `json:"ports,omitempty" protobuf:"bytes,2,rep,name=ports"`
}
// GroupMember represents resource member to be populated in Groups.
// This supersedes GroupMemberPod, and will eventually replace it.
type GroupMember struct {
// Pod maintains the reference to the Pod.
Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,1,opt,name=pod"`
// ExternalEntity maintains the reference to the ExternalEntity.
ExternalEntity *ExternalEntityReference `json:"externalEntity,omitempty" protobuf:"bytes,2,opt,name=externalEntity"`
// Endpoints maintains a list of EndPoints associated with this groupMember.
Endpoints []Endpoint `json:"endpoints,omitempty" protobuf:"bytes,3,rep,name=endpoints"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupPatch describes the incremental update of an AppliedToGroup.
type AppliedToGroupPatch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
AddedPods []GroupMemberPod `json:"addedPods,omitempty" protobuf:"bytes,2,rep,name=addedPods"`
RemovedPods []GroupMemberPod `json:"removedPods,omitempty" protobuf:"bytes,3,rep,name=removedPods"`
AddedGroupMembers []GroupMember `json:"addedGroupMembers,omitempty" protobuf:"bytes,4,rep,name=addedGroupMembers"`
RemovedGroupMembers []GroupMember `json:"removedGroupMembers,omitempty" protobuf:"bytes,5,rep,name=removedGroupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupList is a list of AppliedToGroup objects.
type AppliedToGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []AppliedToGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroup is the message format of antrea/pkg/controller/types.AddressGroup in an API response.
type AddressGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Pods []GroupMemberPod `json:"pods,omitempty" protobuf:"bytes,2,rep,name=pods"`
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,3,rep,name=groupMembers"`
}
// IPAddress describes a single IP address. Either an IPv4 or IPv6 address must be set.
type IPAddress []byte
// IPNet describes an IP network.
type IPNet struct {
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
PrefixLength int32 `json:"prefixLength,omitempty" protobuf:"varint,2,opt,name=prefixLength"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupPatch describes the incremental update of an AddressGroup.
type AddressGroupPatch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
AddedPods []GroupMemberPod `json:"addedPods,omitempty" protobuf:"bytes,2,rep,name=addedPods"`
RemovedPods []GroupMemberPod `json:"removedPods,omitempty" protobuf:"bytes,3,rep,name=removedPods"`
AddedGroupMembers []GroupMember `json:"addedGroupMembers,omitempty" protobuf:"bytes,4,rep,name=addedGroupMembers"`
RemovedGroupMembers []GroupMember `json:"removedGroupMembers,omitempty" protobuf:"bytes,5,rep,name=removedGroupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupList is a list of AddressGroup objects.
type AddressGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []AddressGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
type NetworkPolicyType string
const (
K8sNetworkPolicy NetworkPolicyType = "K8sNetworkPolicy"
AntreaClusterNetworkPolicy NetworkPolicyType = "AntreaClusterNetworkPolicy"
AntreaNetworkPolicy NetworkPolicyType = "AntreaNetworkPolicy"
)
type NetworkPolicyReference struct {
// Type of the NetworkPolicy.
Type NetworkPolicyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=NetworkPolicyType"`
// Namespace of the NetworkPolicy. It's empty for Antrea ClusterNetworkPolicy.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
// Name of the NetworkPolicy.
Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
// UID of the NetworkPolicy.
UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// +genclient
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicy is the message format of antrea/pkg/controller/types.NetworkPolicy in an API response.
type NetworkPolicy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Rules is a list of rules to be applied to the selected Pods.
Rules []NetworkPolicyRule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"`
// AppliedToGroups is a list of names of AppliedToGroups to which this policy applies.
AppliedToGroups []string `json:"appliedToGroups,omitempty" protobuf:"bytes,3,rep,name=appliedToGroups"`
// Priority represents the relative priority of this Network Policy as compared to
// other Network Policies. Priority will be unset (nil) for K8s Network Policy.
Priority *float64 `json:"priority,omitempty" protobuf:"fixed64,4,opt,name=priority"`
// TierPriority represents the priority of the Tier associated with this Network
// Policy. The TierPriority will remain nil for K8s NetworkPolicy.
TierPriority *int32 `json:"tierPriority,omitempty" protobuf:"varint,5,opt,name=tierPriority"`
// Reference to the original NetworkPolicy that the internal NetworkPolicy is created for.
SourceRef *NetworkPolicyReference `json:"sourceRef,omitempty" protobuf:"bytes,6,opt,name=sourceRef"`
}
// Direction defines traffic direction of NetworkPolicyRule.
type Direction string
const (
DirectionIn Direction = "In"
DirectionOut Direction = "Out"
)
// NetworkPolicyRule describes a particular set of traffic that is allowed.
type NetworkPolicyRule struct {
// The direction of this rule.
// If it's set to In, From must be set and To must not be set.
// If it's set to Out, To must be set and From must not be set.
Direction Direction `json:"direction,omitempty" protobuf:"bytes,1,opt,name=direction"`
// From represents sources which should be able to access the pods selected by the policy.
From NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
// To represents destinations which should be able to be accessed by the pods selected by the policy.
To NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,3,opt,name=to"`
// Services is a list of services which should be matched.
Services []Service `json:"services,omitempty" protobuf:"bytes,4,rep,name=services"`
// Priority defines the priority of the Rule as compared to other rules in the
// NetworkPolicy.
Priority int32 `json:"priority,omitempty" protobuf:"varint,5,opt,name=priority"`
// Action specifies the action to be applied on the rule. i.e. Allow/Drop. An empty
// action “nil” defaults to Allow action, which would be the case for rules created for
// K8s Network Policy.
Action *secv1alpha1.RuleAction `json:"action,omitempty" protobuf:"bytes,6,opt,name=action,casttype=github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1.RuleAction"`
}
// Protocol defines network protocols supported for things like container ports.
type Protocol string
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
// ProtocolSCTP is the SCTP protocol.
ProtocolSCTP Protocol = "SCTP"
)
// Service describes a port to allow traffic on.
type Service struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
// field defaults to TCP.
// +optional
Protocol *Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol"`
// The port name or number on the given protocol. If not specified, this matches all port numbers.
// +optional
Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
}
// NetworkPolicyPeer describes a peer of NetworkPolicyRules.
// It could be a list of names of AddressGroups and/or a list of IPBlock.
type NetworkPolicyPeer struct {
// A list of names of AddressGroups.
AddressGroups []string `json:"addressGroups,omitempty" protobuf:"bytes,1,rep,name=addressGroups"`
// A list of IPBlock.
IPBlocks []IPBlock `json:"ipBlocks,omitempty" protobuf:"bytes,2,rep,name=ipBlocks"`
}
// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24"). The except entry describes CIDRs that should
// not be included within this rule.
type IPBlock struct {
// CIDR is an IPNet represents the IP Block.
CIDR IPNet `json:"cidr" protobuf:"bytes,1,name=cidr"`
// Except is a slice of IPNets that should not be included within an IP Block.
// Except values will be rejected if they are outside the CIDR range.
// +optional
Except []IPNet `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicyList is a list of NetworkPolicy objects.
type NetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeStatsSummary contains stats produced on a Node. It's used by the antrea-agents to report stats to the antrea-controller.
type NodeStatsSummary struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The TrafficStats of K8s NetworkPolicies collected from the Node.
NetworkPolicies []NetworkPolicyStats `json:"networkPolicies,omitempty" protobuf:"bytes,2,rep,name=networkPolicies"`
// The TrafficStats of Antrea ClusterNetworkPolicies collected from the Node.
AntreaClusterNetworkPolicies []NetworkPolicyStats `json:"antreaClusterNetworkPolicies,omitempty" protobuf:"bytes,3,rep,name=antreaClusterNetworkPolicies"`
// The TrafficStats of Antrea NetworkPolicies collected from the Node.
AntreaNetworkPolicies []NetworkPolicyStats `json:"antreaNetworkPolicies,omitempty" protobuf:"bytes,4,rep,name=antreaNetworkPolicies"`
}
// NetworkPolicyStats contains the information and traffic stats of a NetworkPolicy.
type NetworkPolicyStats struct {
// The reference of the NetworkPolicy.
NetworkPolicy NetworkPolicyReference `json:"networkPolicy,omitempty" protobuf:"bytes,1,opt,name=networkPolicy"`
// The stats of the NetworkPolicy.
TrafficStats statsv1alpha1.TrafficStats `json:"trafficStats,omitempty" protobuf:"bytes,2,opt,name=trafficStats"`
}
| 1 | 24,490 | Feel better not to expose our internal implementation in API/CRD, so no need to mention agent here. Consider: EnableLogging indicates whether or not to generate logs when rules are matched. Default to false. | antrea-io-antrea | go |
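Applying the review's suggested wording to the new field gives roughly this shape (a sketch only — the tags are copied from the patch, the type name is invented):

package sketch

// NetworkPolicyRuleFieldSketch shows only the field under discussion,
// with the reviewer's proposed comment in place of the original one.
type NetworkPolicyRuleFieldSketch struct {
	// EnableLogging indicates whether or not to generate logs when rules
	// are matched. Default to false.
	EnableLogging bool `json:"enableLogging" protobuf:"varint,7,opt,name=enableLogging"`
}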
@@ -124,6 +124,16 @@ RocksEngine::RocksEngine(GraphSpaceID spaceId,
status = rocksdb::DB::Open(options, path, &db);
}
CHECK(status.ok()) << status.ToString();
+  if (!readonly && spaceId_ != 0 /* only for storage */) {
+ rocksdb::ReadOptions readOptions;
+ std::string dataVersionValue = "";
+ status = db->Get(readOptions, NebulaKeyUtils::dataVersionKey(), &dataVersionValue);
+ if (status.IsNotFound()) {
+ rocksdb::WriteOptions writeOptions;
+ status = db->Put(writeOptions, NebulaKeyUtils::dataVersionKey(), "3.0");
+ }
+ CHECK(status.ok()) << status.ToString();
+ }
db_.reset(db);
extractorLen_ = sizeof(PartitionID) + vIdLen;
partsNum_ = allParts().size(); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "kvstore/RocksEngine.h"
#include <folly/String.h>
#include <rocksdb/convenience.h>
#include "common/base/Base.h"
#include "common/fs/FileUtils.h"
#include "common/utils/NebulaKeyUtils.h"
#include "kvstore/KVStore.h"
DEFINE_bool(move_files, false, "Move the SST files instead of copy when ingest into dataset");
namespace nebula {
namespace kvstore {
using fs::FileType;
using fs::FileUtils;
namespace {
/***************************************
*
* Implementation of WriteBatch
*
**************************************/
class RocksWriteBatch : public WriteBatch {
private:
rocksdb::WriteBatch batch_;
public:
RocksWriteBatch() : batch_(FLAGS_rocksdb_batch_size) {}
virtual ~RocksWriteBatch() = default;
nebula::cpp2::ErrorCode put(folly::StringPiece key, folly::StringPiece value) override {
if (batch_.Put(toSlice(key), toSlice(value)).ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
nebula::cpp2::ErrorCode remove(folly::StringPiece key) override {
if (batch_.Delete(toSlice(key)).ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
// Remove all keys in the range [start, end)
nebula::cpp2::ErrorCode removeRange(folly::StringPiece start, folly::StringPiece end) override {
if (batch_.DeleteRange(toSlice(start), toSlice(end)).ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
rocksdb::WriteBatch* data() {
return &batch_;
}
};
} // Anonymous namespace
/***************************************
*
 * Implementation of RocksEngine
*
**************************************/
RocksEngine::RocksEngine(GraphSpaceID spaceId,
int32_t vIdLen,
const std::string& dataPath,
const std::string& walPath,
std::shared_ptr<rocksdb::MergeOperator> mergeOp,
std::shared_ptr<rocksdb::CompactionFilterFactory> cfFactory,
bool readonly)
: KVEngine(spaceId),
spaceId_(spaceId),
dataPath_(folly::stringPrintf("%s/nebula/%d", dataPath.c_str(), spaceId)) {
// set wal path as dataPath by default
if (walPath.empty()) {
walPath_ = folly::stringPrintf("%s/nebula/%d", dataPath.c_str(), spaceId);
} else {
walPath_ = folly::stringPrintf("%s/nebula/%d", walPath.c_str(), spaceId);
}
auto path = folly::stringPrintf("%s/data", dataPath_.c_str());
if (FileUtils::fileType(path.c_str()) == FileType::NOTEXIST) {
if (readonly) {
LOG(FATAL) << "Path " << path << " not exist";
} else {
if (!FileUtils::makeDir(path)) {
LOG(FATAL) << "makeDir " << path << " failed";
}
}
}
if (FileUtils::fileType(path.c_str()) != FileType::DIRECTORY) {
LOG(FATAL) << path << " is not directory";
}
openBackupEngine(spaceId);
rocksdb::Options options;
rocksdb::DB* db = nullptr;
rocksdb::Status status = initRocksdbOptions(options, spaceId, vIdLen);
CHECK(status.ok()) << status.ToString();
if (mergeOp != nullptr) {
options.merge_operator = mergeOp;
}
if (cfFactory != nullptr) {
options.compaction_filter_factory = cfFactory;
}
if (readonly) {
status = rocksdb::DB::OpenForReadOnly(options, path, &db);
} else {
status = rocksdb::DB::Open(options, path, &db);
}
CHECK(status.ok()) << status.ToString();
db_.reset(db);
extractorLen_ = sizeof(PartitionID) + vIdLen;
partsNum_ = allParts().size();
LOG(INFO) << "open rocksdb on " << path;
backup();
}
void RocksEngine::stop() {
if (db_) {
// Because we trigger compaction in WebService, we need to stop all
// background work before we stop HttpServer.
rocksdb::CancelAllBackgroundWork(db_.get(), true);
}
}
std::unique_ptr<WriteBatch> RocksEngine::startBatchWrite() {
return std::make_unique<RocksWriteBatch>();
}
nebula::cpp2::ErrorCode RocksEngine::commitBatchWrite(std::unique_ptr<WriteBatch> batch,
bool disableWAL,
bool sync,
bool wait) {
rocksdb::WriteOptions options;
options.disableWAL = disableWAL;
options.sync = sync;
options.no_slowdown = !wait;
auto* b = static_cast<RocksWriteBatch*>(batch.get());
rocksdb::Status status = db_->Write(options, b->data());
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else if (!wait && status.IsIncomplete()) {
return nebula::cpp2::ErrorCode::E_WRITE_STALLED;
}
LOG(ERROR) << "Write into rocksdb failed because of " << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
nebula::cpp2::ErrorCode RocksEngine::get(const std::string& key, std::string* value) {
rocksdb::ReadOptions options;
rocksdb::Status status = db_->Get(options, rocksdb::Slice(key), value);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else if (status.IsNotFound()) {
VLOG(3) << "Get: " << key << " Not Found";
return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND;
} else {
VLOG(3) << "Get Failed: " << key << " " << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
std::vector<Status> RocksEngine::multiGet(const std::vector<std::string>& keys,
std::vector<std::string>* values) {
rocksdb::ReadOptions options;
std::vector<rocksdb::Slice> slices;
for (size_t index = 0; index < keys.size(); index++) {
slices.emplace_back(keys[index]);
}
auto status = db_->MultiGet(options, slices, values);
std::vector<Status> ret;
std::transform(status.begin(), status.end(), std::back_inserter(ret), [](const auto& s) {
if (s.ok()) {
return Status::OK();
} else if (s.IsNotFound()) {
return Status::KeyNotFound();
} else {
return Status::Error();
}
});
return ret;
}
nebula::cpp2::ErrorCode RocksEngine::range(const std::string& start,
const std::string& end,
std::unique_ptr<KVIterator>* storageIter) {
rocksdb::ReadOptions options;
options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering;
rocksdb::Iterator* iter = db_->NewIterator(options);
if (iter) {
iter->Seek(rocksdb::Slice(start));
}
storageIter->reset(new RocksRangeIter(iter, start, end));
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
nebula::cpp2::ErrorCode RocksEngine::prefix(const std::string& prefix,
std::unique_ptr<KVIterator>* storageIter,
const void* snapshot) {
// In fact, we don't need to check prefix.size() >= extractorLen_, which is caller's duty to make
// sure the prefix bloom filter exists. But this is quite error-prone, so we do a check here.
if (FLAGS_enable_rocksdb_prefix_filtering && prefix.size() >= extractorLen_) {
return prefixWithExtractor(prefix, snapshot, storageIter);
} else {
return prefixWithoutExtractor(prefix, snapshot, storageIter);
}
}
nebula::cpp2::ErrorCode RocksEngine::prefixWithExtractor(const std::string& prefix,
const void* snapshot,
std::unique_ptr<KVIterator>* storageIter) {
rocksdb::ReadOptions options;
if (snapshot != nullptr) {
options.snapshot = reinterpret_cast<const rocksdb::Snapshot*>(snapshot);
}
options.prefix_same_as_start = true;
rocksdb::Iterator* iter = db_->NewIterator(options);
if (iter) {
iter->Seek(rocksdb::Slice(prefix));
}
storageIter->reset(new RocksPrefixIter(iter, prefix));
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
nebula::cpp2::ErrorCode RocksEngine::prefixWithoutExtractor(
const std::string& prefix, const void* snapshot, std::unique_ptr<KVIterator>* storageIter) {
rocksdb::ReadOptions options;
if (snapshot != nullptr) {
options.snapshot = reinterpret_cast<const rocksdb::Snapshot*>(snapshot);
}
// prefix_same_as_start is false by default
options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering;
rocksdb::Iterator* iter = db_->NewIterator(options);
if (iter) {
iter->Seek(rocksdb::Slice(prefix));
}
storageIter->reset(new RocksPrefixIter(iter, prefix));
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
nebula::cpp2::ErrorCode RocksEngine::rangeWithPrefix(const std::string& start,
const std::string& prefix,
std::unique_ptr<KVIterator>* storageIter) {
rocksdb::ReadOptions options;
// prefix_same_as_start is false by default
options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering;
rocksdb::Iterator* iter = db_->NewIterator(options);
if (iter) {
iter->Seek(rocksdb::Slice(start));
}
storageIter->reset(new RocksPrefixIter(iter, prefix));
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
nebula::cpp2::ErrorCode RocksEngine::scan(std::unique_ptr<KVIterator>* storageIter) {
rocksdb::ReadOptions options;
options.total_order_seek = true;
rocksdb::Iterator* iter = db_->NewIterator(options);
iter->SeekToFirst();
storageIter->reset(new RocksCommonIter(iter));
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
nebula::cpp2::ErrorCode RocksEngine::put(std::string key, std::string value) {
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
rocksdb::Status status = db_->Put(options, key, value);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
VLOG(3) << "Put Failed: " << key << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
nebula::cpp2::ErrorCode RocksEngine::multiPut(std::vector<KV> keyValues) {
rocksdb::WriteBatch updates(FLAGS_rocksdb_batch_size);
for (size_t i = 0; i < keyValues.size(); i++) {
updates.Put(keyValues[i].first, keyValues[i].second);
}
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
rocksdb::Status status = db_->Write(options, &updates);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
VLOG(3) << "MultiPut Failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
nebula::cpp2::ErrorCode RocksEngine::remove(const std::string& key) {
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
auto status = db_->Delete(options, key);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
VLOG(3) << "Remove Failed: " << key << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
nebula::cpp2::ErrorCode RocksEngine::multiRemove(std::vector<std::string> keys) {
rocksdb::WriteBatch deletes(FLAGS_rocksdb_batch_size);
for (size_t i = 0; i < keys.size(); i++) {
deletes.Delete(keys[i]);
}
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
rocksdb::Status status = db_->Write(options, &deletes);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
VLOG(3) << "MultiRemove Failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
nebula::cpp2::ErrorCode RocksEngine::removeRange(const std::string& start, const std::string& end) {
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
auto status = db_->DeleteRange(options, db_->DefaultColumnFamily(), start, end);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
VLOG(3) << "RemoveRange Failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
std::string RocksEngine::partKey(PartitionID partId) {
return NebulaKeyUtils::systemPartKey(partId);
}
void RocksEngine::addPart(PartitionID partId) {
auto ret = put(partKey(partId), "");
if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) {
partsNum_++;
CHECK_GE(partsNum_, 0);
}
}
void RocksEngine::removePart(PartitionID partId) {
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
std::vector<std::string> sysKeysToDelete;
sysKeysToDelete.emplace_back(partKey(partId));
sysKeysToDelete.emplace_back(NebulaKeyUtils::systemCommitKey(partId));
auto code = multiRemove(sysKeysToDelete);
if (code == nebula::cpp2::ErrorCode::SUCCEEDED) {
partsNum_--;
CHECK_GE(partsNum_, 0);
}
}
std::vector<PartitionID> RocksEngine::allParts() {
std::unique_ptr<KVIterator> iter;
std::vector<PartitionID> parts;
static const std::string prefixStr = NebulaKeyUtils::systemPrefix();
auto retCode = this->prefix(prefixStr, &iter);
if (nebula::cpp2::ErrorCode::SUCCEEDED != retCode) {
return parts;
}
while (iter->valid()) {
auto key = iter->key();
CHECK_EQ(key.size(), sizeof(PartitionID) + sizeof(NebulaSystemKeyType));
PartitionID partId = *reinterpret_cast<const PartitionID*>(key.data());
if (!NebulaKeyUtils::isSystemPart(key)) {
VLOG(3) << "Skip: " << std::bitset<32>(partId);
iter->next();
continue;
}
partId = partId >> 8;
parts.emplace_back(partId);
iter->next();
}
return parts;
}
int32_t RocksEngine::totalPartsNum() {
return partsNum_;
}
nebula::cpp2::ErrorCode RocksEngine::ingest(const std::vector<std::string>& files,
bool verifyFileChecksum) {
rocksdb::IngestExternalFileOptions options;
options.move_files = FLAGS_move_files;
options.verify_file_checksum = verifyFileChecksum;
rocksdb::Status status = db_->IngestExternalFile(files, options);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << "Ingest Failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
nebula::cpp2::ErrorCode RocksEngine::setOption(const std::string& configKey,
const std::string& configValue) {
std::unordered_map<std::string, std::string> configOptions = {{configKey, configValue}};
rocksdb::Status status = db_->SetOptions(configOptions);
if (status.ok()) {
LOG(INFO) << "SetOption Succeeded: " << configKey << ":" << configValue;
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << "SetOption Failed: " << configKey << ":" << configValue;
return nebula::cpp2::ErrorCode::E_INVALID_PARM;
}
}
nebula::cpp2::ErrorCode RocksEngine::setDBOption(const std::string& configKey,
const std::string& configValue) {
std::unordered_map<std::string, std::string> configOptions = {{configKey, configValue}};
rocksdb::Status status = db_->SetDBOptions(configOptions);
if (status.ok()) {
LOG(INFO) << "SetDBOption Succeeded: " << configKey << ":" << configValue;
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << "SetDBOption Failed: " << configKey << ":" << configValue;
return nebula::cpp2::ErrorCode::E_INVALID_PARM;
}
}
ErrorOr<nebula::cpp2::ErrorCode, std::string> RocksEngine::getProperty(
const std::string& property) {
std::string value;
if (!db_->GetProperty(property, &value)) {
return nebula::cpp2::ErrorCode::E_INVALID_PARM;
} else {
return value;
}
}
nebula::cpp2::ErrorCode RocksEngine::compact() {
rocksdb::CompactRangeOptions options;
options.change_level = FLAGS_rocksdb_compact_change_level;
options.target_level = FLAGS_rocksdb_compact_target_level;
rocksdb::Status status = db_->CompactRange(options, nullptr, nullptr);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << "CompactAll Failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
nebula::cpp2::ErrorCode RocksEngine::flush() {
rocksdb::FlushOptions options;
rocksdb::Status status = db_->Flush(options);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << "Flush Failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
}
nebula::cpp2::ErrorCode RocksEngine::backup() {
if (!backupDb_) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
LOG(INFO) << "begin to backup space " << spaceId_ << " on path " << backupPath_;
bool flushBeforeBackup = true;
auto status = backupDb_->CreateNewBackup(db_.get(), flushBeforeBackup);
if (status.ok()) {
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << "backup failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_BACKUP_FAILED;
}
}
void RocksEngine::openBackupEngine(GraphSpaceID spaceId) {
// If backup dir is not empty, set backup related options
if (FLAGS_rocksdb_table_format == "PlainTable" && !FLAGS_rocksdb_backup_dir.empty()) {
backupPath_ =
folly::stringPrintf("%s/rocksdb_backup/%d", FLAGS_rocksdb_backup_dir.c_str(), spaceId);
if (FileUtils::fileType(backupPath_.c_str()) == FileType::NOTEXIST) {
if (!FileUtils::makeDir(backupPath_)) {
LOG(FATAL) << "makeDir " << backupPath_ << " failed";
}
}
rocksdb::BackupEngine* backupDb;
rocksdb::BackupableDBOptions backupOptions(backupPath_);
backupOptions.backup_log_files = false;
auto status = rocksdb::BackupEngine::Open(rocksdb::Env::Default(), backupOptions, &backupDb);
CHECK(status.ok()) << status.ToString();
backupDb_.reset(backupDb);
LOG(INFO) << "open plain table backup engine on " << backupPath_;
std::string dataPath = folly::stringPrintf("%s/data", dataPath_.c_str());
auto walDir = dataPath;
if (!FLAGS_rocksdb_wal_dir.empty()) {
walDir = folly::stringPrintf("%s/rocksdb_wal/%d", FLAGS_rocksdb_wal_dir.c_str(), spaceId);
} else {
LOG(WARNING) << "rocksdb wal is stored with data";
}
rocksdb::RestoreOptions restoreOptions;
restoreOptions.keep_log_files = true;
status = backupDb_->RestoreDBFromLatestBackup(dataPath, walDir, restoreOptions);
LOG(INFO) << "try to restore from backup path " << backupPath_;
if (status.IsNotFound()) {
LOG(WARNING) << "no valid backup found";
return;
} else if (!status.ok()) {
LOG(FATAL) << status.ToString();
}
LOG(INFO) << "restore from latest backup successfully"
<< ", backup path " << backupPath_ << ", wal path " << walDir << ", data path "
<< dataPath;
}
}
nebula::cpp2::ErrorCode RocksEngine::createCheckpoint(const std::string& checkpointPath) {
LOG(INFO) << "Target checkpoint data path : " << checkpointPath;
if (fs::FileUtils::exist(checkpointPath) && !fs::FileUtils::remove(checkpointPath.data(), true)) {
LOG(ERROR) << "Remove exist checkpoint data dir failed: " << checkpointPath;
return nebula::cpp2::ErrorCode::E_STORE_FAILURE;
}
rocksdb::Checkpoint* checkpoint;
rocksdb::Status status = rocksdb::Checkpoint::Create(db_.get(), &checkpoint);
if (!status.ok()) {
LOG(ERROR) << "Init checkpoint Failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_FAILED_TO_CHECKPOINT;
}
std::unique_ptr<rocksdb::Checkpoint> cp(checkpoint);
status = cp->CreateCheckpoint(checkpointPath, 0);
if (!status.ok()) {
LOG(ERROR) << "Create checkpoint Failed: " << status.ToString();
return nebula::cpp2::ErrorCode::E_FAILED_TO_CHECKPOINT;
}
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
ErrorOr<nebula::cpp2::ErrorCode, std::string> RocksEngine::backupTable(
const std::string& name,
const std::string& tablePrefix,
std::function<bool(const folly::StringPiece& key)> filter) {
auto backupPath = folly::stringPrintf(
"%s/checkpoints/%s/%s.sst", dataPath_.c_str(), name.c_str(), tablePrefix.c_str());
VLOG(3) << "Start writing the sst file with table (" << tablePrefix
<< ") to file: " << backupPath;
auto parent = backupPath.substr(0, backupPath.rfind('/'));
if (!FileUtils::exist(parent)) {
if (!FileUtils::makeDir(parent)) {
LOG(ERROR) << "Make dir " << parent << " failed";
return nebula::cpp2::ErrorCode::E_BACKUP_FAILED;
}
}
std::unique_ptr<KVIterator> iter;
auto ret = prefix(tablePrefix, &iter);
if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) {
return nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE;
}
if (!iter->valid()) {
return nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE;
}
rocksdb::Options options;
options.file_checksum_gen_factory = rocksdb::GetFileChecksumGenCrc32cFactory();
rocksdb::SstFileWriter sstFileWriter(rocksdb::EnvOptions(), options);
auto s = sstFileWriter.Open(backupPath);
if (!s.ok()) {
LOG(ERROR) << "BackupTable failed, path: " << backupPath << ", error: " << s.ToString();
return nebula::cpp2::ErrorCode::E_BACKUP_TABLE_FAILED;
}
for (; iter->valid(); iter->next()) {
if (filter && filter(iter->key())) {
continue;
}
s = sstFileWriter.Put(iter->key().toString(), iter->val().toString());
if (!s.ok()) {
LOG(ERROR) << "BackupTable failed, path: " << backupPath << ", error: " << s.ToString();
sstFileWriter.Finish();
return nebula::cpp2::ErrorCode::E_BACKUP_TABLE_FAILED;
}
}
s = sstFileWriter.Finish();
if (!s.ok()) {
LOG(WARNING) << "Failed to insert data when backupTable, " << backupPath
<< ", error: " << s.ToString();
return nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE;
}
if (sstFileWriter.FileSize() == 0) {
return nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE;
}
if (backupPath[0] == '/') {
return backupPath;
}
auto result = FileUtils::realPath(backupPath.c_str());
if (!result.ok()) {
return nebula::cpp2::ErrorCode::E_BACKUP_TABLE_FAILED;
}
return result.value();
}
} // namespace kvstore
} // namespace nebula
| 1 | 33,444 | use `NebulaKeyUtilsV3::dataVersionValue()` to replace the literal value? | vesoft-inc-nebula | cpp |
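The pattern this patch adds — read a version marker on open and write it only when absent — sketched against a toy key-value map, with the literal replaced by the named value the review asks for (every identifier here is a stand-in, not nebula's API):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// Stand-ins for NebulaKeyUtils::dataVersionKey() and a named
// NebulaKeyUtilsV3::dataVersionValue(), per the review.
const (
	dataVersionKey   = "__data_version__"
	dataVersionValue = "3.0"
)

type kv map[string]string

func (s kv) get(k string) (string, error) {
	if v, ok := s[k]; ok {
		return v, nil
	}
	return "", errNotFound
}

// ensureDataVersion stamps the store on first open and leaves an
// existing stamp untouched on later opens.
func (s kv) ensureDataVersion() error {
	_, err := s.get(dataVersionKey)
	if errors.Is(err, errNotFound) {
		s[dataVersionKey] = dataVersionValue
		return nil
	}
	return err
}

func main() {
	store := kv{}
	if err := store.ensureDataVersion(); err != nil {
		panic(err)
	}
	fmt.Println(store[dataVersionKey]) // 3.0
}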
@@ -372,10 +372,15 @@ def get_path_if_valid(pathstr, cwd=None, relative=False, check_exists=False):
path = None
if check_exists:
- if path is not None and os.path.exists(path):
- log.url.debug("URL is a local file")
- else:
- path = None
+ if path is not None:
+ try:
+ if os.path.exists(path):
+ log.url.debug("URL is a local file")
+ except UnicodeEncodeError:
+ log.url.debug(
+ "URL contains characters which are not present in the " \
+ "current locale")
+ path = None
return path
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utils regarding URL handling."""
import re
import base64
import os.path
import ipaddress
import posixpath
import urllib.parse
from PyQt5.QtCore import QUrl
from PyQt5.QtNetwork import QHostInfo, QHostAddress, QNetworkProxy
from qutebrowser.config import config
from qutebrowser.utils import log, qtutils, message, utils
from qutebrowser.commands import cmdexc
from qutebrowser.browser.network import pac
# FIXME: we probably could raise some exceptions on invalid URLs
# https://github.com/qutebrowser/qutebrowser/issues/108
class InvalidUrlError(ValueError):
"""Error raised if a function got an invalid URL.
Inherits ValueError because that was the exception originally used for
that, so there still might be some code around which checks for that.
"""
def __init__(self, url):
if url.isValid():
raise ValueError("Got valid URL {}!".format(url.toDisplayString()))
self.url = url
self.msg = get_errstring(url)
super().__init__(self.msg)
def _parse_search_term(s):
"""Get a search engine name and search term from a string.
Args:
s: The string to get a search engine for.
Return:
A (engine, term) tuple, where engine is None for the default engine.
"""
s = s.strip()
split = s.split(maxsplit=1)
if len(split) == 2:
engine = split[0]
try:
config.val.url.searchengines[engine]
except KeyError:
engine = None
term = s
else:
term = split[1]
elif not split:
raise ValueError("Empty search term!")
else:
engine = None
term = s
log.url.debug("engine {}, term {!r}".format(engine, term))
return (engine, term)
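# Illustrative behaviour (not part of the original module): with a search
# engine configured under the name 'wiki', _parse_search_term('wiki foo')
# returns ('wiki', 'foo'); an unrecognised first word gives (None, s).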
def _get_search_url(txt):
"""Get a search engine URL for a text.
Args:
txt: Text to search for.
Return:
The search URL as a QUrl.
"""
log.url.debug("Finding search engine for {!r}".format(txt))
engine, term = _parse_search_term(txt)
assert term
if engine is None:
engine = 'DEFAULT'
template = config.val.url.searchengines[engine]
url = qurl_from_user_input(template.format(urllib.parse.quote(term)))
qtutils.ensure_valid(url)
return url
def _is_url_naive(urlstr):
"""Naive check if given URL is really a URL.
Args:
urlstr: The URL to check for, as string.
Return:
True if the URL really is a URL, False otherwise.
"""
url = qurl_from_user_input(urlstr)
assert url.isValid()
if not utils.raises(ValueError, ipaddress.ip_address, urlstr):
# Valid IPv4/IPv6 address
return True
# Qt treats things like "23.42" or "1337" or "0xDEAD" as valid URLs
# which we don't want to. Note we already filtered *real* valid IPs
# above.
if not QHostAddress(urlstr).isNull():
return False
host = url.host()
return '.' in host and not host.endswith('.')
def _is_url_dns(urlstr):
"""Check if a URL is really a URL via DNS.
Args:
        urlstr: The URL to check for, as string.
Return:
True if the URL really is a URL, False otherwise.
"""
url = qurl_from_user_input(urlstr)
assert url.isValid()
if (utils.raises(ValueError, ipaddress.ip_address, urlstr) and
not QHostAddress(urlstr).isNull()):
log.url.debug("Bogus IP URL -> False")
# Qt treats things like "23.42" or "1337" or "0xDEAD" as valid URLs
# which we don't want to.
return False
host = url.host()
if not host:
log.url.debug("URL has no host -> False")
return False
log.url.debug("Doing DNS request for {}".format(host))
info = QHostInfo.fromName(host)
return not info.error()
def fuzzy_url(urlstr, cwd=None, relative=False, do_search=True,
force_search=False):
"""Get a QUrl based on a user input which is URL or search term.
Args:
urlstr: URL to load as a string.
cwd: The current working directory, or None.
relative: Whether to resolve relative files.
do_search: Whether to perform a search on non-URLs.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A target QUrl to a search page or the original URL.
"""
urlstr = urlstr.strip()
path = get_path_if_valid(urlstr, cwd=cwd, relative=relative,
check_exists=True)
if not force_search and path is not None:
url = QUrl.fromLocalFile(path)
elif force_search or (do_search and not is_url(urlstr)):
# probably a search term
log.url.debug("URL is a fuzzy search term")
try:
url = _get_search_url(urlstr)
except ValueError: # invalid search engine
url = qurl_from_user_input(urlstr)
else: # probably an address
log.url.debug("URL is a fuzzy address")
url = qurl_from_user_input(urlstr)
log.url.debug("Converting fuzzy term {!r} to URL -> {}".format(
urlstr, url.toDisplayString()))
if do_search and config.val.url.auto_search != 'never' and urlstr:
qtutils.ensure_valid(url)
else:
if not url.isValid():
raise InvalidUrlError(url)
return url
def _has_explicit_scheme(url):
"""Check if a url has an explicit scheme given.
Args:
url: The URL as QUrl.
"""
# Note that generic URI syntax actually would allow a second colon
# after the scheme delimiter. Since we don't know of any URIs
# using this and want to support e.g. searching for scoped C++
# symbols, we treat this as not a URI anyways.
return (url.isValid() and url.scheme() and
(url.host() or url.path()) and
' ' not in url.path() and
not url.path().startswith(':'))
def is_special_url(url):
"""Return True if url is an about:... or other special URL.
Args:
url: The URL as QUrl.
"""
if not url.isValid():
return False
special_schemes = ('about', 'qute', 'file')
return url.scheme() in special_schemes
def is_url(urlstr):
"""Check if url seems to be a valid URL.
Args:
urlstr: The URL as string.
Return:
True if it is a valid URL, False otherwise.
"""
autosearch = config.val.url.auto_search
log.url.debug("Checking if {!r} is a URL (autosearch={}).".format(
urlstr, autosearch))
urlstr = urlstr.strip()
qurl = QUrl(urlstr)
qurl_userinput = qurl_from_user_input(urlstr)
if autosearch == 'never':
# no autosearch, so everything is a URL unless it has an explicit
# search engine.
try:
engine, _term = _parse_search_term(urlstr)
except ValueError:
return False
else:
return engine is None
if not qurl_userinput.isValid():
# This will also catch URLs containing spaces.
return False
if _has_explicit_scheme(qurl):
# URLs with explicit schemes are always URLs
log.url.debug("Contains explicit scheme")
url = True
elif qurl_userinput.host() in ['localhost', '127.0.0.1', '::1']:
log.url.debug("Is localhost.")
url = True
elif is_special_url(qurl):
# Special URLs are always URLs, even with autosearch=never
log.url.debug("Is a special URL.")
url = True
elif autosearch == 'dns':
log.url.debug("Checking via DNS check")
# We want to use qurl_from_user_input here, as the user might enter
# "foo.de" and that should be treated as URL here.
url = _is_url_dns(urlstr)
elif autosearch == 'naive':
log.url.debug("Checking via naive check")
url = _is_url_naive(urlstr)
else: # pragma: no cover
raise ValueError("Invalid autosearch value")
log.url.debug("url = {}".format(url))
return url
def qurl_from_user_input(urlstr):
"""Get a QUrl based on a user input. Additionally handles IPv6 addresses.
QUrl.fromUserInput handles something like '::1' as a file URL instead of an
IPv6, so we first try to handle it as a valid IPv6, and if that fails we
use QUrl.fromUserInput.
WORKAROUND - https://bugreports.qt.io/browse/QTBUG-41089
FIXME - Maybe https://codereview.qt-project.org/#/c/93851/ has a better way
to solve this?
https://github.com/qutebrowser/qutebrowser/issues/109
Args:
urlstr: The URL as string.
Return:
The converted QUrl.
"""
# First we try very liberally to separate something like an IPv6 from the
# rest (e.g. path info or parameters)
match = re.match(r'\[?([0-9a-fA-F:.]+)\]?(.*)', urlstr.strip())
if match:
ipstr, rest = match.groups()
else:
ipstr = urlstr.strip()
rest = ''
# Then we try to parse it as an IPv6, and if we fail use
# QUrl.fromUserInput.
try:
ipaddress.IPv6Address(ipstr)
except ipaddress.AddressValueError:
return QUrl.fromUserInput(urlstr)
else:
return QUrl('http://[{}]{}'.format(ipstr, rest))
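# Example behaviour (sketch, not part of the original module):
#   qurl_from_user_input('::1')        -> QUrl('http://[::1]')
#   qurl_from_user_input('::1/foo')    -> QUrl('http://[::1]/foo')
#   qurl_from_user_input('example.org') falls back to QUrl.fromUserInput.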
def invalid_url_error(url, action):
"""Display an error message for a URL.
Args:
        url: The invalid URL as a QUrl.
        action: The action which was interrupted by the error.
"""
if url.isValid():
raise ValueError("Calling invalid_url_error with valid URL {}".format(
url.toDisplayString()))
errstring = get_errstring(
url, "Trying to {} with invalid URL".format(action))
message.error(errstring)
def raise_cmdexc_if_invalid(url):
"""Check if the given QUrl is invalid, and if so, raise a CommandError."""
if not url.isValid():
raise cmdexc.CommandError(get_errstring(url))
def get_path_if_valid(pathstr, cwd=None, relative=False, check_exists=False):
"""Check if path is a valid path.
Args:
pathstr: The path as string.
cwd: The current working directory, or None.
relative: Whether to resolve relative files.
check_exists: Whether to check if the file
            actually exists on the filesystem.
Return:
The path if it is a valid path, None otherwise.
"""
pathstr = pathstr.strip()
log.url.debug("Checking if {!r} is a path".format(pathstr))
expanded = os.path.expanduser(pathstr)
if os.path.isabs(expanded):
path = expanded
elif relative and cwd:
path = os.path.join(cwd, expanded)
elif relative:
try:
path = os.path.abspath(expanded)
except OSError:
path = None
else:
path = None
if check_exists:
if path is not None and os.path.exists(path):
log.url.debug("URL is a local file")
else:
path = None
return path
def filename_from_url(url):
"""Get a suitable filename from a URL.
Args:
url: The URL to parse, as a QUrl.
Return:
The suggested filename as a string, or None.
"""
if not url.isValid():
return None
pathname = posixpath.basename(url.path())
if pathname:
return pathname
elif url.host():
return url.host() + '.html'
else:
return None
def host_tuple(url):
"""Get a (scheme, host, port) tuple from a QUrl.
This is suitable to identify a connection, e.g. for SSL errors.
"""
if not url.isValid():
raise InvalidUrlError(url)
scheme, host, port = url.scheme(), url.host(), url.port()
assert scheme
if not host:
raise ValueError("Got URL {} without host.".format(
url.toDisplayString()))
if port == -1:
port_mapping = {
'http': 80,
'https': 443,
'ftp': 21,
}
try:
port = port_mapping[scheme]
except KeyError:
raise ValueError("Got URL {} with unknown port.".format(
url.toDisplayString()))
return scheme, host, port
def get_errstring(url, base="Invalid URL"):
"""Get an error string for a URL.
Args:
url: The URL as a QUrl.
base: The base error string.
Return:
        A new string with url.errorString() appended, if available.
"""
url_error = url.errorString()
if url_error:
return base + " - {}".format(url_error)
else:
return base
def same_domain(url1, url2):
"""Check if url1 and url2 belong to the same website.
This will use a "public suffix list" to determine what a "top level domain"
is. All further domains are ignored.
    For example, example.com and www.example.com are considered the same, but
example.co.uk and test.co.uk are not.
Return:
True if the domains are the same, False otherwise.
"""
if not url1.isValid():
raise InvalidUrlError(url1)
if not url2.isValid():
raise InvalidUrlError(url2)
suffix1 = url1.topLevelDomain()
suffix2 = url2.topLevelDomain()
if suffix1 == '':
return url1.host() == url2.host()
if suffix1 != suffix2:
return False
domain1 = url1.host()[:-len(suffix1)].split('.')[-1]
domain2 = url2.host()[:-len(suffix2)].split('.')[-1]
return domain1 == domain2
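# Sketch of the rule above (QUrl arguments shown as plain strings):
#   same_domain('http://www.example.com', 'http://example.com') -> True
#   same_domain('http://example.co.uk', 'http://test.co.uk')    -> False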
def encoded_url(url):
"""Return the fully encoded url as string.
Args:
url: The url to encode as QUrl.
"""
return bytes(url.toEncoded()).decode('ascii')
class IncDecError(Exception):
"""Exception raised by incdec_number on problems.
Attributes:
msg: The error message.
url: The QUrl which caused the error.
"""
def __init__(self, msg, url):
super().__init__(msg)
self.url = url
self.msg = msg
def __str__(self):
return '{}: {}'.format(self.msg, self.url.toString())
def _get_incdec_value(match, incdec, url, count):
"""Get an incremented/decremented URL based on a URL match."""
pre, zeroes, number, post = match.groups()
# This should always succeed because we match \d+
val = int(number)
if incdec == 'decrement':
if val <= 0:
raise IncDecError("Can't decrement {}!".format(val), url)
val -= count
elif incdec == 'increment':
val += count
else:
raise ValueError("Invalid value {} for indec!".format(incdec))
if zeroes:
if len(number) < len(str(val)):
zeroes = zeroes[1:]
elif len(number) > len(str(val)):
zeroes += '0'
return ''.join([pre, zeroes, str(val), post])
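# Zero-padding is preserved where possible (illustrative): incrementing the
# '009' in '/page/009' yields '/page/010', decrementing '010' gives '009'.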
def incdec_number(url, incdec, count=1, segments=None):
"""Find a number in the url and increment or decrement it.
Args:
url: The current url
incdec: Either 'increment' or 'decrement'
count: The number to increment or decrement by
segments: A set of URL segments to search. Valid segments are:
'host', 'path', 'query', 'anchor'.
Default: {'path', 'query'}
Return:
The new url with the number incremented/decremented.
Raises IncDecError if the url contains no number.
"""
if not url.isValid():
raise InvalidUrlError(url)
if segments is None:
segments = {'path', 'query'}
valid_segments = {'host', 'path', 'query', 'anchor'}
if segments - valid_segments:
extra_elements = segments - valid_segments
raise IncDecError("Invalid segments: {}".format(
', '.join(extra_elements)), url)
# Make a copy of the QUrl so we don't modify the original
url = QUrl(url)
# Order as they appear in a URL
segment_modifiers = [
('host', url.host, url.setHost),
('path', url.path, url.setPath),
('query', url.query, url.setQuery),
('anchor', url.fragment, url.setFragment),
]
# We're searching the last number so we walk the url segments backwards
for segment, getter, setter in reversed(segment_modifiers):
if segment not in segments:
continue
# Get the last number in a string
match = re.match(r'(.*\D|^)(0*)(\d+)(.*)', getter())
if not match:
continue
setter(_get_incdec_value(match, incdec, url, count))
return url
raise IncDecError("No number found in URL!", url)
def file_url(path):
"""Return a file:// url (as string) to the given local path.
Arguments:
path: The absolute path to the local file
"""
return QUrl.fromLocalFile(path).toString(QUrl.FullyEncoded)
def data_url(mimetype, data):
"""Get a data: QUrl for the given data."""
b64 = base64.b64encode(data).decode('ascii')
url = QUrl('data:{};base64,{}'.format(mimetype, b64))
qtutils.ensure_valid(url)
return url
def safe_display_string(qurl):
"""Get a IDN-homograph phishing safe form of the given QUrl.
If we're dealing with a Punycode-encoded URL, this prepends the hostname in
its encoded form, to make sure those URLs are distinguishable.
See https://github.com/qutebrowser/qutebrowser/issues/2547
and https://bugreports.qt.io/browse/QTBUG-60365
"""
if not qurl.isValid():
raise InvalidUrlError(qurl)
host = qurl.host(QUrl.FullyEncoded)
if '..' in host: # pragma: no cover
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-60364
return '(unparseable URL!) {}'.format(qurl.toDisplayString())
for part in host.split('.'):
if part.startswith('xn--') and host != qurl.host(QUrl.FullyDecoded):
return '({}) {}'.format(host, qurl.toDisplayString())
return qurl.toDisplayString()
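# Example (hypothetical host for illustration): a URL whose encoded host is
# 'xn--pple-43d.com' but decodes differently would be rendered as
# '(xn--pple-43d.com) <decoded display string>' to keep the Punycode visible.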
class InvalidProxyTypeError(Exception):
"""Error raised when proxy_from_url gets an unknown proxy type."""
def __init__(self, typ):
super().__init__("Invalid proxy type {}!".format(typ))
def proxy_from_url(url):
"""Create a QNetworkProxy from QUrl and a proxy type.
Args:
url: URL of a proxy (possibly with credentials).
Return:
New QNetworkProxy.
"""
if not url.isValid():
raise InvalidUrlError(url)
scheme = url.scheme()
if scheme in ['pac+http', 'pac+https', 'pac+file']:
fetcher = pac.PACFetcher(url)
fetcher.fetch()
return fetcher
types = {
'http': QNetworkProxy.HttpProxy,
'socks': QNetworkProxy.Socks5Proxy,
'socks5': QNetworkProxy.Socks5Proxy,
'direct': QNetworkProxy.NoProxy,
}
if scheme not in types:
raise InvalidProxyTypeError(scheme)
proxy = QNetworkProxy(types[scheme], url.host())
if url.port() != -1:
proxy.setPort(url.port())
if url.userName():
proxy.setUser(url.userName())
if url.password():
proxy.setPassword(url.password())
return proxy
| 1 | 19,331 | No need for the `\` here, inside parentheses whitespace is ignored by Python. | qutebrowser-qutebrowser | py |
@@ -171,6 +171,10 @@ eqp31wM9il1n+guTNyxJd+FzVAH+hCZE5K+tCgVDdVFUlDEHHbS/wqb2PSIoouLV
input: "{http.request.tls.client.san.ips.0}",
expect: "127.0.0.1",
},
+ {
+ input: "{http.request.tls.client.certificate_pem}",
+ expect: "<empty>",
+ },
} {
actual := repl.ReplaceAll(tc.input, "<empty>")
if actual != tc.expect { | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"net/http"
"net/http/httptest"
"testing"
"github.com/caddyserver/caddy/v2"
)
func TestHTTPVarReplacement(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
repl := caddy.NewReplacer()
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
req = req.WithContext(ctx)
req.Host = "example.com:80"
req.RemoteAddr = "localhost:1234"
clientCert := []byte(`-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1DYWRk
eSBUZXN0IENBMB4XDTE4MDcyNDIxMzUwNVoXDTI4MDcyMTIxMzUwNVowHTEbMBkG
A1UEAwwSY2xpZW50LmxvY2FsZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
iQKBgQDFDEpzF0ew68teT3xDzcUxVFaTII+jXH1ftHXxxP4BEYBU4q90qzeKFneF
z83I0nC0WAQ45ZwHfhLMYHFzHPdxr6+jkvKPASf0J2v2HDJuTM1bHBbik5Ls5eq+
fVZDP8o/VHKSBKxNs8Goc2NTsr5b07QTIpkRStQK+RJALk4x9QIDAQABo0swSTAJ
BgNVHRMEAjAAMAsGA1UdDwQEAwIHgDAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A
AAEwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADgYEANSjz2Sk+
eqp31wM9il1n+guTNyxJd+FzVAH+hCZE5K+tCgVDdVFUlDEHHbS/wqb2PSIoouLV
3Q9fgDkiUod+uIK0IynzIKvw+Cjg+3nx6NQ0IM0zo8c7v398RzB4apbXKZyeeqUH
9fNwfEi+OoXR6s+upSKobCmLGLGi9Na5s5g=
-----END CERTIFICATE-----`)
block, _ := pem.Decode(clientCert)
if block == nil {
t.Fatalf("failed to decode PEM certificate")
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
t.Fatalf("failed to decode PEM certificate: %v", err)
}
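	// Emulate a completed mutual-TLS 1.3 handshake so the
	// {http.request.tls.*} placeholders below resolve to concrete values.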
req.TLS = &tls.ConnectionState{
Version: tls.VersionTLS13,
HandshakeComplete: true,
ServerName: "foo.com",
CipherSuite: tls.TLS_AES_256_GCM_SHA384,
PeerCertificates: []*x509.Certificate{cert},
NegotiatedProtocol: "h2",
NegotiatedProtocolIsMutual: true,
}
res := httptest.NewRecorder()
addHTTPVarsToReplacer(repl, req, res)
for i, tc := range []struct {
input string
expect string
}{
{
input: "{http.request.scheme}",
expect: "https",
},
{
input: "{http.request.host}",
expect: "example.com",
},
{
input: "{http.request.port}",
expect: "80",
},
{
input: "{http.request.hostport}",
expect: "example.com:80",
},
{
input: "{http.request.remote.host}",
expect: "localhost",
},
{
input: "{http.request.remote.port}",
expect: "1234",
},
{
input: "{http.request.host.labels.0}",
expect: "com",
},
{
input: "{http.request.host.labels.1}",
expect: "example",
},
{
input: "{http.request.host.labels.2}",
expect: "<empty>",
},
{
input: "{http.request.tls.cipher_suite}",
expect: "TLS_AES_256_GCM_SHA384",
},
{
input: "{http.request.tls.proto}",
expect: "h2",
},
{
input: "{http.request.tls.proto_mutual}",
expect: "true",
},
{
input: "{http.request.tls.resumed}",
expect: "false",
},
{
input: "{http.request.tls.server_name}",
expect: "foo.com",
},
{
input: "{http.request.tls.version}",
expect: "tls1.3",
},
{
input: "{http.request.tls.client.fingerprint}",
expect: "9f57b7b497cceacc5459b76ac1c3afedbc12b300e728071f55f84168ff0f7702",
},
{
input: "{http.request.tls.client.issuer}",
expect: "CN=Caddy Test CA",
},
{
input: "{http.request.tls.client.serial}",
expect: "2",
},
{
input: "{http.request.tls.client.subject}",
expect: "CN=client.localdomain",
},
{
input: "{http.request.tls.client.san.dns_names}",
expect: "[localhost]",
},
{
input: "{http.request.tls.client.san.dns_names.0}",
expect: "localhost",
},
{
input: "{http.request.tls.client.san.dns_names.1}",
expect: "<empty>",
},
{
input: "{http.request.tls.client.san.ips}",
expect: "[127.0.0.1]",
},
{
input: "{http.request.tls.client.san.ips.0}",
expect: "127.0.0.1",
},
} {
actual := repl.ReplaceAll(tc.input, "<empty>")
if actual != tc.expect {
t.Errorf("Test %d: Expected placeholder %s to be '%s' but got '%s'",
i, tc.input, tc.expect, actual)
}
}
}
| 1 | 15,414 | This doesn't seem like the right expected result... it should be the same as the `clientCert` var defined above. | caddyserver-caddy | go |
@@ -391,8 +391,6 @@ static void wlr_surface_flush_damage(struct wlr_surface *surface,
release:
pixman_region32_clear(&surface->current->surface_damage);
pixman_region32_clear(&surface->current->buffer_damage);
-
- wlr_surface_state_release_buffer(surface->current);
}
static void wlr_surface_commit_pending(struct wlr_surface *surface) { | 1 | #include <assert.h>
#include <stdlib.h>
#include <wayland-server.h>
#include <wlr/util/log.h>
#include <wlr/egl.h>
#include <wlr/render/interface.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/render/matrix.h>
static void wlr_surface_state_reset_buffer(struct wlr_surface_state *state) {
if (state->buffer) {
wl_list_remove(&state->buffer_destroy_listener.link);
state->buffer = NULL;
}
}
static void buffer_destroy(struct wl_listener *listener, void *data) {
struct wlr_surface_state *state =
wl_container_of(listener, state, buffer_destroy_listener);
wl_list_remove(&state->buffer_destroy_listener.link);
state->buffer = NULL;
}
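/**
 * Send wl_buffer.release to the client so it can reuse the backing storage,
 * and stop tracking the buffer.
 */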
static void wlr_surface_state_release_buffer(struct wlr_surface_state *state) {
if (state->buffer) {
wl_resource_post_event(state->buffer, WL_BUFFER_RELEASE);
wl_list_remove(&state->buffer_destroy_listener.link);
state->buffer = NULL;
}
}
static void wlr_surface_state_set_buffer(struct wlr_surface_state *state,
struct wl_resource *buffer) {
state->buffer = buffer;
if (buffer) {
wl_resource_add_destroy_listener(buffer,
&state->buffer_destroy_listener);
state->buffer_destroy_listener.notify = buffer_destroy;
}
}
static void surface_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void surface_attach(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *buffer, int32_t sx, int32_t sy) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_BUFFER;
surface->pending->sx = sx;
surface->pending->sy = sy;
wlr_surface_state_reset_buffer(surface->pending);
wlr_surface_state_set_buffer(surface->pending, buffer);
}
static void surface_damage(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width, int32_t height) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending->invalid |= WLR_SURFACE_INVALID_SURFACE_DAMAGE;
pixman_region32_union_rect(&surface->pending->surface_damage,
&surface->pending->surface_damage,
x, y, width, height);
}
static void destroy_frame_callback(struct wl_resource *resource) {
struct wlr_frame_callback *cb = wl_resource_get_user_data(resource);
wl_list_remove(&cb->link);
free(cb);
}
static void surface_frame(struct wl_client *client,
struct wl_resource *resource, uint32_t callback) {
struct wlr_frame_callback *cb;
struct wlr_surface *surface = wl_resource_get_user_data(resource);
cb = malloc(sizeof(struct wlr_frame_callback));
if (cb == NULL) {
wl_resource_post_no_memory(resource);
return;
}
cb->resource = wl_resource_create(client,
&wl_callback_interface, 1, callback);
if (cb->resource == NULL) {
free(cb);
wl_resource_post_no_memory(resource);
return;
}
wl_resource_set_implementation(cb->resource,
NULL, cb, destroy_frame_callback);
wl_list_insert(surface->pending->frame_callback_list.prev, &cb->link);
surface->pending->invalid |= WLR_SURFACE_INVALID_FRAME_CALLBACK_LIST;
}
static void surface_set_opaque_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if ((surface->pending->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
pixman_region32_clear(&surface->pending->opaque);
}
surface->pending->invalid |= WLR_SURFACE_INVALID_OPAQUE_REGION;
if (region_resource) {
pixman_region32_t *region = wl_resource_get_user_data(region_resource);
pixman_region32_copy(&surface->pending->opaque, region);
} else {
pixman_region32_clear(&surface->pending->opaque);
}
}
static void surface_set_input_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_INPUT_REGION;
if (region_resource) {
pixman_region32_t *region = wl_resource_get_user_data(region_resource);
pixman_region32_copy(&surface->pending->input, region);
} else {
pixman_region32_init_rect(&surface->pending->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
}
}
static void wlr_surface_update_size(struct wlr_surface *surface,
		struct wlr_surface_state *state) {
if (!state->buffer) {
state->height = 0;
state->width = 0;
return;
}
int scale = state->scale;
enum wl_output_transform transform = state->transform;
wlr_texture_get_buffer_size(surface->texture, state->buffer,
&state->buffer_width, &state->buffer_height);
int _width = state->buffer_width / scale;
int _height = state->buffer_height / scale;
if (transform == WL_OUTPUT_TRANSFORM_90 ||
transform == WL_OUTPUT_TRANSFORM_270 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_90 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_270) {
int tmp = _width;
_width = _height;
_height = tmp;
}
struct wlr_frame_callback *cb, *tmp;
wl_list_for_each_safe(cb, tmp, &state->frame_callback_list, link) {
wl_resource_destroy(cb->resource);
}
wl_list_init(&state->frame_callback_list);
state->width = _width;
state->height = _height;
}
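/**
 * Convert a region from surface-local coordinates to buffer coordinates,
 * applying the output transform first and the buffer scale second.
 */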
static void wlr_surface_to_buffer_region(int scale,
enum wl_output_transform transform, pixman_region32_t *surface_region,
pixman_region32_t *buffer_region,
int width, int height) {
pixman_box32_t *src_rects, *dest_rects;
int nrects, i;
src_rects = pixman_region32_rectangles(surface_region, &nrects);
dest_rects = malloc(nrects * sizeof(*dest_rects));
if (!dest_rects) {
return;
}
for (i = 0; i < nrects; i++) {
switch (transform) {
default:
case WL_OUTPUT_TRANSFORM_NORMAL:
dest_rects[i].x1 = src_rects[i].x1;
dest_rects[i].y1 = src_rects[i].y1;
dest_rects[i].x2 = src_rects[i].x2;
dest_rects[i].y2 = src_rects[i].y2;
break;
case WL_OUTPUT_TRANSFORM_90:
dest_rects[i].x1 = height - src_rects[i].y2;
dest_rects[i].y1 = src_rects[i].x1;
dest_rects[i].x2 = height - src_rects[i].y1;
dest_rects[i].y2 = src_rects[i].x2;
break;
case WL_OUTPUT_TRANSFORM_180:
dest_rects[i].x1 = width - src_rects[i].x2;
dest_rects[i].y1 = height - src_rects[i].y2;
dest_rects[i].x2 = width - src_rects[i].x1;
dest_rects[i].y2 = height - src_rects[i].y1;
break;
case WL_OUTPUT_TRANSFORM_270:
dest_rects[i].x1 = src_rects[i].y1;
dest_rects[i].y1 = width - src_rects[i].x2;
dest_rects[i].x2 = src_rects[i].y2;
dest_rects[i].y2 = width - src_rects[i].x1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED:
dest_rects[i].x1 = width - src_rects[i].x2;
dest_rects[i].y1 = src_rects[i].y1;
dest_rects[i].x2 = width - src_rects[i].x1;
dest_rects[i].y2 = src_rects[i].y2;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_90:
dest_rects[i].x1 = height - src_rects[i].y2;
dest_rects[i].y1 = width - src_rects[i].x2;
dest_rects[i].x2 = height - src_rects[i].y1;
dest_rects[i].y2 = width - src_rects[i].x1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_180:
dest_rects[i].x1 = src_rects[i].x1;
dest_rects[i].y1 = height - src_rects[i].y2;
dest_rects[i].x2 = src_rects[i].x2;
dest_rects[i].y2 = height - src_rects[i].y1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_270:
dest_rects[i].x1 = src_rects[i].y1;
dest_rects[i].y1 = src_rects[i].x1;
dest_rects[i].x2 = src_rects[i].y2;
dest_rects[i].y2 = src_rects[i].x2;
break;
}
}
if (scale != 1) {
for (i = 0; i < nrects; i++) {
dest_rects[i].x1 *= scale;
dest_rects[i].x2 *= scale;
dest_rects[i].y1 *= scale;
dest_rects[i].y2 *= scale;
}
}
pixman_region32_fini(buffer_region);
pixman_region32_init_rects(buffer_region, dest_rects, nrects);
free(dest_rects);
}
/**
* Append pending state to current state and clear pending state.
*/
static void wlr_surface_move_state(struct wlr_surface *surface, struct wlr_surface_state *next,
struct wlr_surface_state *state) {
bool update_damage = false;
bool update_size = false;
if ((next->invalid & WLR_SURFACE_INVALID_SCALE)) {
state->scale = next->scale;
update_size = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_TRANSFORM)) {
state->transform = next->transform;
update_size = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_BUFFER)) {
wlr_surface_state_release_buffer(state);
wlr_surface_state_set_buffer(state, next->buffer);
wlr_surface_state_reset_buffer(next);
update_size = true;
}
if (update_size) {
wlr_surface_update_size(surface, state);
}
if ((next->invalid & WLR_SURFACE_INVALID_SURFACE_DAMAGE)) {
pixman_region32_union(&state->surface_damage,
&state->surface_damage,
&next->surface_damage);
pixman_region32_intersect_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width,
state->height);
pixman_region32_clear(&next->surface_damage);
update_damage = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_BUFFER_DAMAGE)) {
pixman_region32_union(&state->buffer_damage,
&state->buffer_damage,
&next->buffer_damage);
pixman_region32_clear(&next->buffer_damage);
update_damage = true;
}
if (update_damage) {
pixman_region32_t buffer_damage;
pixman_region32_init(&buffer_damage);
wlr_surface_to_buffer_region(state->scale, state->transform,
&state->surface_damage, &buffer_damage, state->width,
state->height);
pixman_region32_union(&state->buffer_damage,
&state->buffer_damage, &buffer_damage);
pixman_region32_fini(&buffer_damage);
pixman_region32_intersect_rect(&state->buffer_damage,
&state->buffer_damage, 0, 0,
state->buffer_width, state->buffer_height);
}
if ((next->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
// TODO: process buffer
pixman_region32_clear(&next->opaque);
}
if ((next->invalid & WLR_SURFACE_INVALID_INPUT_REGION)) {
// TODO: process buffer
pixman_region32_copy(&state->input, &next->input);
}
if ((next->invalid & WLR_SURFACE_INVALID_SUBSURFACE_POSITION)) {
state->subsurface_position.x = next->subsurface_position.x;
state->subsurface_position.y = next->subsurface_position.y;
next->subsurface_position.x = 0;
next->subsurface_position.y = 0;
}
if ((next->invalid & WLR_SURFACE_INVALID_FRAME_CALLBACK_LIST)) {
wl_list_insert_list(&state->frame_callback_list, &next->frame_callback_list);
wl_list_init(&next->frame_callback_list);
}
state->invalid |= next->invalid;
next->invalid = 0;
}
static void wlr_surface_damage_subsurfaces(struct wlr_subsurface *subsurface) {
// XXX: This is probably the wrong way to do it, because this damage should
// come from the client, but weston doesn't do it correctly either and it
// seems to work ok. See the comment on weston_surface_damage for more info
// about a better approach.
struct wlr_surface *surface = subsurface->surface;
pixman_region32_union_rect(&surface->current->surface_damage,
&surface->current->surface_damage,
0, 0, surface->current->width,
surface->current->height);
subsurface->reordered = false;
struct wlr_subsurface *child;
wl_list_for_each(child, &subsurface->surface->subsurface_list, parent_link) {
wlr_surface_damage_subsurfaces(child);
}
}
static void wlr_surface_flush_damage(struct wlr_surface *surface,
bool reupload_buffer) {
if (!surface->current->buffer) {
return;
}
struct wl_shm_buffer *buffer = wl_shm_buffer_get(surface->current->buffer);
if (!buffer) {
if (wlr_renderer_buffer_is_drm(surface->renderer,
surface->current->buffer)) {
wlr_texture_upload_drm(surface->texture, surface->current->buffer);
goto release;
} else {
wlr_log(L_INFO, "Unknown buffer handle attached");
return;
}
}
uint32_t format = wl_shm_buffer_get_format(buffer);
if (reupload_buffer) {
wlr_texture_upload_shm(surface->texture, format, buffer);
} else {
pixman_region32_t damage = surface->current->buffer_damage;
if (!pixman_region32_not_empty(&damage)) {
goto release;
}
int n;
pixman_box32_t *rects = pixman_region32_rectangles(&damage, &n);
for (int i = 0; i < n; ++i) {
pixman_box32_t rect = rects[i];
if (!wlr_texture_update_shm(surface->texture, format,
rect.x1, rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1,
buffer)) {
break;
}
}
}
release:
pixman_region32_clear(&surface->current->surface_damage);
pixman_region32_clear(&surface->current->buffer_damage);
wlr_surface_state_release_buffer(surface->current);
}
static void wlr_surface_commit_pending(struct wlr_surface *surface) {
int32_t oldw = surface->current->buffer_width;
int32_t oldh = surface->current->buffer_height;
bool null_buffer_commit =
(surface->pending->invalid & WLR_SURFACE_INVALID_BUFFER &&
surface->pending->buffer == NULL);
wlr_surface_move_state(surface, surface->pending, surface->current);
if (null_buffer_commit) {
surface->texture->valid = false;
}
bool reupload_buffer = oldw != surface->current->buffer_width ||
oldh != surface->current->buffer_height;
wlr_surface_flush_damage(surface, reupload_buffer);
// commit subsurface order
struct wlr_subsurface *subsurface;
wl_list_for_each_reverse(subsurface, &surface->subsurface_pending_list,
parent_pending_link) {
wl_list_remove(&subsurface->parent_link);
wl_list_insert(&surface->subsurface_list, &subsurface->parent_link);
if (subsurface->reordered) {
// TODO: damage all the subsurfaces
wlr_surface_damage_subsurfaces(subsurface);
}
}
// TODO: add the invalid bitfield to this callback
wl_signal_emit(&surface->events.commit, surface);
}
static bool wlr_subsurface_is_synchronized(struct wlr_subsurface *subsurface) {
while (subsurface) {
if (subsurface->synchronized) {
return true;
}
if (!subsurface->parent) {
return false;
}
subsurface = subsurface->parent->subsurface;
}
return false;
}
/**
* Recursive function to commit the effectively synchronized children.
*/
static void wlr_subsurface_parent_commit(struct wlr_subsurface *subsurface,
bool synchronized) {
struct wlr_surface *surface = subsurface->surface;
if (synchronized || subsurface->synchronized) {
if (subsurface->has_cache) {
wlr_surface_move_state(surface, subsurface->cached, surface->pending);
wlr_surface_commit_pending(surface);
subsurface->has_cache = false;
subsurface->cached->invalid = 0;
}
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurface_list, parent_link) {
wlr_subsurface_parent_commit(tmp, true);
}
}
}
static void wlr_subsurface_commit(struct wlr_subsurface *subsurface) {
struct wlr_surface *surface = subsurface->surface;
if (wlr_subsurface_is_synchronized(subsurface)) {
wlr_surface_move_state(surface, surface->pending, subsurface->cached);
subsurface->has_cache = true;
} else {
if (subsurface->has_cache) {
wlr_surface_move_state(surface, subsurface->cached, surface->pending);
wlr_surface_commit_pending(surface);
subsurface->has_cache = false;
} else {
wlr_surface_commit_pending(surface);
}
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurface_list, parent_link) {
wlr_subsurface_parent_commit(tmp, false);
}
}
}
static void surface_commit(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
struct wlr_subsurface *subsurface = surface->subsurface;
if (subsurface) {
wlr_subsurface_commit(subsurface);
return;
}
wlr_surface_commit_pending(surface);
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurface_list, parent_link) {
wlr_subsurface_parent_commit(tmp, false);
}
}
static void surface_set_buffer_transform(struct wl_client *client,
struct wl_resource *resource, int transform) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_TRANSFORM;
surface->pending->transform = transform;
}
static void surface_set_buffer_scale(struct wl_client *client,
struct wl_resource *resource,
int32_t scale) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_SCALE;
surface->pending->scale = scale;
}
static void surface_damage_buffer(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width,
int32_t height) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending->invalid |= WLR_SURFACE_INVALID_BUFFER_DAMAGE;
pixman_region32_union_rect(&surface->pending->buffer_damage,
&surface->pending->buffer_damage,
x, y, width, height);
}
const struct wl_surface_interface surface_interface = {
.destroy = surface_destroy,
.attach = surface_attach,
.damage = surface_damage,
.frame = surface_frame,
.set_opaque_region = surface_set_opaque_region,
.set_input_region = surface_set_input_region,
.commit = surface_commit,
.set_buffer_transform = surface_set_buffer_transform,
.set_buffer_scale = surface_set_buffer_scale,
.damage_buffer = surface_damage_buffer
};
static struct wlr_surface_state *wlr_surface_state_create() {
struct wlr_surface_state *state = calloc(1, sizeof(struct wlr_surface_state));
state->scale = 1;
state->transform = WL_OUTPUT_TRANSFORM_NORMAL;
wl_list_init(&state->frame_callback_list);
pixman_region32_init(&state->surface_damage);
pixman_region32_init(&state->buffer_damage);
pixman_region32_init(&state->opaque);
pixman_region32_init_rect(&state->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
return state;
}
static void wlr_surface_state_destroy(struct wlr_surface_state *state) {
wlr_surface_state_reset_buffer(state);
struct wlr_frame_callback *cb, *tmp;
wl_list_for_each_safe(cb, tmp, &state->frame_callback_list, link) {
wl_resource_destroy(cb->resource);
}
pixman_region32_fini(&state->surface_damage);
pixman_region32_fini(&state->buffer_damage);
pixman_region32_fini(&state->opaque);
pixman_region32_fini(&state->input);
free(state);
}
void wlr_subsurface_destroy(struct wlr_subsurface *subsurface) {
wlr_surface_state_destroy(subsurface->cached);
if (subsurface->parent) {
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy_listener.link);
}
wl_resource_set_user_data(subsurface->resource, NULL);
if (subsurface->surface) {
subsurface->surface->subsurface = NULL;
}
free(subsurface);
}
static void destroy_surface(struct wl_resource *resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
wl_signal_emit(&surface->events.destroy, surface);
if (surface->subsurface) {
wlr_subsurface_destroy(surface->subsurface);
}
wlr_texture_destroy(surface->texture);
wlr_surface_state_destroy(surface->pending);
wlr_surface_state_destroy(surface->current);
free(surface);
}
struct wlr_surface *wlr_surface_create(struct wl_resource *res,
struct wlr_renderer *renderer) {
struct wlr_surface *surface;
if (!(surface = calloc(1, sizeof(struct wlr_surface)))) {
wl_resource_post_no_memory(res);
return NULL;
}
wlr_log(L_DEBUG, "New wlr_surface %p (res %p)", surface, res);
surface->renderer = renderer;
surface->texture = wlr_render_texture_create(renderer);
surface->resource = res;
surface->current = wlr_surface_state_create();
surface->pending = wlr_surface_state_create();
wl_signal_init(&surface->events.commit);
wl_signal_init(&surface->events.destroy);
wl_list_init(&surface->subsurface_list);
wl_list_init(&surface->subsurface_pending_list);
wl_resource_set_implementation(res, &surface_interface,
surface, destroy_surface);
return surface;
}
void wlr_surface_get_matrix(struct wlr_surface *surface,
float (*matrix)[16],
const float (*projection)[16],
const float (*transform)[16]) {
int width = surface->texture->width / surface->current->scale;
int height = surface->texture->height / surface->current->scale;
float scale[16];
wlr_matrix_identity(matrix);
if (transform) {
wlr_matrix_mul(matrix, transform, matrix);
}
wlr_matrix_scale(&scale, width, height, 1);
wlr_matrix_mul(matrix, &scale, matrix);
wlr_matrix_mul(projection, matrix, matrix);
}
int wlr_surface_set_role(struct wlr_surface *surface, const char *role,
struct wl_resource *error_resource, uint32_t error_code) {
assert(role);
if (surface->role == NULL ||
surface->role == role ||
strcmp(surface->role, role) == 0) {
surface->role = role;
return 0;
}
wl_resource_post_error(error_resource, error_code,
"Cannot assign role %s to wl_surface@%d, already has role %s\n",
role,
wl_resource_get_id(surface->resource),
surface->role);
return -1;
}
static void subsurface_resource_destroy(struct wl_resource *resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
if (subsurface) {
wlr_subsurface_destroy(subsurface);
}
}
static void subsurface_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void subsurface_set_position(struct wl_client *client,
struct wl_resource *resource, int32_t x, int32_t y) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
struct wlr_surface *surface = subsurface->surface;
surface->pending->invalid |= WLR_SURFACE_INVALID_SUBSURFACE_POSITION;
surface->pending->subsurface_position.x = x;
surface->pending->subsurface_position.y = y;
}
static struct wlr_subsurface *subsurface_find_sibling(
struct wlr_subsurface *subsurface, struct wlr_surface *surface) {
struct wlr_surface *parent = subsurface->parent;
struct wlr_subsurface *sibling;
wl_list_for_each(sibling, &parent->subsurface_list, parent_link) {
		if (sibling->surface == surface && sibling != subsurface) {
			return sibling;
		}
}
return NULL;
}
static void subsurface_place_above(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
if (!subsurface) {
return;
}
struct wlr_surface *sibling_surface =
wl_resource_get_user_data(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%d is not a parent or sibling",
"place_above", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(sibling->parent_pending_link.prev,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_place_below(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
struct wlr_surface *sibling_surface =
wl_resource_get_user_data(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%d is not a parent or sibling",
"place_below", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(&sibling->parent_pending_link,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_set_sync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
if (subsurface) {
subsurface->synchronized = true;
}
}
static void subsurface_set_desync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = wl_resource_get_user_data(resource);
if (subsurface && subsurface->synchronized) {
subsurface->synchronized = false;
if (!wlr_subsurface_is_synchronized(subsurface)) {
// TODO: do a synchronized commit to flush the cache
wlr_subsurface_parent_commit(subsurface, true);
}
}
}
static const struct wl_subsurface_interface subsurface_implementation = {
.destroy = subsurface_destroy,
.set_position = subsurface_set_position,
.place_above = subsurface_place_above,
.place_below = subsurface_place_below,
.set_sync = subsurface_set_sync,
.set_desync = subsurface_set_desync,
};
static void subsurface_handle_parent_destroy(struct wl_listener *listener,
void *data) {
struct wlr_subsurface *subsurface =
wl_container_of(listener, subsurface, parent_destroy_listener);
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy_listener.link);
subsurface->parent = NULL;
}
void wlr_surface_make_subsurface(struct wlr_surface *surface,
struct wlr_surface *parent, uint32_t id) {
assert(surface->subsurface == NULL);
struct wlr_subsurface *subsurface =
calloc(1, sizeof(struct wlr_subsurface));
if (!subsurface) {
return;
}
subsurface->cached = wlr_surface_state_create();
subsurface->synchronized = true;
subsurface->surface = surface;
// link parent
subsurface->parent = parent;
wl_signal_add(&parent->events.destroy,
&subsurface->parent_destroy_listener);
subsurface->parent_destroy_listener.notify =
subsurface_handle_parent_destroy;
wl_list_insert(&parent->subsurface_list, &subsurface->parent_link);
wl_list_insert(&parent->subsurface_pending_list,
&subsurface->parent_pending_link);
struct wl_client *client = wl_resource_get_client(surface->resource);
subsurface->resource =
wl_resource_create(client, &wl_subsurface_interface, 1, id);
wl_resource_set_implementation(subsurface->resource,
&subsurface_implementation, subsurface,
subsurface_resource_destroy);
surface->subsurface = subsurface;
}
struct wlr_surface *wlr_surface_get_main_surface(struct wlr_surface *surface) {
struct wlr_subsurface *sub;
while (surface && (sub = surface->subsurface)) {
surface = sub->parent;
}
return surface;
}
struct wlr_subsurface *wlr_surface_subsurface_at(struct wlr_surface *surface,
double sx, double sy, double *sub_x, double *sub_y) {
struct wlr_subsurface *subsurface;
wl_list_for_each(subsurface, &surface->subsurface_list, parent_link) {
double _sub_x = subsurface->surface->current->subsurface_position.x;
double _sub_y = subsurface->surface->current->subsurface_position.y;
struct wlr_subsurface *sub =
wlr_surface_subsurface_at(subsurface->surface, _sub_x + sx,
_sub_y + sy, sub_x, sub_y);
if (sub) {
// TODO: This won't work for nested subsurfaces. Convert sub_x and
// sub_y to the parent coordinate system
return sub;
}
int sub_width = subsurface->surface->current->buffer_width;
int sub_height = subsurface->surface->current->buffer_height;
if ((sx > _sub_x && sx < _sub_x + sub_width) &&
(sy > _sub_y && sy < _sub_y + sub_height)) {
if (pixman_region32_contains_point(
&subsurface->surface->current->input,
sx - _sub_x, sy - _sub_y, NULL)) {
*sub_x = _sub_x;
*sub_y = _sub_y;
return subsurface;
}
}
}
return NULL;
}
| 1 | 8,370 | need to rename this now. | swaywm-wlroots | c |
@@ -42,7 +42,16 @@ public class ExpectValidPrivateTransactionReceipt implements PrivateCondition {
node.execute(transactions.getPrivateTransactionReceipt(transactionHash));
assertThat(actualReceipt)
.usingRecursiveComparison()
- .ignoringFields("commitmentHash", "logs")
+ .ignoringFields(
+ "commitmentHash",
+ "logs",
+ "blockHash",
+ "blockNumber",
+ "logsBloom",
+ "transactionIndex") // TODO: The fields blockHash, blockNumber, logsBloom and
+ // transactionIndex have to be ignored as the class
+ // org.web3j.protocol.besu.response.privacy.PrivateTransactionReceipt does not contain these
+ // fields
.isEqualTo(expectedReceipt);
assertThat(actualReceipt.getLogs().size()).isEqualTo(expectedReceipt.getLogs().size()); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance.dsl.privacy.condition;
import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.tests.acceptance.dsl.privacy.PrivacyNode;
import org.hyperledger.besu.tests.acceptance.dsl.privacy.transaction.PrivacyTransactions;
import org.web3j.protocol.besu.response.privacy.PrivateTransactionReceipt;
public class ExpectValidPrivateTransactionReceipt implements PrivateCondition {
private final PrivacyTransactions transactions;
private final String transactionHash;
private final PrivateTransactionReceipt expectedReceipt;
public ExpectValidPrivateTransactionReceipt(
final PrivacyTransactions transactions,
final String transactionHash,
final PrivateTransactionReceipt expectedReceipt) {
this.transactions = transactions;
this.transactionHash = transactionHash;
this.expectedReceipt = expectedReceipt;
}
@Override
public void verify(final PrivacyNode node) {
final PrivateTransactionReceipt actualReceipt =
node.execute(transactions.getPrivateTransactionReceipt(transactionHash));
assertThat(actualReceipt)
.usingRecursiveComparison()
.ignoringFields("commitmentHash", "logs")
.isEqualTo(expectedReceipt);
assertThat(actualReceipt.getLogs().size()).isEqualTo(expectedReceipt.getLogs().size());
for (int i = 0; i < expectedReceipt.getLogs().size(); i++) {
assertThat(actualReceipt.getLogs().get(i))
.usingRecursiveComparison()
.ignoringFields("blockHash", "blockNumber")
.isEqualTo(expectedReceipt.getLogs().get(i));
}
}
}
| 1 | 22,396 | NIT: This shouldn't be a TODO. It is just a note isn't it? | hyperledger-besu | java |
@@ -1644,6 +1644,8 @@ class CommandDispatcher:
"""
try:
elem.set_value(text)
+ except webelem.OrphanedError as e:
+ message.warning('Edited element vanished')
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
import typing
from PyQt5.QtWidgets import QApplication, QTabBar, QDialog
from PyQt5.QtCore import Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configdata
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem, downloads)
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, debug, standarddir)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import urlmodel, miscmodels
from qutebrowser.mainwindow import mainwindow
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_editor: The ExternalEditor object.
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self, private):
"""Get a tabbed-browser from a new window."""
new_window = mainwindow.MainWindow(private=private)
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
related=False, private=None):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
            window: Whether to open in a new window
            related: If opening a new tab, position the tab as related to
                the current one (like clicking on a link).
private: If opening a new window, open it in private browsing mode.
If not given, inherit the current window's mode.
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window, private), 'tbwp')
if window and private is None:
private = self._tabbed_browser.private
if window or private:
tabbed_browser = self._new_tabbed_browser(private)
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, related=related)
elif background:
tabbed_browser.tabopen(url, background=True, related=related)
else:
widget = self._current_widget()
widget.openurl(url)
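    # Sketch (not in the original): _open(url, window=True) opens `url` in a
    # brand-new window, inheriting the current window's private-browsing mode
    # because private defaults to None.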
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _tab_focus_last(self, *, show_error=True):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
if not show_error:
return
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, prev, next_, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((prev, next_, opposite), 'pno')
if prev:
return QTabBar.SelectLeftTab
elif next_:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.val.tabs.select_on_remove
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs.select_on_remove' set to "
"'last-used'!")
else: # pragma: no cover
raise ValueError("Invalid select_on_remove value "
"{!r}!".format(conf_selection))
return None
def _tab_close(self, tab, prev=False, next_=False, opposite=False):
"""Helper function for tab_close be able to handle message.async.
Args:
            tab: Tab object to be closed.
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
        """
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(prev, next_,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
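            # Temporarily override the tab bar's selection behavior for
            # this close, then restore the previous setting.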
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, prev=False, next_=False, opposite=False,
force=False, count=None):
"""Close the current/[count]th tab.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
force: Avoid confirmation for pinned tabs.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
close = functools.partial(self._tab_close, tab, prev,
next_, opposite)
self._tabbed_browser.tab_close_prompt_if_pinned(tab, force, close)
@cmdutils.register(instance='command-dispatcher', scope='window',
name='tab-pin')
@cmdutils.argument('count', count=True)
def tab_pin(self, count=None):
"""Pin/Unpin the current/[count]th tab.
Pinning a tab shrinks it to the size of its title text.
Attempting to close a pinned tab will cause a confirmation,
unless --force is passed.
Args:
count: The tab index to pin or unpin, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
to_pin = not tab.data.pinned
self._tabbed_browser.set_tab_pinned(tab, to_pin)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=urlmodel.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, related=False,
bg=False, tab=False, window=False, count=None, secure=False,
private=False):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
count: The tab index to open the URL in, or None.
secure: Force HTTPS.
private: Open a new window in private browsing mode.
"""
if url is None:
urls = [config.val.url.default_page]
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if secure:
cur_url.setScheme('https')
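            # When opening multiple URLs, only the first one uses the
            # requested target; the rest are opened as background tabs
            # (unless new windows were requested).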
if not window and i > 0:
tab = False
bg = True
if tab or bg or window or private:
self._open(cur_url, tab, bg, window, related=related,
private=private)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
elif curtab.data.pinned:
message.info("Tab is pinned!")
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Return:
A list of URLs that can be opened.
"""
if isinstance(url, QUrl):
yield url
return
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
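        # If the first line is neither a URL nor an existing file path,
        # treat the whole input as a single search query rather than a
        # list of URLs.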
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
def _print_preview(self, tab):
"""Show a print preview."""
def print_callback(ok):
if not ok:
message.error("Printing failed!")
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(functools.partial(
tab.printing.to_printer, callback=print_callback))
diag.exec_()
def _print_pdf(self, tab, filename):
"""Print to the given PDF file."""
tab.printing.check_pdf_support()
filename = os.path.expanduser(filename)
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(filename)
log.misc.debug("Print to file: {}".format(filename))
def _print(self, tab):
"""Print with a QPrintDialog."""
def print_callback(ok):
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
def do_print():
"""Called when the dialog was closed."""
tab.printing.to_printer(diag.printer(), print_callback)
diag = QPrintDialog(tab)
if utils.is_mac:
# For some reason we get a segfault when using open() on macOS
ret = diag.exec_()
if ret == QDialog.Accepted:
do_print()
else:
diag.open(do_print)
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if pdf:
tab.printing.check_pdf_support()
else:
tab.printing.check_printer_support()
if preview:
tab.printing.check_preview_support()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
if preview:
self._print_preview(tab)
elif pdf:
self._print_pdf(tab, pdf)
else:
self._print(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
try:
history = curtab.history.serialize()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs.tabs_are_windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg)
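        # tabopen() might have opened the tab in a different window (see
        # the comment above), so fetch the tabbed browser it ended up in.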
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.val.tabs.favicons.show:
new_tabbed_browser.setTabIcon(idx, curtab.icon())
if config.val.tabs.tabs_are_windows:
new_tabbed_browser.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(history)
newtab.zoom.set_factor(curtab.zoom.factor())
new_tabbed_browser.set_tab_pinned(newtab, curtab.data.pinned)
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=miscmodels.buffer)
def tab_take(self, index):
"""Take a tab from another window.
Args:
index: The [win_id/]index of the tab to take. Or a substring
in which case the closest match will be taken.
"""
tabbed_browser, tab = self._resolve_buffer_index(index)
if tabbed_browser is self._tabbed_browser:
raise cmdexc.CommandError("Can't take a tab from the same window")
self._open(tab.url(), tab=True)
tabbed_browser.close_tab(tab, add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('win_id', completion=miscmodels.window)
def tab_give(self, win_id: int = None):
"""Give the current tab to a new or existing window if win_id given.
If no win_id is given, the tab will get detached into a new window.
Args:
win_id: The window ID of the window to give the current tab to.
"""
if win_id == self._win_id:
raise cmdexc.CommandError("Can't give a tab to the same window")
if win_id is None:
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach from a window with "
"only one tab")
tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.tabopen(self._current_url())
self._tabbed_browser.close_tab(self._current_widget(), add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated='Use :tab-give instead!')
def tab_detach(self):
"""Deprecated way to detach a tab."""
self.tab_give()
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
try:
if forward:
widget.history.forward(count)
else:
widget.history.back(count)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
- `decrement`: Decrement the last number in the URL.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window, related=True)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Note you can use `:run-with-count` to have a keybinding with a bigger
scroll increment.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_to_perc(self, perc: float = None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str = None, bottom_navigate: str = None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/qutebrowser/qutebrowser/issues/701
return
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery()
url_query_str = urlutils.query_string(url)
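        # Some sites separate query parameters with ';' instead of '&' -
        # adjust the delimiters so those parameters get parsed (and
        # filtered) correctly too.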
if '&' not in url_query_str and ';' in url_query_str:
url_query.setQueryDelimiters('=', ';')
url_query.setQuery(url_query_str)
for key in dict(url_query.queryItems()):
if key in config.val.url.yank_ignored_parameters:
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
caret = self._current_widget().caret
s = caret.selection()
if not caret.has_selection() or not s:
message.info("Nothing to yank")
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
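        # Fall back to the regular clipboard if the platform has no
        # primary selection.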
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.leave(self._win_id, KeyMode.caret, "yank selected",
maybe=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
if zoom is not None:
try:
zoom = int(zoom.rstrip('%'))
except ValueError:
raise cmdexc.CommandError("zoom: Invalid int value {}"
.format(zoom))
level = count if count is not None else zoom
if level is None:
level = config.val.zoom.default
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(int(level)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, prev=False, next_=False, force=False):
"""Close all tabs except for the current one.
Args:
prev: Keep tabs before the current.
next_: Keep tabs after the current.
force: Avoid confirmation for pinned tabs.
"""
cmdutils.check_exclusive((prev, next_), 'pn')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
def _to_close(i):
"""Helper method to check if a tab should be closed or not."""
return not (i == cur_idx or
(prev and i < cur_idx) or
(next_ and i > cur_idx))
# Check to see if we are closing any pinned tabs
if not force:
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i) and tab.data.pinned:
self._tabbed_browser.tab_close_prompt_if_pinned(
tab,
force,
lambda: self.tab_only(
prev=prev, next_=next_, force=True))
return
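        # Only the first close starts a new undo group, so a single :undo
        # restores all tabs closed by this command.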
first_tab = True
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i):
self._tabbed_browser.close_tab(tab, new_undo=first_tab)
first_tab = False
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open the last closed tab or tabs."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
def _resolve_buffer_index(self, index):
"""Resolve a buffer index to the tabbedbrowser and tab.
Args:
index: The [win_id/]index of the tab to be selected. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
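        # If the argument isn't fully numeric, treat it as a substring
        # pattern and resolve it to the closest matching tab via the
        # completion model.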
try:
for part in index_parts:
int(part)
except ValueError:
model = miscmodels.buffer()
model.set_pattern(index)
if model.count() > 0:
index = model.data(model.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
return (tabbed_browser, tabbed_browser.widget(idx-1))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('index', completion=miscmodels.buffer)
@cmdutils.argument('count', count=True)
def buffer(self, index=None, count=None):
"""Select tab by index or url/title best match.
Focuses window if necessary when index is given. If both index and
count are given, use count.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
count: The tab index to focus, starting with 1.
"""
if count is None and index is None:
raise cmdexc.CommandError("buffer: Either a count or the argument "
"index must be specified.")
if count is not None:
index = str(count)
tabbed_browser, tab = self._resolve_buffer_index(index)
window = tabbed_browser.window()
window.activateWindow()
window.raise_()
tabbed_browser.setCurrentWidget(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True)
def tab_focus(self, index: typing.Union[str, int] = None, count=None):
"""Select the tab given as argument/[count].
If neither count nor index are given, it behaves like tab-next.
If both are given, use count.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
"""
index = count if count is not None else index
if index == 'last':
self._tab_focus_last()
return
elif index == self._current_index() + 1:
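            # Focusing the tab which is already focused switches to the
            # last focused tab instead.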
self._tab_focus_last(show_error=False)
return
elif index is None:
self.tab_next()
return
if index < 0:
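            # Negative indices count from the end, so -1 is the last tab.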
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int] = None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
            index: `+` or `-` to move relative to the current tab by
                   [count] positions (default: 1), or an absolute tab
                   index to move to.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.val.tabs.wrap:
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
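                # 1-based index from the front, or a negative index
                # counting from the end.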
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
cur_idx = self._current_index()
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
def spawn(self, cmdline, userscript=False, verbose=False, detach=False):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
                          (or `$XDG_DATA_HOME`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
cmdutils.check_exclusive((userscript, detach), 'ud')
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
if userscript:
# ~ expansion is handled by the userscript module.
self._run_userscript(cmd, *args, verbose=verbose)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.val.url.start_pages[0])
def _run_userscript(self, cmd, *args, verbose=False):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
tab = self._tabbed_browser.currentWidget()
if tab is not None and tab.caret.has_selection():
env['QUTE_SELECTED_TEXT'] = tab.caret.selection()
try:
env['QUTE_SELECTED_HTML'] = tab.caret.selection(html=True)
except browsertab.UnsupportedOperationError:
pass
# FIXME:qtwebengine: If tab is None, run_async will fail!
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
userscripts.run_async(tab, cmd, *args, win_id=self._win_id,
env=env, verbose=verbose)
except userscripts.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there are more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If no url and title are provided, then save the current page as a
bookmark.
If a url and title have been provided, then save the given url as
a bookmark with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If None, use url of current page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if url is None:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}" if was_added else "Removed bookmark {}"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
        Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('dest_old', hide=True)
def download(self, url=None, dest_old=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
The form `:download [url] [dest]` is deprecated, use `:download --dest
[dest] [url]` instead.
Args:
url: The URL to download. If not given, download the current page.
dest_old: (deprecated) Same as dest.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
if dest_old is not None:
message.warning(":download [url] [dest] is deprecated - use "
":download --dest [dest] [url]")
if dest is not None:
raise cmdexc.CommandError("Can't give two destinations for the"
" download.")
dest = dest_old
# FIXME:qtwebengine do this with the QtWebEngine download manager?
download_manager = objreg.get('qtnetwork-download-manager',
scope='window', window=self._win_id)
target = None
if dest is not None:
dest = downloads.transform_path(dest)
if dest is None:
raise cmdexc.CommandError("Invalid target filename")
target = downloads.FileDownloadTarget(dest)
tab = self._current_widget()
user_agent = tab.user_agent()
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
download_manager.get(url, user_agent=user_agent, target=target)
elif mhtml_:
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebEngine:
webengine_download_manager = objreg.get(
'webengine-download-manager')
try:
webengine_download_manager.get_mhtml(tab, target)
except browsertab.UnsupportedOperationError as e:
raise cmdexc.CommandError(e)
else:
download_manager.get_mhtml(tab, target)
else:
qnam = tab.networkaccessmanager()
suggested_fn = downloads.suggested_fn_from_title(
self._current_url().path(), tab.title()
)
download_manager.get(
self._current_url(),
user_agent=user_agent,
qnam=qnam,
target=target,
suggested_fn=suggested_fn
)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page in a new tab."""
tab = self._current_widget()
if tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
def show_source_cb(source):
"""Show source as soon as it's ready."""
# WORKAROUND for https://github.com/PyCQA/pylint/issues/491
# pylint: disable=no-member
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table',
title='Source for {}'.format(current_url.toDisplayString()))
# pylint: enable=no-member
highlighted = pygments.highlight(source, lexer, formatter)
new_tab = self._tabbed_browser.tabopen()
new_tab.set_html(highlighted)
new_tab.data.viewing_source = True
tab.dump_async(show_source_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', scope='window')
def history(self, tab=True, bg=False, window=False):
"""Show browsing history.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
url = QUrl('qute://history/')
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=miscmodels.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__.__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif topic in configdata.DATA:
path = 'settings.html#{}'.format(topic)
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='info', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.value()
if text is None:
message.error("Could not get text from the focused element.")
return
assert isinstance(text, str), text
caret_position = elem.caret_position()
ed = editor.ExternalEditor(self._tabbed_browser)
ed.editing_finished.connect(functools.partial(
self.on_editing_finished, elem))
ed.edit(text, caret_position)
@cmdutils.register(instance='command-dispatcher', scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`editor.command` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_editing_finished(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the editor was closed.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_value(text)
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget =
usertypes.ClickTarget.normal,
force_event=False):
"""Click the element matching the given filter.
The given filter needs to result in exactly one element, otherwise, an
error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
force_event: Force generating a fake click event.
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found with id {}!".format(value))
return
try:
elem.click(target, force_event=force_event)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
if tab.search.search_displayed:
tab.search.clear()
if not text:
return
options = {
'ignore_case': config.val.ignore_case,
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
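            # The fresh search already lands on the first match, so it
            # counts as one step here.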
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
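            # Like in search_next: the fresh search already counts as the
            # first result.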
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
            count: How many chars to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
Available actions:
http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit)
http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine)
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
for _ in range(count):
try:
tab.action.run_string(action)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, file=False, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int] = None):
"""Evaluate a JavaScript string.
Args:
js_code: The string/file to evaluate.
file: Interpret js-code as a path to a file.
If the path is relative, the file is searched in a js/ subdir
in qutebrowser's data dir, e.g.
`~/.local/share/qutebrowser/js`.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
if file:
path = os.path.expanduser(js_code)
if not os.path.isabs(path):
path = os.path.join(standarddir.data(), 'js', path)
try:
with open(path, 'r', encoding='utf-8') as f:
js_code = f.read()
except OSError as e:
raise cmdexc.CommandError(str(e))
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
keyinfos = utils.parse_keystring(keystring)
except utils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
for keyinfo in keyinfos:
press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True, backend=usertypes.Backend.QtWebKit)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False,
private=False, related=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`editor.command` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.editing_finished.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window, private=private, related=related))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window')
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window')
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False, private=False, related=False):
"""Open a URL unless it's already open in the tab.
Args:
old_url: The original URL to compare against.
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
if bg or tab or window or private or related or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window,
private=private, related=related)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fullscreen(self, leave=False):
"""Toggle fullscreen mode.
Args:
leave: Only leave fullscreen if it was entered by the page.
"""
if leave:
tab = self._current_widget()
try:
tab.action.exit_fullscreen()
except browsertab.UnsupportedOperationError:
pass
return
window = self._tabbed_browser.window()
if window.isFullScreen():
window.setWindowState(
window.state_before_fullscreen & ~Qt.WindowFullScreen)
else:
window.state_before_fullscreen = window.windowState()
window.showFullScreen()
log.misc.debug('state before fullscreen: {}'.format(
debug.qflags_key(Qt, window.state_before_fullscreen)))
| 1 | 19,819 | I changed this to an error; that seems more appropriate than a warning here. | qutebrowser-qutebrowser | py
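For context, a minimal hypothetical sketch of the severity change this comment describes. The corresponding diff is not shown in this excerpt, so the condition and message below are stand-ins; only `cmdexc.CommandError` is taken from the file above, where other failures (e.g. in `fake_key()`) are already reported this way:

```python
# Hypothetical sketch, not the actual diff: treat the failure as an error.
# Before: the condition was only reported as a warning and execution continued.
#     message.warning("No focused window!")
# After: raising CommandError aborts the command and shows the user an error.
raise cmdexc.CommandError("No focused window!")
```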
@@ -492,7 +492,7 @@ func (sct *SmartContractTest) run(r *require.Assertions) {
if receipt.Status == uint64(iotextypes.ReceiptStatus_Success) {
numLog := 0
for _, l := range receipt.Logs {
- if !action.IsSystemLog(l) {
+ if !l.IsEvmTransfer() {
numLog++
}
} | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package execution
import (
"bytes"
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"os"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution/evm"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/blockdao"
"github.com/iotexproject/iotex-core/blockindex"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
)
// ExpectedBalance defines an account-balance pair
type ExpectedBalance struct {
Account string `json:"account"`
RawBalance string `json:"rawBalance"`
}
// GenesisBlockHeight defines an genesis blockHeight
type GenesisBlockHeight struct {
IsBering bool `json:"isBering"`
}
func (eb *ExpectedBalance) Balance() *big.Int {
balance, ok := new(big.Int).SetString(eb.RawBalance, 10)
if !ok {
log.L().Panic("invalid balance", zap.String("balance", eb.RawBalance))
}
return balance
}
func readCode(sr protocol.StateReader, addr []byte) ([]byte, error) {
var c evm.SerializableBytes
account, err := accountutil.LoadAccount(sr, hash.BytesToHash160(addr))
if err != nil {
return nil, err
}
_, err = sr.State(&c, protocol.NamespaceOption(evm.CodeKVNameSpace), protocol.KeyOption(account.CodeHash[:]))
return c[:], err
}
type Log struct {
Topics []string `json:"topics"`
Data string `json:"data"`
}
type ExecutionConfig struct {
Comment string `json:"comment"`
ContractIndex int `json:"contractIndex"`
AppendContractAddress bool `json:"appendContractAddress"`
ContractIndexToAppend int `json:"contractIndexToAppend"`
ContractAddressToAppend string `json:"contractAddressToAppend"`
ReadOnly bool `json:"readOnly"`
RawPrivateKey string `json:"rawPrivateKey"`
RawByteCode string `json:"rawByteCode"`
RawAmount string `json:"rawAmount"`
RawGasLimit uint `json:"rawGasLimit"`
RawGasPrice string `json:"rawGasPrice"`
Failed bool `json:"failed"`
RawReturnValue string `json:"rawReturnValue"`
RawExpectedGasConsumed uint `json:"rawExpectedGasConsumed"`
ExpectedStatus uint64 `json:"expectedStatus"`
ExpectedBalances []ExpectedBalance `json:"expectedBalances"`
ExpectedLogs []Log `json:"expectedLogs"`
}
func (cfg *ExecutionConfig) PrivateKey() crypto.PrivateKey {
priKey, err := crypto.HexStringToPrivateKey(cfg.RawPrivateKey)
if err != nil {
log.L().Panic(
"invalid private key",
zap.String("privateKey", cfg.RawPrivateKey),
zap.Error(err),
)
}
return priKey
}
func (cfg *ExecutionConfig) Executor() address.Address {
priKey := cfg.PrivateKey()
addr, err := address.FromBytes(priKey.PublicKey().Hash())
if err != nil {
log.L().Panic(
"invalid private key",
zap.String("privateKey", cfg.RawPrivateKey),
zap.Error(err),
)
}
return addr
}
func (cfg *ExecutionConfig) ByteCode() []byte {
byteCode, err := hex.DecodeString(cfg.RawByteCode)
if err != nil {
log.L().Panic(
"invalid byte code",
zap.String("byteCode", cfg.RawByteCode),
zap.Error(err),
)
}
if cfg.AppendContractAddress {
addr, err := address.FromString(cfg.ContractAddressToAppend)
if err != nil {
log.L().Panic(
"invalid contract address to append",
zap.String("contractAddressToAppend", cfg.ContractAddressToAppend),
zap.Error(err),
)
}
ba := addr.Bytes()
ba = append(make([]byte, 12), ba...)
byteCode = append(byteCode, ba...)
}
return byteCode
}
func (cfg *ExecutionConfig) Amount() *big.Int {
amount, ok := new(big.Int).SetString(cfg.RawAmount, 10)
if !ok {
log.L().Panic("invalid amount", zap.String("amount", cfg.RawAmount))
}
return amount
}
func (cfg *ExecutionConfig) GasPrice() *big.Int {
price, ok := new(big.Int).SetString(cfg.RawGasPrice, 10)
if !ok {
log.L().Panic("invalid gas price", zap.String("gasPrice", cfg.RawGasPrice))
}
return price
}
func (cfg *ExecutionConfig) GasLimit() uint64 {
return uint64(cfg.RawGasLimit)
}
func (cfg *ExecutionConfig) ExpectedGasConsumed() uint64 {
return uint64(cfg.RawExpectedGasConsumed)
}
func (cfg *ExecutionConfig) ExpectedReturnValue() []byte {
retval, err := hex.DecodeString(cfg.RawReturnValue)
if err != nil {
log.L().Panic(
"invalid return value",
zap.String("returnValue", cfg.RawReturnValue),
zap.Error(err),
)
}
return retval
}
type SmartContractTest struct {
// the order matters
InitGenesis GenesisBlockHeight `json:"initGenesis"`
InitBalances []ExpectedBalance `json:"initBalances"`
Deployments []ExecutionConfig `json:"deployments"`
Executions []ExecutionConfig `json:"executions"`
}
func NewSmartContractTest(t *testing.T, file string) {
require := require.New(t)
jsonFile, err := os.Open(file)
require.NoError(err)
sctBytes, err := ioutil.ReadAll(jsonFile)
require.NoError(err)
sct := &SmartContractTest{}
require.NoError(json.Unmarshal(sctBytes, sct))
sct.run(require)
}
func readExecution(
bc blockchain.Blockchain,
sf factory.Factory,
dao blockdao.BlockDAO,
ap actpool.ActPool,
ecfg *ExecutionConfig,
contractAddr string,
) ([]byte, *action.Receipt, error) {
log.S().Info(ecfg.Comment)
state, err := accountutil.AccountState(sf, ecfg.Executor().String())
if err != nil {
return nil, nil, err
}
exec, err := action.NewExecution(
contractAddr,
state.Nonce+1,
ecfg.Amount(),
ecfg.GasLimit(),
ecfg.GasPrice(),
ecfg.ByteCode(),
)
if err != nil {
return nil, nil, err
}
addr, err := address.FromBytes(ecfg.PrivateKey().PublicKey().Hash())
if err != nil {
return nil, nil, err
}
ctx, err := bc.Context()
if err != nil {
return nil, nil, err
}
return sf.SimulateExecution(ctx, addr, exec, dao.GetBlockHash)
}
func runExecutions(
bc blockchain.Blockchain,
sf factory.Factory,
dao blockdao.BlockDAO,
ap actpool.ActPool,
ecfgs []*ExecutionConfig,
contractAddrs []string,
) ([]*action.Receipt, error) {
nonces := map[string]uint64{}
hashes := []hash.Hash256{}
for i, ecfg := range ecfgs {
log.S().Info(ecfg.Comment)
var nonce uint64
var ok bool
executor := ecfg.Executor().String()
if nonce, ok = nonces[executor]; !ok {
state, err := accountutil.AccountState(sf, executor)
if err != nil {
return nil, err
}
nonce = state.Nonce
}
nonce = nonce + 1
nonces[executor] = nonce
exec, err := action.NewExecution(
contractAddrs[i],
nonce,
ecfg.Amount(),
ecfg.GasLimit(),
ecfg.GasPrice(),
ecfg.ByteCode(),
)
if err != nil {
return nil, err
}
builder := &action.EnvelopeBuilder{}
elp := builder.SetAction(exec).
SetNonce(exec.Nonce()).
SetGasLimit(ecfg.GasLimit()).
SetGasPrice(ecfg.GasPrice()).
Build()
selp, err := action.Sign(elp, ecfg.PrivateKey())
if err != nil {
return nil, err
}
if err := ap.Add(context.Background(), selp); err != nil {
return nil, err
}
hashes = append(hashes, exec.Hash())
}
blk, err := bc.MintNewBlock(testutil.TimestampNow())
if err != nil {
return nil, err
}
if err := bc.CommitBlock(blk); err != nil {
return nil, err
}
receipts := []*action.Receipt{}
for _, hash := range hashes {
receipt, err := dao.GetReceiptByActionHash(hash, blk.Height())
if err != nil {
return nil, err
}
receipts = append(receipts, receipt)
}
return receipts, nil
}
func (sct *SmartContractTest) prepareBlockchain(
ctx context.Context,
cfg config.Config,
r *require.Assertions,
) (blockchain.Blockchain, factory.Factory, blockdao.BlockDAO, actpool.ActPool) {
defer func() {
delete(cfg.Plugins, config.GatewayPlugin)
}()
cfg.Plugins[config.GatewayPlugin] = true
cfg.Chain.EnableAsyncIndexWrite = false
cfg.Genesis.EnableGravityChainVoting = false
cfg.ActPool.MinGasPriceStr = "0"
if sct.InitGenesis.IsBering {
cfg.Genesis.Blockchain.BeringBlockHeight = 0
}
for _, expectedBalance := range sct.InitBalances {
cfg.Genesis.InitBalanceMap[expectedBalance.Account] = expectedBalance.Balance().String()
}
registry := protocol.NewRegistry()
acc := account.NewProtocol(rewarding.DepositGas)
r.NoError(acc.Register(registry))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
r.NoError(rp.Register(registry))
// create state factory
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption(), factory.RegistryOption(registry))
r.NoError(err)
ap, err := actpool.NewActPool(sf, cfg.ActPool)
r.NoError(err)
// create indexer
indexer, err := blockindex.NewIndexer(db.NewMemKVStore(), cfg.Genesis.Hash())
r.NoError(err)
// create BlockDAO
dao := blockdao.NewBlockDAO(db.NewMemKVStore(), []blockdao.BlockIndexer{sf, indexer}, cfg.Chain.CompressBlock, cfg.DB)
r.NotNil(dao)
bc := blockchain.NewBlockchain(
cfg,
dao,
factory.NewMinter(sf, ap),
blockchain.BlockValidatorOption(block.NewValidator(
sf,
protocol.NewGenericValidator(sf, accountutil.AccountState),
)),
)
reward := rewarding.NewProtocol(0, 0)
r.NoError(reward.Register(registry))
r.NotNil(bc)
execution := NewProtocol(dao.GetBlockHash, rewarding.DepositGas)
r.NoError(execution.Register(registry))
r.NoError(bc.Start(ctx))
return bc, sf, dao, ap
}
func (sct *SmartContractTest) deployContracts(
bc blockchain.Blockchain,
sf factory.Factory,
dao blockdao.BlockDAO,
ap actpool.ActPool,
r *require.Assertions,
) (contractAddresses []string) {
for i, contract := range sct.Deployments {
if contract.AppendContractAddress {
contract.ContractAddressToAppend = contractAddresses[contract.ContractIndexToAppend]
}
receipts, err := runExecutions(bc, sf, dao, ap, []*ExecutionConfig{&contract}, []string{action.EmptyAddress})
r.NoError(err)
r.Equal(1, len(receipts))
receipt := receipts[0]
r.NotNil(receipt)
if sct.InitGenesis.IsBering {
            // if it is post-Bering, compare the status with the expected status
r.Equal(sct.Deployments[i].ExpectedStatus, receipt.Status)
if receipt.Status != uint64(iotextypes.ReceiptStatus_Success) {
return []string{}
}
} else {
if !sct.Deployments[i].Failed {
r.Equal(uint64(iotextypes.ReceiptStatus_Success), receipt.Status, i)
} else {
r.Equal(uint64(iotextypes.ReceiptStatus_Failure), receipt.Status, i)
return []string{}
}
}
if sct.Deployments[i].ExpectedGasConsumed() != 0 {
r.Equal(sct.Deployments[i].ExpectedGasConsumed(), receipt.GasConsumed)
}
addr, _ := address.FromString(receipt.ContractAddress)
c, err := readCode(sf, addr.Bytes())
r.NoError(err)
if contract.AppendContractAddress {
lenOfByteCode := len(contract.ByteCode())
r.True(bytes.Contains(contract.ByteCode()[:lenOfByteCode-32], c))
} else {
r.True(bytes.Contains(sct.Deployments[i].ByteCode(), c))
}
contractAddresses = append(contractAddresses, receipt.ContractAddress)
}
return
}
func (sct *SmartContractTest) run(r *require.Assertions) {
// prepare blockchain
ctx := context.Background()
bc, sf, dao, ap := sct.prepareBlockchain(ctx, config.Default, r)
defer func() {
r.NoError(bc.Stop(ctx))
}()
// deploy smart contract
contractAddresses := sct.deployContracts(bc, sf, dao, ap, r)
if len(contractAddresses) == 0 {
return
}
// run executions
for i, exec := range sct.Executions {
contractAddr := contractAddresses[exec.ContractIndex]
if exec.AppendContractAddress {
exec.ContractAddressToAppend = contractAddresses[exec.ContractIndexToAppend]
}
var retval []byte
var receipt *action.Receipt
var err error
if exec.ReadOnly {
retval, receipt, err = readExecution(bc, sf, dao, ap, &exec, contractAddr)
r.NoError(err)
expected := exec.ExpectedReturnValue()
if len(expected) == 0 {
r.Equal(0, len(retval))
} else {
r.Equal(expected, retval)
}
} else {
receipts, err := runExecutions(bc, sf, dao, ap, []*ExecutionConfig{&exec}, []string{contractAddr})
r.NoError(err)
r.Equal(1, len(receipts))
receipt = receipts[0]
r.NotNil(receipt)
}
if sct.InitGenesis.IsBering {
            // if it is post-Bering, compare the status with the expected status
r.Equal(exec.ExpectedStatus, receipt.Status)
} else {
if exec.Failed {
r.Equal(uint64(iotextypes.ReceiptStatus_Failure), receipt.Status)
} else {
r.Equal(uint64(iotextypes.ReceiptStatus_Success), receipt.Status)
}
}
if exec.ExpectedGasConsumed() != 0 {
r.Equal(exec.ExpectedGasConsumed(), receipt.GasConsumed, i)
}
for _, expectedBalance := range exec.ExpectedBalances {
account := expectedBalance.Account
if account == "" {
account = contractAddr
}
state, err := accountutil.AccountState(sf, account)
r.NoError(err)
r.Equal(
0,
state.Balance.Cmp(expectedBalance.Balance()),
"balance of account %s is different from expectation, %d vs %d",
account,
state.Balance,
expectedBalance.Balance(),
)
}
if receipt.Status == uint64(iotextypes.ReceiptStatus_Success) {
numLog := 0
for _, l := range receipt.Logs {
if !action.IsSystemLog(l) {
numLog++
}
}
r.Equal(len(exec.ExpectedLogs), numLog, i)
// TODO: check value of logs
}
}
}
func TestProtocol_Validate(t *testing.T) {
require := require.New(t)
p := NewProtocol(func(uint64) (hash.Hash256, error) {
return hash.ZeroHash256, nil
}, rewarding.DepositGas)
t.Run("Oversized data", func(t *testing.T) {
tmpPayload := [32769]byte{}
data := tmpPayload[:]
ex, err := action.NewExecution("2", uint64(1), big.NewInt(0), uint64(0), big.NewInt(0), data)
require.NoError(err)
require.Equal(action.ErrActPool, errors.Cause(p.Validate(context.Background(), ex, nil)))
})
}
func TestProtocol_Handle(t *testing.T) {
testEVM := func(t *testing.T) {
log.S().Info("Test EVM")
require := require.New(t)
ctx := context.Background()
cfg := config.Default
defer func() {
delete(cfg.Plugins, config.GatewayPlugin)
}()
testTriePath, err := testutil.PathOfTempFile("trie")
require.NoError(err)
testDBPath, err := testutil.PathOfTempFile("db")
require.NoError(err)
testIndexPath, err := testutil.PathOfTempFile("index")
require.NoError(err)
cfg.Plugins[config.GatewayPlugin] = true
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.IndexDBPath = testIndexPath
cfg.Chain.EnableAsyncIndexWrite = false
cfg.Genesis.EnableGravityChainVoting = false
cfg.ActPool.MinGasPriceStr = "0"
cfg.Genesis.InitBalanceMap[identityset.Address(27).String()] = unit.ConvertIotxToRau(1000000000).String()
registry := protocol.NewRegistry()
acc := account.NewProtocol(rewarding.DepositGas)
require.NoError(acc.Register(registry))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(rp.Register(registry))
// create state factory
sf, err := factory.NewStateDB(cfg, factory.DefaultStateDBOption(), factory.RegistryStateDBOption(registry))
require.NoError(err)
ap, err := actpool.NewActPool(sf, cfg.ActPool)
require.NoError(err)
// create indexer
cfg.DB.DbPath = cfg.Chain.IndexDBPath
indexer, err := blockindex.NewIndexer(db.NewBoltDB(cfg.DB), hash.ZeroHash256)
require.NoError(err)
// create BlockDAO
cfg.DB.DbPath = cfg.Chain.ChainDBPath
dao := blockdao.NewBlockDAO(db.NewBoltDB(cfg.DB), []blockdao.BlockIndexer{sf, indexer}, cfg.Chain.CompressBlock, cfg.DB)
require.NotNil(dao)
bc := blockchain.NewBlockchain(
cfg,
dao,
factory.NewMinter(sf, ap),
blockchain.BlockValidatorOption(block.NewValidator(
sf,
protocol.NewGenericValidator(sf, accountutil.AccountState),
)),
)
exeProtocol := NewProtocol(dao.GetBlockHash, rewarding.DepositGas)
require.NoError(exeProtocol.Register(registry))
require.NoError(bc.Start(ctx))
require.NotNil(bc)
defer func() {
require.NoError(bc.Stop(ctx))
}()
data, _ := hex.DecodeString("608060405234801561001057600080fd5b5060df8061001f6000396000f3006080604052600436106049576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806360fe47b114604e5780636d4ce63c146078575b600080fd5b348015605957600080fd5b5060766004803603810190808035906020019092919050505060a0565b005b348015608357600080fd5b50608a60aa565b6040518082815260200191505060405180910390f35b8060008190555050565b600080549050905600a165627a7a7230582002faabbefbbda99b20217cf33cb8ab8100caf1542bf1f48117d72e2c59139aea0029")
execution, err := action.NewExecution(action.EmptyAddress, 1, big.NewInt(0), uint64(100000), big.NewInt(0), data)
require.NoError(err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetAction(execution).
SetNonce(1).
SetGasLimit(100000).Build()
selp, err := action.Sign(elp, identityset.PrivateKey(27))
require.NoError(err)
require.NoError(ap.Add(context.Background(), selp))
blk, err := bc.MintNewBlock(testutil.TimestampNow())
require.NoError(err)
require.NoError(bc.CommitBlock(blk))
require.Equal(1, len(blk.Receipts))
eHash := execution.Hash()
r, _ := dao.GetReceiptByActionHash(eHash, blk.Height())
require.NotNil(r)
require.Equal(eHash, r.ActionHash)
contract, err := address.FromString(r.ContractAddress)
require.NoError(err)
c, err := readCode(sf, contract.Bytes())
require.NoError(err)
require.Equal(data[31:], c)
exe, err := dao.GetActionByActionHash(eHash, blk.Height())
require.NoError(err)
require.Equal(eHash, exe.Hash())
addr27 := hash.BytesToHash160(identityset.Address(27).Bytes())
total, err := indexer.GetActionCountByAddress(addr27)
require.NoError(err)
exes, err := indexer.GetActionsByAddress(addr27, 0, total)
require.NoError(err)
require.Equal(1, len(exes))
require.Equal(eHash[:], exes[0])
actIndex, err := indexer.GetActionIndex(eHash[:])
require.NoError(err)
blkHash, err := dao.GetBlockHash(actIndex.BlockHeight())
require.NoError(err)
require.Equal(blk.HashBlock(), blkHash)
// store to key 0
data, _ = hex.DecodeString("60fe47b1000000000000000000000000000000000000000000000000000000000000000f")
execution, err = action.NewExecution(r.ContractAddress, 2, big.NewInt(0), uint64(120000), big.NewInt(0), data)
require.NoError(err)
bd = &action.EnvelopeBuilder{}
elp = bd.SetAction(execution).
SetNonce(2).
SetGasLimit(120000).Build()
selp, err = action.Sign(elp, identityset.PrivateKey(27))
require.NoError(err)
log.S().Infof("execution %+v", execution)
require.NoError(ap.Add(context.Background(), selp))
blk, err = bc.MintNewBlock(testutil.TimestampNow())
require.NoError(err)
require.NoError(bc.CommitBlock(blk))
require.Equal(1, len(blk.Receipts))
// TODO (zhi): reenable the unit test
/*
ws, err = sf.NewWorkingSet()
require.NoError(err)
stateDB = evm.NewStateDBAdapter(ws, uint64(0), true, hash.ZeroHash256)
var emptyEVMHash common.Hash
v := stateDB.GetState(evmContractAddrHash, emptyEVMHash)
require.Equal(byte(15), v[31])
*/
eHash = execution.Hash()
r, err = dao.GetReceiptByActionHash(eHash, blk.Height())
require.NoError(err)
require.Equal(eHash, r.ActionHash)
// read from key 0
data, err = hex.DecodeString("6d4ce63c")
require.NoError(err)
execution, err = action.NewExecution(r.ContractAddress, 3, big.NewInt(0), uint64(120000), big.NewInt(0), data)
require.NoError(err)
bd = &action.EnvelopeBuilder{}
elp = bd.SetAction(execution).
SetNonce(3).
SetGasLimit(120000).Build()
selp, err = action.Sign(elp, identityset.PrivateKey(27))
require.NoError(err)
log.S().Infof("execution %+v", execution)
require.NoError(ap.Add(context.Background(), selp))
blk, err = bc.MintNewBlock(testutil.TimestampNow())
require.NoError(err)
require.NoError(bc.CommitBlock(blk))
require.Equal(1, len(blk.Receipts))
eHash = execution.Hash()
r, err = dao.GetReceiptByActionHash(eHash, blk.Height())
require.NoError(err)
require.Equal(eHash, r.ActionHash)
data, _ = hex.DecodeString("608060405234801561001057600080fd5b5060df8061001f6000396000f3006080604052600436106049576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806360fe47b114604e5780636d4ce63c146078575b600080fd5b348015605957600080fd5b5060766004803603810190808035906020019092919050505060a0565b005b348015608357600080fd5b50608a60aa565b6040518082815260200191505060405180910390f35b8060008190555050565b600080549050905600a165627a7a7230582002faabbefbbda99b20217cf33cb8ab8100caf1542bf1f48117d72e2c59139aea0029")
execution1, err := action.NewExecution(action.EmptyAddress, 4, big.NewInt(0), uint64(100000), big.NewInt(10), data)
require.NoError(err)
bd = &action.EnvelopeBuilder{}
elp = bd.SetAction(execution1).
SetNonce(4).
SetGasLimit(100000).SetGasPrice(big.NewInt(10)).Build()
selp, err = action.Sign(elp, identityset.PrivateKey(27))
require.NoError(err)
require.NoError(ap.Add(context.Background(), selp))
blk, err = bc.MintNewBlock(testutil.TimestampNow())
require.NoError(err)
require.NoError(bc.CommitBlock(blk))
require.Equal(1, len(blk.Receipts))
}
t.Run("EVM", func(t *testing.T) {
testEVM(t)
})
/**
* source of smart contract: https://etherscan.io/address/0x6fb3e0a217407efff7ca062d46c26e5d60a14d69#code
*/
t.Run("ERC20", func(t *testing.T) {
NewSmartContractTest(t, "testdata/erc20.json")
})
/**
* Source of smart contract: https://etherscan.io/address/0x8dd5fbce2f6a956c3022ba3663759011dd51e73e#code
*/
t.Run("DelegateERC20", func(t *testing.T) {
NewSmartContractTest(t, "testdata/delegate_erc20.json")
})
/*
* Source code: https://kovan.etherscan.io/address/0x81f85886749cbbf3c2ec742db7255c6b07c63c69
*/
t.Run("InfiniteLoop", func(t *testing.T) {
NewSmartContractTest(t, "testdata/infiniteloop.json")
})
// RollDice
t.Run("RollDice", func(t *testing.T) {
NewSmartContractTest(t, "testdata/rolldice.json")
})
// ChangeState
t.Run("ChangeState", func(t *testing.T) {
NewSmartContractTest(t, "testdata/changestate.json")
})
// array-return
t.Run("ArrayReturn", func(t *testing.T) {
NewSmartContractTest(t, "testdata/array-return.json")
})
// basic-token
t.Run("BasicToken", func(t *testing.T) {
NewSmartContractTest(t, "testdata/basic-token.json")
})
// call-dynamic
t.Run("CallDynamic", func(t *testing.T) {
NewSmartContractTest(t, "testdata/call-dynamic.json")
})
// factory
t.Run("Factory", func(t *testing.T) {
NewSmartContractTest(t, "testdata/factory.json")
})
// mapping-delete
t.Run("MappingDelete", func(t *testing.T) {
NewSmartContractTest(t, "testdata/mapping-delete.json")
})
// f.value
t.Run("F.value", func(t *testing.T) {
NewSmartContractTest(t, "testdata/f.value.json")
})
// proposal
t.Run("Proposal", func(t *testing.T) {
NewSmartContractTest(t, "testdata/proposal.json")
})
// public-length
t.Run("PublicLength", func(t *testing.T) {
NewSmartContractTest(t, "testdata/public-length.json")
})
// public-mapping
t.Run("PublicMapping", func(t *testing.T) {
NewSmartContractTest(t, "testdata/public-mapping.json")
})
// no-variable-length-returns
t.Run("NoVariableLengthReturns", func(t *testing.T) {
NewSmartContractTest(t, "testdata/no-variable-length-returns.json")
})
// tuple
t.Run("Tuple", func(t *testing.T) {
NewSmartContractTest(t, "testdata/tuple.json")
})
// tail-recursion
t.Run("TailRecursion", func(t *testing.T) {
NewSmartContractTest(t, "testdata/tail-recursion.json")
})
// sha3
t.Run("Sha3", func(t *testing.T) {
NewSmartContractTest(t, "testdata/sha3.json")
})
// remove-from-array
t.Run("RemoveFromArray", func(t *testing.T) {
NewSmartContractTest(t, "testdata/remove-from-array.json")
})
// send-eth
t.Run("SendEth", func(t *testing.T) {
NewSmartContractTest(t, "testdata/send-eth.json")
})
// multisend
t.Run("Multisend", func(t *testing.T) {
NewSmartContractTest(t, "testdata/multisend.json")
})
t.Run("Multisend2", func(t *testing.T) {
NewSmartContractTest(t, "testdata/multisend2.json")
})
// reentry
t.Run("reentry-attack", func(t *testing.T) {
NewSmartContractTest(t, "testdata/reentry-attack.json")
})
// cashier
t.Run("cashier", func(t *testing.T) {
NewSmartContractTest(t, "testdata/cashier.json")
})
// wireconnection
    // [Issue #1422] This unit test proves that there is no problem when we deploy and execute a contract
    // that inherits an abstract contract, implements its abstract functions, and calls between them (Utterance() calls utterance())
t.Run("wireconnection", func(t *testing.T) {
NewSmartContractTest(t, "testdata/wireconnection.json")
})
// gas-test
t.Run("gas-test", func(t *testing.T) {
NewSmartContractTest(t, "testdata/gas-test.json")
})
// storage-test
t.Run("storage-test", func(t *testing.T) {
NewSmartContractTest(t, "testdata/storage-test.json")
})
// cashier-bering
t.Run("cashier-bering", func(t *testing.T) {
NewSmartContractTest(t, "testdata/cashier-bering.json")
})
// infiniteloop-bering
t.Run("infiniteloop-bering", func(t *testing.T) {
NewSmartContractTest(t, "testdata/infiniteloop-bering.json")
})
}
func TestMaxTime(t *testing.T) {
t.Run("max-time", func(t *testing.T) {
NewSmartContractTest(t, "testdata/maxtime.json")
})
t.Run("max-time-2", func(t *testing.T) {
NewSmartContractTest(t, "testdata/maxtime2.json")
})
}
func benchmarkHotContract(b *testing.B, async bool) {
sct := SmartContractTest{
InitBalances: []ExpectedBalance{
{
Account: "io1mflp9m6hcgm2qcghchsdqj3z3eccrnekx9p0ms",
RawBalance: "1000000000000000000000000000",
},
},
Deployments: []ExecutionConfig{
{
ContractIndex: 0,
RawPrivateKey: "cfa6ef757dee2e50351620dca002d32b9c090cfda55fb81f37f1d26b273743f1",
RawByteCode: "608060405234801561001057600080fd5b506040516040806108018339810180604052810190808051906020019092919080519060200190929190505050816004819055508060058190555050506107a58061005c6000396000f300608060405260043610610078576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680631249c58b1461007d57806327e235e31461009457806353277879146100eb5780636941b84414610142578063810ad50514610199578063a9059cbb14610223575b600080fd5b34801561008957600080fd5b50610092610270565b005b3480156100a057600080fd5b506100d5600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610475565b6040518082815260200191505060405180910390f35b3480156100f757600080fd5b5061012c600480360381019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061048d565b6040518082815260200191505060405180910390f35b34801561014e57600080fd5b50610183600480360381019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104a5565b6040518082815260200191505060405180910390f35b3480156101a557600080fd5b506101da600480360381019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104bd565b604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390f35b34801561022f57600080fd5b5061026e600480360381019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610501565b005b436004546000803373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054011115151561032a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f746f6f20736f6f6e20746f206d696e740000000000000000000000000000000081525060200191505060405180910390fd5b436000803373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550600554600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282540192505081905550600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600081548092919060010191905055503373ffffffffffffffffffffffffffffffffffffffff16600073ffffffffffffffffffffffffffffffffffffffff167fec61728879a33aa50b55e1f4789dcfc1c680f30a24d7b8694a9f874e242a97b46005546040518082815260200191505060405180910390a3565b60016020528060005260406000206000915090505481565b60026020528060005260406000206000915090505481565b60006020528060005260406000206000915090505481565b60036020528060005260406000206000915090508060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060010154905082565b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156105b8576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f696e73756666696369656e742062616c616e636500000000000000000000000081525060200191505060405180910390fd5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555080600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254019250508190555060408051908101604052803373ffffffffffffffffffffffffffffffffffffffff16815260200182815250600360008473fffffffffffffffffffff
fffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550602082015181600101559050508173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fec61728879a33aa50b55e1f4789dcfc1c680f30a24d7b8694a9f874e242a97b4836040518082815260200191505060405180910390a350505600a165627a7a7230582047e5e1380e66d6b109548617ae59ff7baf70ee2d4a6734559b8fc5cabca0870b0029000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000186a0",
RawAmount: "0",
RawGasLimit: 50000000,
RawGasPrice: "0",
},
},
}
r := require.New(b)
ctx := context.Background()
cfg := config.Default
cfg.Genesis.NumSubEpochs = uint64(b.N)
if async {
cfg.Genesis.GreenlandBlockHeight = 0
} else {
cfg.Genesis.GreenlandBlockHeight = 10000000000
}
bc, sf, dao, ap := sct.prepareBlockchain(ctx, cfg, r)
defer func() {
r.NoError(bc.Stop(ctx))
}()
contractAddresses := sct.deployContracts(bc, sf, dao, ap, r)
r.Equal(1, len(contractAddresses))
contractAddr := contractAddresses[0]
b.ResetTimer()
for i := 0; i < b.N; i++ {
receipts, err := runExecutions(
bc, sf, dao, ap, []*ExecutionConfig{
{
RawPrivateKey: "cfa6ef757dee2e50351620dca002d32b9c090cfda55fb81f37f1d26b273743f1",
RawByteCode: "1249c58b",
RawAmount: "0",
RawGasLimit: 5000000,
RawGasPrice: "0",
Failed: false,
Comment: "mint token",
},
},
[]string{contractAddr},
)
r.NoError(err)
r.Equal(1, len(receipts))
r.Equal(uint64(1), receipts[0].Status)
ecfgs := []*ExecutionConfig{}
contractAddrs := []string{}
for j := 0; j < 100; j++ {
ecfgs = append(ecfgs, &ExecutionConfig{
RawPrivateKey: "cfa6ef757dee2e50351620dca002d32b9c090cfda55fb81f37f1d26b273743f1",
RawByteCode: fmt.Sprintf("a9059cbb000000000000000000000000123456789012345678900987%016x0000000000000000000000000000000000000000000000000000000000000039", 100*i+j),
RawAmount: "0",
RawGasLimit: 5000000,
RawGasPrice: "0",
Failed: false,
Comment: "send token",
})
contractAddrs = append(contractAddrs, contractAddr)
}
receipts, err = runExecutions(bc, sf, dao, ap, ecfgs, contractAddrs)
r.NoError(err)
for _, receipt := range receipts {
r.Equal(uint64(1), receipt.Status)
}
}
b.StopTimer()
}
func BenchmarkHotContract(b *testing.B) {
b.Run("async mode", func(b *testing.B) {
benchmarkHotContract(b, true)
})
b.Run("sync mode", func(b *testing.B) {
benchmarkHotContract(b, false)
})
}
| 1 | 22,001 | This change could mute a unit test failure; it would be better to update the unit tests instead | iotexproject-iotex-core | go
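To illustrate the reviewer's point, a sketch of how the log-count check in `run()` could be updated alongside this change, so a shift in log classification fails the test loudly instead of being muted. It is a fragment meant to slot into the existing loop (`receipt`, `exec`, `i`, and `r` come from that context), and `Equalf` is testify's formatted assertion variant:

```go
// Count only application logs using the new predicate, and assert with a
// descriptive message so a classification change surfaces as a failure.
numLog := 0
for _, l := range receipt.Logs {
	if !l.IsEvmTransfer() { // replaces the old action.IsSystemLog(l) check
		numLog++
	}
}
r.Equalf(len(exec.ExpectedLogs), numLog,
	"execution %d: expected %d application logs, got %d",
	i, len(exec.ExpectedLogs), numLog)
```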
@@ -165,6 +165,8 @@ class NotificationData {
activeNotifications.remove(holder);
int notificationId = holder.notificationId;
+ notificationIdsInUse.delete(notificationId);
+
if (!additionalNotifications.isEmpty()) {
NotificationContent newContent = additionalNotifications.removeFirst();
NotificationHolder replacement = createNotificationHolder(notificationId, newContent); | 1 | package com.fsck.k9.notification;
import java.util.ArrayList;
import java.util.Deque;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import android.util.SparseBooleanArray;
import com.fsck.k9.Account;
import com.fsck.k9.activity.MessageReference;
/**
* A holder class for pending new mail notifications.
*/
class NotificationData {
// Note: As of Jellybean, phone notifications show a maximum of 5 lines, while tablet notifications show 7 lines.
static final int MAX_NUMBER_OF_MESSAGES_FOR_SUMMARY_NOTIFICATION = 5;
// Note: This class assumes MAX_NUMBER_OF_STACKED_NOTIFICATIONS >= MAX_NUMBER_OF_MESSAGES_FOR_SUMMARY_NOTIFICATION
static final int MAX_NUMBER_OF_STACKED_NOTIFICATIONS = 8;
private final Account account;
private final LinkedList<NotificationHolder> activeNotifications = new LinkedList<NotificationHolder>();
private final Deque<NotificationContent> additionalNotifications = new LinkedList<NotificationContent>();
private final SparseBooleanArray notificationIdsInUse = new SparseBooleanArray();
private int unreadMessageCount;
public NotificationData(Account account) {
this.account = account;
}
public AddNotificationResult addNotificationContent(NotificationContent content) {
int notificationId;
boolean cancelNotificationIdBeforeReuse;
if (isMaxNumberOfActiveNotificationsReached()) {
NotificationHolder notificationHolder = activeNotifications.removeLast();
addToAdditionalNotifications(notificationHolder);
notificationId = notificationHolder.notificationId;
cancelNotificationIdBeforeReuse = true;
} else {
notificationId = getNewNotificationId();
cancelNotificationIdBeforeReuse = false;
}
NotificationHolder notificationHolder = createNotificationHolder(notificationId, content);
activeNotifications.addFirst(notificationHolder);
if (cancelNotificationIdBeforeReuse) {
return AddNotificationResult.replaceNotification(notificationHolder);
} else {
return AddNotificationResult.newNotification(notificationHolder);
}
}
private boolean isMaxNumberOfActiveNotificationsReached() {
return activeNotifications.size() == MAX_NUMBER_OF_STACKED_NOTIFICATIONS;
}
private void addToAdditionalNotifications(NotificationHolder notificationHolder) {
additionalNotifications.addFirst(notificationHolder.content);
}
private int getNewNotificationId() {
for (int i = 0; i < MAX_NUMBER_OF_STACKED_NOTIFICATIONS; i++) {
int notificationId = NotificationIds.getNewMailStackedNotificationId(account, i);
if (!isNotificationInUse(notificationId)) {
markNotificationIdAsInUse(notificationId);
return notificationId;
}
}
throw new AssertionError("getNewNotificationId() called with no free notification ID");
}
private boolean isNotificationInUse(int notificationId) {
return notificationIdsInUse.get(notificationId);
}
private void markNotificationIdAsInUse(int notificationId) {
notificationIdsInUse.put(notificationId, true);
}
NotificationHolder createNotificationHolder(int notificationId, NotificationContent content) {
return new NotificationHolder(notificationId, content);
}
public boolean containsStarredMessages() {
for (NotificationHolder holder : activeNotifications) {
if (holder.content.starred) {
return true;
}
}
for (NotificationContent content : additionalNotifications) {
if (content.starred) {
return true;
}
}
return false;
}
public boolean hasAdditionalMessages() {
return activeNotifications.size() > MAX_NUMBER_OF_MESSAGES_FOR_SUMMARY_NOTIFICATION;
}
public int getAdditionalMessagesCount() {
return additionalNotifications.size();
}
public int getNewMessagesCount() {
return activeNotifications.size() + additionalNotifications.size();
}
public boolean isSingleMessageNotification() {
return activeNotifications.size() == 1;
}
public NotificationHolder getHolderForLatestNotification() {
return activeNotifications.getFirst();
}
public List<NotificationContent> getContentForSummaryNotification() {
int size = calculateNumberOfMessagesForSummaryNotification();
List<NotificationContent> result = new ArrayList<NotificationContent>(size);
Iterator<NotificationHolder> iterator = activeNotifications.iterator();
int notificationCount = 0;
while (iterator.hasNext() && notificationCount < MAX_NUMBER_OF_MESSAGES_FOR_SUMMARY_NOTIFICATION) {
NotificationHolder holder = iterator.next();
result.add(holder.content);
notificationCount++;
}
return result;
}
private int calculateNumberOfMessagesForSummaryNotification() {
return Math.min(activeNotifications.size(), MAX_NUMBER_OF_MESSAGES_FOR_SUMMARY_NOTIFICATION);
}
public int[] getActiveNotificationIds() {
int size = activeNotifications.size();
int[] notificationIds = new int[size];
for (int i = 0; i < size; i++) {
NotificationHolder holder = activeNotifications.get(i);
notificationIds[i] = holder.notificationId;
}
return notificationIds;
}
public RemoveNotificationResult removeNotificationForMessage(MessageReference messageReference) {
NotificationHolder holder = getNotificationHolderForMessage(messageReference);
if (holder == null) {
return RemoveNotificationResult.unknownNotification();
}
activeNotifications.remove(holder);
int notificationId = holder.notificationId;
if (!additionalNotifications.isEmpty()) {
NotificationContent newContent = additionalNotifications.removeFirst();
NotificationHolder replacement = createNotificationHolder(notificationId, newContent);
activeNotifications.addLast(replacement);
return RemoveNotificationResult.createNotification(replacement);
}
return RemoveNotificationResult.cancelNotification(notificationId);
}
private NotificationHolder getNotificationHolderForMessage(MessageReference messageReference) {
for (NotificationHolder holder : activeNotifications) {
if (messageReference.equals(holder.content.messageReference)) {
return holder;
}
}
return null;
}
public Account getAccount() {
return account;
}
public int getUnreadMessageCount() {
return unreadMessageCount + getNewMessagesCount();
}
public void setUnreadMessageCount(int unreadMessageCount) {
this.unreadMessageCount = unreadMessageCount;
}
public ArrayList<MessageReference> getAllMessageReferences() {
int newSize = activeNotifications.size() + additionalNotifications.size();
ArrayList<MessageReference> messageReferences = new ArrayList<MessageReference>(newSize);
for (NotificationHolder holder : activeNotifications) {
messageReferences.add(holder.content.messageReference);
}
for (NotificationContent content : additionalNotifications) {
messageReferences.add(content.messageReference);
}
return messageReferences;
}
}
| 1 | 14,069 | All other places that access `notificationIdsInUse` are in methods with descriptive names. We should do the same here. Maybe `markNotificationIdAsFree()`? | k9mail-k-9 | java |
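A sketch of the helper this comment proposes. The name is the reviewer's suggestion, and the body mirrors the existing `markNotificationIdAsInUse()` counterpart:

```java
// Suggested wrapper, symmetric with markNotificationIdAsInUse(), so all
// access to notificationIdsInUse stays behind descriptively named methods.
private void markNotificationIdAsFree(int notificationId) {
    notificationIdsInUse.delete(notificationId);
}
```

The line added in the diff would then become `markNotificationIdAsFree(notificationId);` at the call site.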
@@ -26,17 +26,14 @@ import (
"sync"
"time"
- "github.com/ethersphere/bee/pkg/sctx"
"github.com/ethersphere/bee/pkg/swarm"
)
var (
TagUidFunc = rand.Uint32
- ErrNotFound = errors.New("tag not found")
+ NotFoundErr = errors.New("tag not found")
)
-type TagsContextKey struct{}
-
// Tags hold tag information indexed by a unique random uint32
type Tags struct {
tags *sync.Map | 1 | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tags
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"strconv"
"sync"
"time"
"github.com/ethersphere/bee/pkg/sctx"
"github.com/ethersphere/bee/pkg/swarm"
)
var (
TagUidFunc = rand.Uint32
ErrNotFound = errors.New("tag not found")
)
type TagsContextKey struct{}
// Tags hold tag information indexed by a unique random uint32
type Tags struct {
tags *sync.Map
}
// NewTags creates a tags object
func NewTags() *Tags {
return &Tags{
tags: &sync.Map{},
}
}
// Create creates a new tag, stores it by the name and returns it
// it returns an error if the tag with this name already exists
func (ts *Tags) Create(s string, total int64, anon bool) (*Tag, error) {
t := NewTag(context.Background(), TagUidFunc(), s, total, anon, nil)
if _, loaded := ts.tags.LoadOrStore(t.Uid, t); loaded {
return nil, errExists
}
return t, nil
}
// All returns all existing tags in Tags' sync.Map
// Note that tags are returned in no particular order
func (ts *Tags) All() (t []*Tag) {
ts.tags.Range(func(k, v interface{}) bool {
t = append(t, v.(*Tag))
return true
})
return t
}
// Get returns the underlying tag for the uid or an error if not found
func (ts *Tags) Get(uid uint32) (*Tag, error) {
t, ok := ts.tags.Load(uid)
if !ok {
return nil, ErrNotFound
}
return t.(*Tag), nil
}
// GetByAddress returns the latest underlying tag for the address or an error if not found
func (ts *Tags) GetByAddress(address swarm.Address) (*Tag, error) {
var t *Tag
var lastTime time.Time
ts.tags.Range(func(key interface{}, value interface{}) bool {
rcvdTag := value.(*Tag)
if rcvdTag.Address.Equal(address) && rcvdTag.StartedAt.After(lastTime) {
t = rcvdTag
lastTime = rcvdTag.StartedAt
}
return true
})
if t == nil {
return nil, ErrNotFound
}
return t, nil
}
// GetFromContext gets a tag from the tag uid stored in the context
func (ts *Tags) GetFromContext(ctx context.Context) (*Tag, error) {
uid := sctx.GetTag(ctx)
t, ok := ts.tags.Load(uid)
if !ok {
return nil, ErrNotFound
}
return t.(*Tag), nil
}
// Range exposes sync.Map's iterator
func (ts *Tags) Range(fn func(k, v interface{}) bool) {
ts.tags.Range(fn)
}
func (ts *Tags) Delete(k interface{}) {
ts.tags.Delete(k)
}
func (ts *Tags) MarshalJSON() (out []byte, err error) {
m := make(map[string]*Tag)
ts.Range(func(k, v interface{}) bool {
key := fmt.Sprintf("%d", k)
val := v.(*Tag)
// don't persist tags which were already done
if !val.Done(StateSynced) {
m[key] = val
}
return true
})
return json.Marshal(m)
}
func (ts *Tags) UnmarshalJSON(value []byte) error {
m := make(map[string]*Tag)
err := json.Unmarshal(value, &m)
if err != nil {
return err
}
for k, v := range m {
key, err := strconv.ParseUint(k, 10, 32)
if err != nil {
return err
}
// prevent a condition where a chunk was sent before shutdown
// and the node was turned off before the receipt was received
v.Sent = v.Synced
ts.tags.Store(key, v)
}
return err
}
| 1 | 11,820 | Keep this as `ErrNotFound`; it is the same convention as in the other packages | ethersphere-bee | go
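For reference, a small sketch of the convention the reviewer means: Go sentinel errors carry an `Err` prefix, which keeps them recognizable at call sites and matchable with `errors.Is` (Go 1.13+). The caller snippet is illustrative, not taken from the codebase:

```go
// Idiomatic sentinel naming: ErrXxx, not XxxErr.
var ErrNotFound = errors.New("tag not found")

// Callers can then test for the sentinel explicitly:
if _, err := ts.Get(uid); errors.Is(err, ErrNotFound) {
	// handle the missing tag
}
```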
@@ -10,13 +10,15 @@ var ip = {},
extIP = require('external-ip'),
plugins = require('../../../plugins/pluginManager.js');
+
/**
* Function to get the hostname/ip address/url to access dashboard
* @param {function} callback - callback function that returns the hostname
*/
ip.getHost = function(callback) {
// If host is set in config.js use that, otherwise get the external IP from ifconfig.me
- var domain = plugins.getConfig("api").domain;
+ var domain = plugins.getConfig("api").domain,
+ offlineMode = plugins.getConfig("api").offline_mode;
if (typeof domain !== "undefined" && domain !== "") {
if (domain.indexOf("://") === -1) {
domain = "http://" + domain; | 1 | /**
* Module returning hostname value
* @module api/parts/mgmt/ip
*/
/** @lends module:api/parts/mgmt/ip */
var ip = {},
net = require('net'),
extIP = require('external-ip'),
plugins = require('../../../plugins/pluginManager.js');
/**
* Function to get the hostname/ip address/url to access dashboard
* @param {function} callback - callback function that returns the hostname
*/
ip.getHost = function(callback) {
// If host is set in config.js use that, otherwise get the external IP from ifconfig.me
var domain = plugins.getConfig("api").domain;
if (typeof domain !== "undefined" && domain !== "") {
if (domain.indexOf("://") === -1) {
domain = "http://" + domain;
}
callback(false, stripTrailingSlash(domain));
}
else {
getIP(function(err, ipres) {
if (err) {
console.log(err);
getNetworkIP(function(err2, ipaddress) {
callback(err2, "http://" + ipaddress);
});
}
else {
callback(err, "http://" + ipres);
}
});
}
};
/**
* Strip trailing slash
* @param {string} str - string from which to remove trailing slash
* @returns {string} modified string
*/
function stripTrailingSlash(str) {
if (str.substr(str.length - 1) === '/') {
return str.substr(0, str.length - 1);
}
return str;
}
var getIP = extIP({
timeout: 600,
getIP: 'parallel'
});
/**
* Try to get ip address through network, by connecting to external resource
* @param {function} callback - callback function that returns the ip address
*/
function getNetworkIP(callback) {
var socket = net.createConnection(80, 'www.google.com');
socket.setTimeout(1000);
socket.on('connect', function() {
callback(undefined, socket.address().address);
socket.end();
});
socket.on('error', function(e) {
callback(e, 'localhost');
});
}
module.exports = ip; | 1 | 13,376 | Same here: we need to call `callback` in the else branch | Countly-countly-server | js
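A sketch of the branch the reviewer is asking for, meant to slot into `ip.getHost` right after `offlineMode` is read: every path must eventually invoke `callback`, otherwise the caller waits forever. The `http://localhost` fallback for offline mode is an assumption, not taken from the patch:

```javascript
if (offlineMode) {
    // Offline mode: don't contact external services, but still report back.
    callback(false, "http://localhost");
}
else {
    // Existing behavior, unchanged: both the success and error paths of the
    // external lookup already invoke callback.
    getIP(function(err, ipres) {
        if (err) {
            getNetworkIP(function(err2, ipaddress) {
                callback(err2, "http://" + ipaddress);
            });
        }
        else {
            callback(err, "http://" + ipres);
        }
    });
}
```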
@@ -160,7 +160,7 @@ public class CertFailedRefreshNotificationTask implements NotificationTask {
}
String expiryTime = getTimestampAsString(certRecord.getExpiryTime());
- String hostName = (certRecord.getHostName() != null) ? certRecord.getHostName() : "";
+ String hostName = certRecord.getHostName();
certDetails.append(AthenzUtils.extractPrincipalServiceName(certRecord.getService())).append(';')
.append(certRecord.getProvider()).append(';')
.append(certRecord.getInstanceId()).append(';') | 1 | /*
* Copyright 2020 Verizon Media
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zts.notification;
import com.yahoo.athenz.auth.util.AthenzUtils;
import com.yahoo.athenz.auth.util.GlobStringsMatcher;
import com.yahoo.athenz.common.server.cert.X509CertRecord;
import com.yahoo.athenz.common.server.dns.HostnameResolver;
import com.yahoo.athenz.common.server.notification.*;
import com.yahoo.athenz.common.server.util.ResourceUtils;
import com.yahoo.athenz.zts.ZTSConsts;
import com.yahoo.athenz.zts.cert.InstanceCertManager;
import com.yahoo.athenz.zts.store.DataStore;
import com.yahoo.rdl.Timestamp;
import org.eclipse.jetty.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.MessageFormat;
import java.util.*;
import java.util.stream.Collectors;
import static com.yahoo.athenz.common.ServerCommonConsts.ADMIN_ROLE_NAME;
import static com.yahoo.athenz.common.ServerCommonConsts.USER_DOMAIN_PREFIX;
import static com.yahoo.athenz.common.server.notification.NotificationServiceConstants.*;
import static com.yahoo.athenz.common.server.notification.impl.MetricNotificationService.*;
public class CertFailedRefreshNotificationTask implements NotificationTask {
private final String serverName;
private final List<String> providers;
private final InstanceCertManager instanceCertManager;
private final NotificationCommon notificationCommon;
private static final Logger LOGGER = LoggerFactory.getLogger(CertFailedRefreshNotificationTask.class);
private final static String DESCRIPTION = "certificate failed refresh notification";
private final HostnameResolver hostnameResolver;
private final CertFailedRefreshNotificationToEmailConverter certFailedRefreshNotificationToEmailConverter;
private final CertFailedRefreshNotificationToMetricConverter certFailedRefreshNotificationToMetricConverter;
private final GlobStringsMatcher globStringsMatcher;
public CertFailedRefreshNotificationTask(InstanceCertManager instanceCertManager,
DataStore dataStore,
HostnameResolver hostnameResolver,
String userDomainPrefix,
String serverName,
int httpsPort) {
this.serverName = serverName;
this.providers = getProvidersList();
this.instanceCertManager = instanceCertManager;
DomainRoleMembersFetcher domainRoleMembersFetcher = new DomainRoleMembersFetcher(dataStore, USER_DOMAIN_PREFIX);
this.notificationCommon = new NotificationCommon(domainRoleMembersFetcher, userDomainPrefix);
this.hostnameResolver = hostnameResolver;
final String apiHostName = System.getProperty(ZTSConsts.ZTS_PROP_NOTIFICATION_API_HOSTNAME, serverName);
this.certFailedRefreshNotificationToEmailConverter = new CertFailedRefreshNotificationToEmailConverter(apiHostName, httpsPort);
this.certFailedRefreshNotificationToMetricConverter = new CertFailedRefreshNotificationToMetricConverter();
globStringsMatcher = new GlobStringsMatcher(ZTSConsts.ZTS_PROP_NOTIFICATION_CERT_FAIL_IGNORED_SERVICES_LIST);
}
private List<String> getProvidersList() {
return AthenzUtils.splitCommaSeparatedSystemProperty(ZTSConsts.ZTS_PROP_NOTIFICATION_CERT_FAIL_PROVIDER_LIST);
}
@Override
public List<Notification> getNotifications() {
if (providers == null || providers.isEmpty()) {
LOGGER.warn("No configured providers. Notifications will not be sent.");
return new ArrayList<>();
}
List<X509CertRecord> unrefreshedCerts = new ArrayList<>();
for (String provider : providers) {
unrefreshedCerts.addAll(instanceCertManager.getUnrefreshedCertsNotifications(serverName, provider));
}
if (unrefreshedCerts.isEmpty()) {
LOGGER.info("No unrefreshed certificates available to send notifications");
return new ArrayList<>();
}
List<X509CertRecord> unrefreshedCertsValidServices = getRecordsWithValidServices(unrefreshedCerts);
if (unrefreshedCertsValidServices.isEmpty()) {
LOGGER.info("No unrefreshed certificates with configured services available to send notifications");
return new ArrayList<>();
}
List<X509CertRecord> unrefreshedCertsValidHosts = getRecordsWithValidHosts(unrefreshedCertsValidServices);
if (unrefreshedCertsValidHosts.isEmpty()) {
LOGGER.info("No unrefreshed certificates with valid hosts available to send notifications");
return new ArrayList<>();
} else {
LOGGER.info("Number of valid certificate records that will receive notifications: " + unrefreshedCertsValidHosts.size());
}
Map<String, List<X509CertRecord>> domainToCertRecordsMap = getDomainToCertRecordsMap(unrefreshedCertsValidHosts);
return generateNotificationsForAdmins(domainToCertRecordsMap);
}
private List<X509CertRecord> getRecordsWithValidServices(List<X509CertRecord> unrefreshedCerts) {
return unrefreshedCerts.stream()
.filter(record -> !globStringsMatcher.isMatch(record.getService()))
.collect(Collectors.toList());
}
private List<Notification> generateNotificationsForAdmins(Map<String, List<X509CertRecord>> domainToCertRecordsMap) {
List<Notification> notificationList = new ArrayList<>();
domainToCertRecordsMap.forEach((domain, records) -> {
Map<String, String> details = getNotificationDetails(domain, records);
Notification notification = notificationCommon.createNotification(
ResourceUtils.roleResourceName(domain, ADMIN_ROLE_NAME),
details,
certFailedRefreshNotificationToEmailConverter,
certFailedRefreshNotificationToMetricConverter);
if (notification != null) {
notificationList.add(notification);
}
});
return notificationList;
}
private List<X509CertRecord> getRecordsWithValidHosts(List<X509CertRecord> unrefreshedCerts) {
        unrefreshedCerts.stream()
                .filter(record -> StringUtil.isEmpty(record.getHostName()))
                .forEach(record -> LOGGER.warn("Record with empty hostName: " + record.toString()));
        // Filter out records with non-existing hosts or hosts not recognized by DNS
return unrefreshedCerts.stream()
.filter(record -> !StringUtil.isEmpty(record.getHostName()) && (hostnameResolver == null || hostnameResolver.isValidHostname(record.getHostName())))
.collect(Collectors.toList());
}
private Map<String, String> getNotificationDetails(String domainName, List<X509CertRecord> certRecords) {
Map<String, String> details = new HashMap<>();
        // Each domain can have multiple certificates that failed to refresh.
        // We collect them into one string, with entries separated by '|'. The format is:
        // certificateRecords := <certificate-entry>[|<certificate-entry>]*
        // certificate-entry := <Service Name>;<Provider>;<InstanceID>;<Last refresh time>;<Expiration time>;<Hostname>
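        // To make the format concrete, an example with hypothetical values
        // (two entries, for services "api" and "ui"):
        //   api;sys.provider;i-1234;2020-01-01T10:00:00.000Z;2020-03-01T10:00:00.000Z;host1.athenz.cloud|ui;sys.provider;i-5678;2020-01-02T10:00:00.000Z;2020-03-02T10:00:00.000Z;host2.athenz.cloud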
StringBuilder certDetails = new StringBuilder(256);
for (X509CertRecord certRecord : certRecords) {
if (certDetails.length() != 0) {
certDetails.append('|');
}
String expiryTime = getTimestampAsString(certRecord.getExpiryTime());
String hostName = (certRecord.getHostName() != null) ? certRecord.getHostName() : "";
certDetails.append(AthenzUtils.extractPrincipalServiceName(certRecord.getService())).append(';')
.append(certRecord.getProvider()).append(';')
.append(certRecord.getInstanceId()).append(';')
.append(getTimestampAsString(certRecord.getCurrentTime())).append(';')
.append(expiryTime).append(';')
.append(hostName);
}
details.put(NOTIFICATION_DETAILS_UNREFRESHED_CERTS, certDetails.toString());
details.put(NOTIFICATION_DETAILS_DOMAIN, domainName);
return details;
}
private Map<String, List<X509CertRecord>> getDomainToCertRecordsMap(List<X509CertRecord> unrefreshedRecords) {
Map<String, List<X509CertRecord>> domainToCertRecords = new HashMap<>();
for (X509CertRecord x509CertRecord: unrefreshedRecords) {
String domainName = AthenzUtils.extractPrincipalDomainName(x509CertRecord.getService());
LOGGER.info("processing domain={}, hostName={}", domainName, x509CertRecord.getHostName());
domainToCertRecords.putIfAbsent(domainName, new ArrayList<>());
domainToCertRecords.get(domainName).add(x509CertRecord);
}
return domainToCertRecords;
}
private String getTimestampAsString(Date date) {
return (date != null) ? Timestamp.fromMillis(date.getTime()).toString() : "";
}
@Override
public String getDescription() {
return DESCRIPTION;
}
public static class CertFailedRefreshNotificationToEmailConverter implements NotificationToEmailConverter {
private static final String EMAIL_TEMPLATE_UNREFRESHED_CERTS = "messages/unrefreshed-certs.html";
private static final String UNREFRESHED_CERTS_SUBJECT = "athenz.notification.email.unrefreshed.certs.subject";
private final NotificationToEmailConverterCommon notificationToEmailConverterCommon;
private String emailUnrefreshedCertsBody;
private final String serverName;
private final int httpsPort;
public CertFailedRefreshNotificationToEmailConverter(final String serverName, int httpsPort) {
notificationToEmailConverterCommon = new NotificationToEmailConverterCommon();
emailUnrefreshedCertsBody = notificationToEmailConverterCommon.readContentFromFile(getClass().getClassLoader(), EMAIL_TEMPLATE_UNREFRESHED_CERTS);
this.serverName = serverName;
this.httpsPort = httpsPort;
}
private String getUnrefreshedCertsBody(Map<String, String> metaDetails) {
if (metaDetails == null) {
return null;
}
String bodyWithDeleteEndpoint = addInstanceDeleteEndpointDetails(metaDetails, emailUnrefreshedCertsBody);
return notificationToEmailConverterCommon.generateBodyFromTemplate(
metaDetails,
bodyWithDeleteEndpoint,
NOTIFICATION_DETAILS_DOMAIN,
NOTIFICATION_DETAILS_UNREFRESHED_CERTS,
6);
}
private String addInstanceDeleteEndpointDetails(Map<String, String> metaDetails, String messageWithoutZtsDeleteEndpoint) {
String ztsApiAddress = serverName + ":" + httpsPort;
String domainPlaceHolder = metaDetails.get(NOTIFICATION_DETAILS_DOMAIN);
String providerPlaceHolder = "<PROVIDER>";
String servicePlaceHolder = "<SERVICE>";
String instanceIdHolder = "<INSTANCE-ID>";
long numberOfRecords = metaDetails.get(NOTIFICATION_DETAILS_UNREFRESHED_CERTS)
.chars()
.filter(ch -> ch == '|')
.count() + 1;
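// Records are '|'-separated, so the number of records is the separator count plus one.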
// If there is only one record, fill in its real values to make things easier for the recipient
if (numberOfRecords == 1) {
String[] recordDetails = metaDetails.get(NOTIFICATION_DETAILS_UNREFRESHED_CERTS).split(";");
servicePlaceHolder = recordDetails[0];
providerPlaceHolder = recordDetails[1];
instanceIdHolder = recordDetails[2];
}
return MessageFormat.format(messageWithoutZtsDeleteEndpoint,
"{0}", "{1}", "{2}", "{3}", // Skip template arguments that will be filled later
ztsApiAddress,
providerPlaceHolder,
domainPlaceHolder,
servicePlaceHolder,
instanceIdHolder);
}
@Override
public NotificationEmail getNotificationAsEmail(Notification notification) {
String subject = notificationToEmailConverterCommon.getSubject(UNREFRESHED_CERTS_SUBJECT);
String body = getUnrefreshedCertsBody(notification.getDetails());
Set<String> fullyQualifiedEmailAddresses = notificationToEmailConverterCommon.getFullyQualifiedEmailAddresses(notification.getRecipients());
return new NotificationEmail(subject, body, fullyQualifiedEmailAddresses);
}
}
public static class CertFailedRefreshNotificationToMetricConverter implements NotificationToMetricConverter {
private static final String NOTIFICATION_TYPE = "cert_fail_refresh";
private final NotificationToMetricConverterCommon notificationToMetricConverterCommon = new NotificationToMetricConverterCommon();
@Override
public NotificationMetric getNotificationAsMetrics(Notification notification, Timestamp currentTime) {
Map<String, String> details = notification.getDetails();
String domain = details.get(NOTIFICATION_DETAILS_DOMAIN);
List<String[]> attributes = new ArrayList<>();
String[] records = details.get(NOTIFICATION_DETAILS_UNREFRESHED_CERTS).split("\\|");
String currentTimeStr = currentTime.toString();
for (String record: records) {
String[] recordAttributes = record.split(";");
String[] metricRecord = new String[]{
METRIC_NOTIFICATION_TYPE_KEY, NOTIFICATION_TYPE,
METRIC_NOTIFICATION_DOMAIN_KEY, domain,
METRIC_NOTIFICATION_SERVICE_KEY, recordAttributes[0],
METRIC_NOTIFICATION_PROVIDER_KEY, recordAttributes[1],
METRIC_NOTIFICATION_INSTANCE_ID_KEY, recordAttributes[2],
METRIC_NOTIFICATION_UPDATE_DAYS_KEY, notificationToMetricConverterCommon.getNumberOfDaysBetweenTimestamps(currentTimeStr, recordAttributes[3]),
METRIC_NOTIFICATION_EXPIRY_DAYS_KEY, notificationToMetricConverterCommon.getNumberOfDaysBetweenTimestamps(currentTimeStr, recordAttributes[4])
};
attributes.add(metricRecord);
}
return new NotificationMetric(attributes);
}
}
}
| 1 | 5,473 | At this point we already check that the record is valid and so it could never be nulll | AthenZ-athenz | java |
@@ -126,6 +126,7 @@ class UserController < ApplicationController
(params[:user][:auth_provider] == current_user.auth_provider &&
params[:user][:auth_uid] == current_user.auth_uid)
update_user(current_user, params)
+ @title = t "user.account.title"
else
session[:new_user_settings] = params
redirect_to auth_url(params[:user][:auth_provider], params[:user][:auth_uid]) | 1 | class UserController < ApplicationController
layout "site", :except => [:api_details]
skip_before_action :verify_authenticity_token, :only => [:api_read, :api_details, :api_gpx_files, :auth_success]
before_action :disable_terms_redirect, :only => [:terms, :save, :logout, :api_details]
before_action :authorize, :only => [:api_details, :api_gpx_files]
before_action :authorize_web, :except => [:api_read, :api_details, :api_gpx_files]
before_action :set_locale, :except => [:api_read, :api_details, :api_gpx_files]
before_action :require_user, :only => [:account, :go_public, :make_friend, :remove_friend]
before_action :require_self, :only => [:account]
before_action :check_database_readable, :except => [:login, :api_read, :api_details, :api_gpx_files]
before_action :check_database_writable, :only => [:new, :account, :confirm, :confirm_email, :lost_password, :reset_password, :go_public, :make_friend, :remove_friend]
before_action :check_api_readable, :only => [:api_read, :api_details, :api_gpx_files]
before_action :require_allow_read_prefs, :only => [:api_details]
before_action :require_allow_read_gpx, :only => [:api_gpx_files]
before_action :require_cookies, :only => [:new, :login, :confirm]
before_action :require_administrator, :only => [:set_status, :delete, :list]
around_action :api_call_handle_error, :only => [:api_read, :api_details, :api_gpx_files]
before_action :lookup_user_by_id, :only => [:api_read]
before_action :lookup_user_by_name, :only => [:set_status, :delete]
before_action :allow_thirdparty_images, :only => [:view, :account]
def terms
@legale = params[:legale] || OSM.ip_to_country(request.remote_ip) || DEFAULT_LEGALE
@text = OSM.legal_text_for_country(@legale)
if request.xhr?
render :partial => "terms"
else
@title = t "user.terms.title"
if current_user && current_user.terms_agreed?
# Already agreed to terms, so just show settings
redirect_to :action => :account, :display_name => current_user.display_name
elsif current_user.nil? && session[:new_user].nil?
redirect_to :action => :login, :referer => request.fullpath
end
end
end
def save
@title = t "user.new.title"
if params[:decline]
if current_user
current_user.terms_seen = true
flash[:notice] = t("user.new.terms declined", :url => t("user.new.terms declined url")).html_safe if current_user.save
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => :account, :display_name => current_user.display_name
end
else
redirect_to t("user.terms.declined")
end
elsif current_user
unless current_user.terms_agreed?
current_user.consider_pd = params[:user][:consider_pd]
current_user.terms_agreed = Time.now.getutc
current_user.terms_seen = true
flash[:notice] = t "user.new.terms accepted" if current_user.save
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => :account, :display_name => current_user.display_name
end
else
self.current_user = session.delete(:new_user)
if check_signup_allowed(current_user.email)
current_user.data_public = true
current_user.description = "" if current_user.description.nil?
current_user.creation_ip = request.remote_ip
current_user.languages = http_accept_language.user_preferred_languages
current_user.terms_agreed = Time.now.getutc
current_user.terms_seen = true
if current_user.auth_uid.blank?
current_user.auth_provider = nil
current_user.auth_uid = nil
end
if current_user.save
flash[:piwik_goal] = PIWIK["goals"]["signup"] if defined?(PIWIK)
referer = welcome_path
begin
uri = URI(session[:referer])
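# e.g. a referer fragment like "map=12/51.5/-0.1" (illustrative values) yields zoom=12, lat=51.5, lon=-0.1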
%r{map=(.*)/(.*)/(.*)}.match(uri.fragment) do |m|
editor = Rack::Utils.parse_query(uri.query).slice("editor")
referer = welcome_path({ "zoom" => m[1],
"lat" => m[2],
"lon" => m[3] }.merge(editor))
end
rescue StandardError
# Use default
end
if current_user.status == "active"
session[:referer] = referer
successful_login(current_user)
else
session[:token] = current_user.tokens.create.token
Notifier.signup_confirm(current_user, current_user.tokens.create(:referer => referer)).deliver_now
redirect_to :action => "confirm", :display_name => current_user.display_name
end
else
render :action => "new", :referer => params[:referer]
end
end
end
end
def account
@title = t "user.account.title"
@tokens = current_user.oauth_tokens.authorized
if params[:user] && params[:user][:display_name] && params[:user][:description]
if params[:user][:auth_provider].blank? ||
(params[:user][:auth_provider] == current_user.auth_provider &&
params[:user][:auth_uid] == current_user.auth_uid)
update_user(current_user, params)
else
session[:new_user_settings] = params
redirect_to auth_url(params[:user][:auth_provider], params[:user][:auth_uid])
end
elsif errors = session.delete(:user_errors)
errors.each do |attribute, error|
current_user.errors.add(attribute, error)
end
end
end
def go_public
current_user.data_public = true
current_user.save
flash[:notice] = t "user.go_public.flash success"
redirect_to :action => "account", :display_name => current_user.display_name
end
def lost_password
@title = t "user.lost_password.title"
if params[:user] && params[:user][:email]
user = User.visible.find_by(:email => params[:user][:email])
if user.nil?
users = User.visible.where("LOWER(email) = LOWER(?)", params[:user][:email])
user = users.first if users.count == 1
end
if user
token = user.tokens.create
Notifier.lost_password(user, token).deliver_now
flash[:notice] = t "user.lost_password.notice email on way"
redirect_to :action => "login"
else
flash.now[:error] = t "user.lost_password.notice email cannot find"
end
end
end
def reset_password
@title = t "user.reset_password.title"
if params[:token]
token = UserToken.find_by(:token => params[:token])
if token
self.current_user = token.user
if params[:user]
current_user.pass_crypt = params[:user][:pass_crypt]
current_user.pass_crypt_confirmation = params[:user][:pass_crypt_confirmation]
current_user.status = "active" if current_user.status == "pending"
current_user.email_valid = true
if current_user.save
token.destroy
flash[:notice] = t "user.reset_password.flash changed"
successful_login(current_user)
end
end
else
flash[:error] = t "user.reset_password.flash token bad"
redirect_to :action => "lost_password"
end
else
head :bad_request
end
end
def new
@title = t "user.new.title"
@referer = params[:referer] || session[:referer]
append_content_security_policy_directives(
:form_action => %w[accounts.google.com *.facebook.com login.live.com github.com meta.wikimedia.org]
)
if current_user
# The user is logged in already, so don't show them the signup
# page; instead, send them to the home page
if @referer
redirect_to @referer
else
redirect_to :controller => "site", :action => "index"
end
elsif params.key?(:auth_provider) && params.key?(:auth_uid)
self.current_user = User.new(:email => params[:email],
:email_confirmation => params[:email],
:display_name => params[:nickname],
:auth_provider => params[:auth_provider],
:auth_uid => params[:auth_uid])
flash.now[:notice] = render_to_string :partial => "auth_association"
else
check_signup_allowed
self.current_user = User.new
end
end
def create
self.current_user = User.new(user_params)
if check_signup_allowed(current_user.email)
session[:referer] = params[:referer]
current_user.status = "pending"
if current_user.auth_provider.present? && current_user.pass_crypt.empty?
# We are creating an account with external authentication and
# no password was specified, so create a random one
current_user.pass_crypt = SecureRandom.base64(16)
current_user.pass_crypt_confirmation = current_user.pass_crypt
end
if current_user.invalid?
# Something is wrong with a new user, so rerender the form
render :action => "new"
elsif current_user.auth_provider.present?
# Verify external authenticator before moving on
session[:new_user] = current_user
redirect_to auth_url(current_user.auth_provider, current_user.auth_uid)
else
# Save the user record
session[:new_user] = current_user
redirect_to :action => :terms
end
end
end
def login
session[:referer] = params[:referer] if params[:referer]
if params[:username].present? && params[:password].present?
session[:remember_me] ||= params[:remember_me]
password_authentication(params[:username], params[:password])
elsif params[:openid_url].present?
session[:remember_me] ||= params[:remember_me_openid]
redirect_to auth_url("openid", params[:openid_url], params[:referer])
end
end
def logout
@title = t "user.logout.title"
if params[:session] == session.id
if session[:token]
token = UserToken.find_by(:token => session[:token])
token.destroy if token
session.delete(:token)
end
session.delete(:user)
session_expires_automatically
if params[:referer]
redirect_to params[:referer]
else
redirect_to :controller => "site", :action => "index"
end
end
end
def confirm
if request.post?
token = UserToken.find_by(:token => params[:confirm_string])
if token && token.user.active?
flash[:error] = t("user.confirm.already active")
redirect_to :action => "login"
elsif !token || token.expired?
flash[:error] = t("user.confirm.unknown token")
redirect_to :action => "confirm"
else
user = token.user
user.status = "active"
user.email_valid = true
flash[:notice] = gravatar_status_message(user) if gravatar_enable(user)
user.save!
referer = token.referer
token.destroy
if session[:token]
token = UserToken.find_by(:token => session[:token])
session.delete(:token)
else
token = nil
end
if token.nil? || token.user != user
flash[:notice] = t("user.confirm.success")
redirect_to :action => :login, :referer => referer
else
token.destroy
session[:user] = user.id
redirect_to referer || welcome_path
end
end
else
user = User.find_by(:display_name => params[:display_name])
redirect_to root_path if user.nil? || user.active?
end
end
def confirm_resend
user = User.find_by(:display_name => params[:display_name])
token = UserToken.find_by(:token => session[:token])
if user.nil? || token.nil? || token.user != user
flash[:error] = t "user.confirm_resend.failure", :name => params[:display_name]
else
Notifier.signup_confirm(user, user.tokens.create).deliver_now
flash[:notice] = t("user.confirm_resend.success", :email => user.email, :sender => SUPPORT_EMAIL).html_safe
end
redirect_to :action => "login"
end
def confirm_email
if request.post?
token = UserToken.find_by(:token => params[:confirm_string])
if token && token.user.new_email?
self.current_user = token.user
current_user.email = current_user.new_email
current_user.new_email = nil
current_user.email_valid = true
gravatar_enabled = gravatar_enable(current_user)
if current_user.save
flash[:notice] = if gravatar_enabled
t("user.confirm_email.success") + " " + gravatar_status_message(current_user)
else
t("user.confirm_email.success")
end
else
flash[:errors] = current_user.errors
end
token.destroy
session[:user] = current_user.id
redirect_to :action => "account", :display_name => current_user.display_name
elsif token
flash[:error] = t "user.confirm_email.failure"
redirect_to :action => "account", :display_name => token.user.display_name
else
flash[:error] = t "user.confirm_email.unknown_token"
end
end
end
def api_read
if @user.visible?
render :action => :api_read, :content_type => "text/xml"
else
head :gone
end
end
def api_details
@user = current_user
render :action => :api_read, :content_type => "text/xml"
end
def api_gpx_files
doc = OSM::API.new.get_xml_doc
current_user.traces.reload.each do |trace|
doc.root << trace.to_xml_node
end
render :xml => doc.to_s
end
def view
@user = User.find_by(:display_name => params[:display_name])
if @user &&
(@user.visible? || (current_user && current_user.administrator?))
@title = @user.display_name
else
render_unknown_user params[:display_name]
end
end
def make_friend
@new_friend = User.find_by(:display_name => params[:display_name])
if @new_friend
if request.post?
friend = Friend.new
friend.befriender = current_user
friend.befriendee = @new_friend
if current_user.is_friends_with?(@new_friend)
flash[:warning] = t "user.make_friend.already_a_friend", :name => @new_friend.display_name
elsif friend.save
flash[:notice] = t "user.make_friend.success", :name => @new_friend.display_name
Notifier.friend_notification(friend).deliver_now
else
friend.add_error(t("user.make_friend.failed", :name => @new_friend.display_name))
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => "view"
end
end
else
render_unknown_user params[:display_name]
end
end
def remove_friend
@friend = User.find_by(:display_name => params[:display_name])
if @friend
if request.post?
if current_user.is_friends_with?(@friend)
Friend.where(:user_id => current_user.id, :friend_user_id => @friend.id).delete_all
flash[:notice] = t "user.remove_friend.success", :name => @friend.display_name
else
flash[:error] = t "user.remove_friend.not_a_friend", :name => @friend.display_name
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => "view"
end
end
else
render_unknown_user params[:display_name]
end
end
##
# sets a user's status
def set_status
@user.status = params[:status]
@user.save
redirect_to :action => "view", :display_name => params[:display_name]
end
##
# delete a user, marking them as deleted and removing personal data
def delete
@user.delete
redirect_to :action => "view", :display_name => params[:display_name]
end
##
# display a list of users matching specified criteria
def list
if request.post?
ids = params[:user].keys.collect(&:to_i)
User.where(:id => ids).update_all(:status => "confirmed") if params[:confirm]
User.where(:id => ids).update_all(:status => "deleted") if params[:hide]
redirect_to url_for(:status => params[:status], :ip => params[:ip], :page => params[:page])
else
@params = params.permit(:status, :ip)
conditions = {}
conditions[:status] = @params[:status] if @params[:status]
conditions[:creation_ip] = @params[:ip] if @params[:ip]
@user_pages, @users = paginate(:users,
:conditions => conditions,
:order => :id,
:per_page => 50)
end
end
##
# omniauth success callback
def auth_success
auth_info = request.env["omniauth.auth"]
provider = auth_info[:provider]
uid = auth_info[:uid]
name = auth_info[:info][:name]
email = auth_info[:info][:email]
case provider
when "openid"
email_verified = uid.match(%r{https://www.google.com/accounts/o8/id?(.*)}) ||
uid.match(%r{https://me.yahoo.com/(.*)})
when "google", "facebook"
email_verified = true
else
email_verified = false
end
if settings = session.delete(:new_user_settings)
current_user.auth_provider = provider
current_user.auth_uid = uid
update_user(current_user, settings)
session[:user_errors] = current_user.errors.as_json
redirect_to :action => "account", :display_name => current_user.display_name
elsif session[:new_user]
session[:new_user].auth_provider = provider
session[:new_user].auth_uid = uid
session[:new_user].status = "active" if email_verified && email == session[:new_user].email
redirect_to :action => "terms"
else
user = User.find_by(:auth_provider => provider, :auth_uid => uid)
if user.nil? && provider == "google"
openid_url = auth_info[:extra][:id_info]["openid_id"]
user = User.find_by(:auth_provider => "openid", :auth_uid => openid_url) if openid_url
user.update(:auth_provider => provider, :auth_uid => uid) if user
end
if user
case user.status
when "pending" then
unconfirmed_login(user)
when "active", "confirmed" then
successful_login(user, request.env["omniauth.params"]["referer"])
when "suspended" then
failed_login t("user.login.account is suspended", :webmaster => "mailto:#{SUPPORT_EMAIL}").html_safe
else
failed_login t("user.login.auth failure")
end
else
redirect_to :action => "new", :nickname => name, :email => email,
:auth_provider => provider, :auth_uid => uid
end
end
end
##
# omniauth failure callback
def auth_failure
flash[:error] = t("user.auth_failure." + params[:message])
redirect_to params[:origin] || login_url
end
private
##
# handle password authentication
def password_authentication(username, password)
if user = User.authenticate(:username => username, :password => password)
successful_login(user)
elsif user = User.authenticate(:username => username, :password => password, :pending => true)
unconfirmed_login(user)
elsif User.authenticate(:username => username, :password => password, :suspended => true)
failed_login t("user.login.account is suspended", :webmaster => "mailto:#{SUPPORT_EMAIL}").html_safe, username
else
failed_login t("user.login.auth failure"), username
end
end
##
# return the URL to use for authentication
def auth_url(provider, uid, referer = nil)
params = { :provider => provider }
params[:openid_url] = openid_expand_url(uid) if provider == "openid"
if referer.nil?
params[:origin] = request.path
else
params[:origin] = request.path + "?referer=" + CGI.escape(referer)
params[:referer] = referer
end
auth_path(params)
end
##
# special case some common OpenID providers by applying heuristics to
# try and come up with the correct URL based on what the user entered
def openid_expand_url(openid_url)
if openid_url.nil?
nil
elsif openid_url.match(%r{(.*)gmail.com(/?)$}) || openid_url.match(%r{(.*)googlemail.com(/?)$})
# Special case gmail.com as it is potentially a popular OpenID
# provider and, unlike yahoo.com, where it works automatically, Google
# have hidden their OpenID endpoint somewhere obscure, thus making it
# somewhat less user friendly.
"https://www.google.com/accounts/o8/id"
else
openid_url
end
end
##
# process a successful login
def successful_login(user, referer = nil)
session[:user] = user.id
session_expires_after 28.days if session[:remember_me]
target = referer || session[:referer] || url_for(:controller => :site, :action => :index)
# The user is logged in, so decide where to send them:
#
# - If they haven't seen the contributor terms, send them there.
# - If they have a block on them, show them that.
# - If they were referred to the login, send them back there.
# - Otherwise, send them to the home page.
if REQUIRE_TERMS_SEEN && !user.terms_seen
redirect_to :action => :terms, :referer => target
elsif user.blocked_on_view
redirect_to user.blocked_on_view, :referer => target
else
redirect_to target
end
session.delete(:remember_me)
session.delete(:referer)
end
##
# process a failed login
def failed_login(message, username = nil)
flash[:error] = message
redirect_to :action => "login", :referer => session[:referer],
:username => username, :remember_me => session[:remember_me]
session.delete(:remember_me)
session.delete(:referer)
end
##
#
def unconfirmed_login(user)
session[:token] = user.tokens.create.token
redirect_to :action => "confirm", :display_name => user.display_name
session.delete(:remember_me)
session.delete(:referer)
end
##
# update a user's details
def update_user(user, params)
user.display_name = params[:user][:display_name]
user.new_email = params[:user][:new_email]
unless params[:user][:pass_crypt].empty? && params[:user][:pass_crypt_confirmation].empty?
user.pass_crypt = params[:user][:pass_crypt]
user.pass_crypt_confirmation = params[:user][:pass_crypt_confirmation]
end
if params[:user][:description] != user.description
user.description = params[:user][:description]
user.description_format = "markdown"
end
user.languages = params[:user][:languages].split(",")
case params[:image_action]
when "new" then
user.image = params[:user][:image]
user.image_use_gravatar = false
when "delete" then
user.image = nil
user.image_use_gravatar = false
when "gravatar" then
user.image = nil
user.image_use_gravatar = true
end
user.home_lat = params[:user][:home_lat]
user.home_lon = params[:user][:home_lon]
user.preferred_editor = if params[:user][:preferred_editor] == "default"
nil
else
params[:user][:preferred_editor]
end
if params[:user][:auth_provider].nil? || params[:user][:auth_provider].blank?
user.auth_provider = nil
user.auth_uid = nil
end
if user.save
set_locale
if user.new_email.blank? || user.new_email == user.email
flash.now[:notice] = t "user.account.flash update success"
else
user.email = user.new_email
if user.valid?
flash.now[:notice] = t "user.account.flash update success confirm needed"
begin
Notifier.email_confirm(user, user.tokens.create).deliver_now
rescue StandardError
# Ignore errors sending email
end
else
current_user.errors.add(:new_email, current_user.errors[:email])
current_user.errors.add(:email, [])
end
user.restore_email!
end
end
end
##
# require that the user is an administrator, or fill out a helpful error message
# and return them to the user page.
def require_administrator
if current_user && !current_user.administrator?
flash[:error] = t("user.filter.not_an_administrator")
if params[:display_name]
redirect_to :action => "view", :display_name => params[:display_name]
else
redirect_to :action => "login", :referer => request.fullpath
end
elsif !current_user
redirect_to :action => "login", :referer => request.fullpath
end
end
##
# require that the user in the URL is the logged in user
def require_self
head :forbidden if params[:display_name] != current_user.display_name
end
##
# ensure that there is a "user" instance variable
def lookup_user_by_id
@user = User.find(params[:id])
end
##
# ensure that there is a "user" instance variable
def lookup_user_by_name
@user = User.find_by(:display_name => params[:display_name])
rescue ActiveRecord::RecordNotFound
redirect_to :action => "view", :display_name => params[:display_name] unless @user
end
##
#
def disable_terms_redirect
# this is necessary, otherwise visiting the user terms page when
# you haven't already agreed would cause an infinite redirect loop.
# it's flash.now so that this doesn't propagate to other pages.
flash.now[:skip_terms] = true
end
##
# return permitted user parameters
def user_params
params.require(:user).permit(:email, :email_confirmation, :display_name,
:auth_provider, :auth_uid,
:pass_crypt, :pass_crypt_confirmation)
end
##
# check signup acls
def check_signup_allowed(email = nil)
domain = if email.nil?
nil
else
email.split("@").last
end
if blocked = Acl.no_account_creation(request.remote_ip, domain)
logger.info "Blocked signup from #{request.remote_ip} for #{email}"
render :action => "blocked"
end
!blocked
end
##
# check if this user has a gravatar and set the user pref accordingly
def gravatar_enable(user)
# code from example https://en.gravatar.com/site/implement/images/ruby/
return false if user.image.present?
hash = Digest::MD5.hexdigest(user.email.downcase)
url = "https://www.gravatar.com/avatar/#{hash}?d=404" # without d=404 we will always get an image back
response = OSM.http_client.get(URI.parse(url))
oldsetting = user.image_use_gravatar
user.image_use_gravatar = response.success?
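# return true only if the gravatar preference actually changed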
oldsetting != user.image_use_gravatar
end
##
# display a message about the current status of the gravatar setting
def gravatar_status_message(user)
if user.image_use_gravatar
t "user.account.gravatar.enabled"
else
t "user.account.gravatar.disabled"
end
end
end
| 1 | 11,301 | Would it not be more sensible just to move setting of the title to later in the method rather than duplicating it here? | openstreetmap-openstreetmap-website | rb |
@@ -130,6 +130,7 @@ void MetaClient::heartBeatThreadFunc() {
localLastUpdateTime_ = metadLastUpdateTime_;
}
}
+ uploadSession();
}
bool MetaClient::loadUsersAndRoles() { | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "time/Duration.h"
#include "meta/common/MetaCommon.h"
#include "meta/client/MetaClient.h"
#include "network/NetworkUtils.h"
#include "meta/NebulaSchemaProvider.h"
#include "meta/ClusterIdMan.h"
#include "meta/GflagsManager.h"
#include "base/Configuration.h"
#include "stats/StatsManager.h"
#include <folly/ScopeGuard.h>
DEFINE_int32(heartbeat_interval_secs, 3, "Heartbeat interval");
DEFINE_int32(meta_client_retry_times, 3, "meta client retry times, 0 means no retry");
DEFINE_int32(meta_client_retry_interval_secs, 1, "meta client sleep interval between retry");
DEFINE_int32(meta_client_timeout_ms, 60 * 1000, "meta client timeout");
DEFINE_string(cluster_id_path, "cluster.id", "file path saved clusterId");
DECLARE_string(gflags_mode_json);
namespace nebula {
namespace meta {
MetaClient::MetaClient(std::shared_ptr<folly::IOThreadPoolExecutor> ioThreadPool,
std::vector<HostAddr> addrs,
const MetaClientOptions& options)
: ioThreadPool_(ioThreadPool)
, addrs_(std::move(addrs))
, options_(options) {
CHECK(ioThreadPool_ != nullptr) << "IOThreadPool is required";
CHECK(!addrs_.empty())
<< "No meta server address is specified. Meta server is required";
clientsMan_ = std::make_shared<
thrift::ThriftClientManager<meta::cpp2::MetaServiceAsyncClient>
>();
updateActive();
updateLeader();
bgThread_ = std::make_unique<thread::GenericWorker>();
LOG(INFO) << "Create meta client to " << active_;
stats_ = std::make_unique<stats::Stats>(options_.serviceName_, "metaClient");
}
MetaClient::~MetaClient() {
stop();
VLOG(3) << "~MetaClient";
}
bool MetaClient::isMetadReady() {
auto ret = heartbeat().get();
if (!ret.ok() && ret.status() != Status::LeaderChanged()) {
LOG(ERROR) << "Heartbeat failed, status:" << ret.status();
ready_ = false;
return ready_;
}
bool ldRet = loadData();
bool lcRet = true;
if (!options_.skipConfig_) {
lcRet = loadCfg();
}
if (ldRet && lcRet) {
localLastUpdateTime_ = metadLastUpdateTime_;
}
return ready_;
}
bool MetaClient::waitForMetadReady(int count, int retryIntervalSecs) {
if (!options_.skipConfig_) {
std::string gflagsJsonPath;
GflagsManager::getGflagsModule(gflagsModule_);
gflagsDeclared_ = GflagsManager::declareGflags(gflagsModule_);
}
isRunning_ = true;
int tryCount = count;
while (!isMetadReady() && ((count == -1) || (tryCount > 0)) && isRunning_) {
LOG(INFO) << "Waiting for the metad to be ready!";
--tryCount;
::sleep(retryIntervalSecs);
} // end while
if (!isRunning_) {
LOG(ERROR) << "Connect to the MetaServer Failed";
return false;
}
CHECK(bgThread_->start());
LOG(INFO) << "Register time task for heartbeat!";
size_t delayMS = FLAGS_heartbeat_interval_secs * 1000 + folly::Random::rand32(900);
bgThread_->addDelayTask(delayMS, &MetaClient::heartBeatThreadFunc, this);
return ready_;
}
void MetaClient::stop() {
if (bgThread_ != nullptr) {
bgThread_->stop();
bgThread_->wait();
bgThread_.reset();
}
isRunning_ = false;
}
void MetaClient::heartBeatThreadFunc() {
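// SCOPE_EXIT guarantees that the next heartbeat is scheduled even if this run returns early on error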
SCOPE_EXIT {
bgThread_->addDelayTask(FLAGS_heartbeat_interval_secs * 1000,
&MetaClient::heartBeatThreadFunc,
this);
};
auto ret = heartbeat().get();
if (!ret.ok()) {
LOG(ERROR) << "Heartbeat failed, status:" << ret.status();
return;
}
// if MetaServer has some changes, refresh the localCache_
if (localLastUpdateTime_ < metadLastUpdateTime_) {
bool ldRet = loadData();
bool lcRet = true;
if (!options_.skipConfig_) {
lcRet = loadCfg();
}
if (ldRet && lcRet) {
localLastUpdateTime_ = metadLastUpdateTime_;
}
}
}
bool MetaClient::loadUsersAndRoles() {
auto userRoleRet = listUsers().get();
if (!userRoleRet.ok()) {
LOG(ERROR) << "List users failed, status:" << userRoleRet.status();
return false;
}
decltype(userRolesMap_) userRolesMap;
decltype(userPasswordMap_) userPasswordMap;
for (auto& user : userRoleRet.value()) {
auto rolesRet = getUserRoles(user.first).get();
if (!rolesRet.ok()) {
LOG(ERROR) << "List role by user failed, user : " << user.first;
return false;
}
userRolesMap[user.first] = rolesRet.value();
userPasswordMap[user.first] = user.second;
}
{
folly::RWSpinLock::WriteHolder holder(localCacheLock_);
userRolesMap_ = std::move(userRolesMap);
userPasswordMap_ = std::move(userPasswordMap);
}
return true;
}
bool MetaClient::loadData() {
if (ioThreadPool_->numThreads() <= 0) {
LOG(ERROR) << "The threads number in ioThreadPool should be greater than 0";
return false;
}
if (!loadUsersAndRoles()) {
LOG(ERROR) << "Load roles Failed";
return false;
}
auto ret = listSpaces().get();
if (!ret.ok()) {
LOG(ERROR) << "List space failed, status:" << ret.status();
return false;
}
decltype(localCache_) cache;
decltype(spaceIndexByName_) spaceIndexByName;
decltype(spaceTagIndexByName_) spaceTagIndexByName;
decltype(spaceEdgeIndexByName_) spaceEdgeIndexByName;
decltype(spaceNewestTagVerMap_) spaceNewestTagVerMap;
decltype(spaceNewestEdgeVerMap_) spaceNewestEdgeVerMap;
decltype(spaceEdgeIndexByType_) spaceEdgeIndexByType;
decltype(spaceTagIndexById_) spaceTagIndexById;
decltype(spaceAllEdgeMap_) spaceAllEdgeMap;
for (auto space : ret.value()) {
auto spaceId = space.first;
auto r = getPartsAlloc(spaceId).get();
if (!r.ok()) {
LOG(ERROR) << "Get parts allocation failed for spaceId " << spaceId
<< ", status " << r.status();
return false;
}
auto spaceCache = std::make_shared<SpaceInfoCache>();
auto partsAlloc = r.value();
spaceCache->spaceName = space.second;
spaceCache->partsOnHost_ = reverse(partsAlloc);
spaceCache->partsAlloc_ = std::move(partsAlloc);
VLOG(2) << "Load space " << spaceId
<< ", parts num:" << spaceCache->partsAlloc_.size();
if (!loadSchemas(spaceId,
spaceCache,
spaceTagIndexByName,
spaceTagIndexById,
spaceEdgeIndexByName,
spaceEdgeIndexByType,
spaceNewestTagVerMap,
spaceNewestEdgeVerMap,
spaceAllEdgeMap)) {
LOG(ERROR) << "Load Schemas Failed";
return false;
}
if (!loadIndexes(spaceId,
spaceCache)) {
LOG(ERROR) << "Load Indexes Failed";
return false;
}
cache.emplace(spaceId, spaceCache);
spaceIndexByName.emplace(space.second, spaceId);
}
decltype(localCache_) oldCache;
{
folly::RWSpinLock::WriteHolder holder(localCacheLock_);
oldCache = std::move(localCache_);
localCache_ = std::move(cache);
spaceIndexByName_ = std::move(spaceIndexByName);
spaceTagIndexByName_ = std::move(spaceTagIndexByName);
spaceEdgeIndexByName_ = std::move(spaceEdgeIndexByName);
spaceNewestTagVerMap_ = std::move(spaceNewestTagVerMap);
spaceNewestEdgeVerMap_ = std::move(spaceNewestEdgeVerMap);
spaceEdgeIndexByType_ = std::move(spaceEdgeIndexByType);
spaceTagIndexById_ = std::move(spaceTagIndexById);
spaceAllEdgeMap_ = std::move(spaceAllEdgeMap);
}
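// Notify the registered listener (if any) about spaces/parts added, updated or removed relative to the old cache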
diff(oldCache, localCache_);
ready_ = true;
return true;
}
bool MetaClient::loadSchemas(GraphSpaceID spaceId,
std::shared_ptr<SpaceInfoCache> spaceInfoCache,
SpaceTagNameIdMap &tagNameIdMap,
SpaceTagIdNameMap &tagIdNameMap,
SpaceEdgeNameTypeMap &edgeNameTypeMap,
SpaceEdgeTypeNameMap &edgeTypeNameMap,
SpaceNewestTagVerMap &newestTagVerMap,
SpaceNewestEdgeVerMap &newestEdgeVerMap,
SpaceAllEdgeMap &allEdgeMap) {
auto tagRet = listTagSchemas(spaceId).get();
if (!tagRet.ok()) {
LOG(ERROR) << "Get tag schemas failed for spaceId " << spaceId << ", " << tagRet.status();
return false;
}
auto edgeRet = listEdgeSchemas(spaceId).get();
if (!edgeRet.ok()) {
LOG(ERROR) << "Get edge schemas failed for spaceId " << spaceId << ", " << edgeRet.status();
return false;
}
allEdgeMap[spaceId] = {};
auto tagItemVec = tagRet.value();
auto edgeItemVec = edgeRet.value();
TagSchemas tagSchemas;
EdgeSchemas edgeSchemas;
for (auto& tagIt : tagItemVec) {
std::shared_ptr<NebulaSchemaProvider> schema(new NebulaSchemaProvider(tagIt.version));
for (auto colIt : tagIt.schema.get_columns()) {
schema->addField(colIt.name, std::move(colIt.type));
}
// handle schema property
schema->setProp(tagIt.schema.get_schema_prop());
tagSchemas.emplace(std::make_pair(tagIt.tag_id, tagIt.version), schema);
tagNameIdMap.emplace(std::make_pair(spaceId, tagIt.tag_name), tagIt.tag_id);
tagIdNameMap.emplace(std::make_pair(spaceId, tagIt.tag_id), tagIt.tag_name);
// get the latest tag version
auto it = newestTagVerMap.find(std::make_pair(spaceId, tagIt.tag_id));
if (it != newestTagVerMap.end()) {
if (it->second < tagIt.version) {
it->second = tagIt.version;
}
} else {
newestTagVerMap.emplace(std::make_pair(spaceId, tagIt.tag_id), tagIt.version);
}
VLOG(3) << "Load Tag Schema Space " << spaceId << ", ID " << tagIt.tag_id
<< ", Name " << tagIt.tag_name << ", Version " << tagIt.version << " Successfully!";
}
std::unordered_set<std::pair<GraphSpaceID, EdgeType>> edges;
for (auto& edgeIt : edgeItemVec) {
std::shared_ptr<NebulaSchemaProvider> schema(new NebulaSchemaProvider(edgeIt.version));
for (auto colIt : edgeIt.schema.get_columns()) {
schema->addField(colIt.name, std::move(colIt.type));
}
// handle schema property
schema->setProp(edgeIt.schema.get_schema_prop());
edgeSchemas.emplace(std::make_pair(edgeIt.edge_type, edgeIt.version), schema);
edgeNameTypeMap.emplace(std::make_pair(spaceId, edgeIt.edge_name), edgeIt.edge_type);
edgeTypeNameMap.emplace(std::make_pair(spaceId, edgeIt.edge_type), edgeIt.edge_name);
if (edges.find({spaceId, edgeIt.edge_type}) != edges.cend()) {
continue;
}
edges.emplace(spaceId, edgeIt.edge_type);
allEdgeMap[spaceId].emplace_back(edgeIt.edge_name);
// get the latest edge version
auto it2 = newestEdgeVerMap.find(std::make_pair(spaceId, edgeIt.edge_type));
if (it2 != newestEdgeVerMap.end()) {
if (it2->second < edgeIt.version) {
it2->second = edgeIt.version;
}
} else {
newestEdgeVerMap.emplace(std::make_pair(spaceId, edgeIt.edge_type), edgeIt.version);
}
VLOG(3) << "Load Edge Schema Space " << spaceId << ", Type " << edgeIt.edge_type
<< ", Name " << edgeIt.edge_name << ", Version " << edgeIt.version
<< " Successfully!";
}
spaceInfoCache->tagSchemas_ = std::move(tagSchemas);
spaceInfoCache->edgeSchemas_ = std::move(edgeSchemas);
return true;
}
bool MetaClient::loadIndexes(GraphSpaceID spaceId,
std::shared_ptr<SpaceInfoCache> cache) {
auto tagIndexesRet = listTagIndexes(spaceId).get();
if (!tagIndexesRet.ok()) {
LOG(ERROR) << "Get tag indexes failed for spaceId " << spaceId
<< ", " << tagIndexesRet.status();
return false;
}
auto edgeIndexesRet = listEdgeIndexes(spaceId).get();
if (!edgeIndexesRet.ok()) {
LOG(ERROR) << "Get edge indexes failed for spaceId " << spaceId
<< ", " << edgeIndexesRet.status();
return false;
}
Indexes tagIndexes;
for (auto tagIndex : tagIndexesRet.value()) {
auto indexName = tagIndex.get_index_name();
auto indexID = tagIndex.get_index_id();
std::pair<GraphSpaceID, std::string> pair(spaceId, indexName);
tagNameIndexMap_.emplace(std::move(pair), indexID);
auto tagIndexPtr = std::make_shared<nebula::cpp2::IndexItem>(tagIndex);
tagIndexes.emplace(indexID, tagIndexPtr);
}
cache->tagIndexes_ = std::move(tagIndexes);
Indexes edgeIndexes;
for (auto& edgeIndex : edgeIndexesRet.value()) {
auto indexName = edgeIndex.get_index_name();
auto indexID = edgeIndex.get_index_id();
std::pair<GraphSpaceID, std::string> pair(spaceId, indexName);
edgeNameIndexMap_.emplace(std::move(pair), indexID);
auto edgeIndexPtr = std::make_shared<nebula::cpp2::IndexItem>(edgeIndex);
edgeIndexes.emplace(indexID, edgeIndexPtr);
}
cache->edgeIndexes_ = std::move(edgeIndexes);
return true;
}
Status MetaClient::checkTagIndexed(GraphSpaceID space, TagID tagID) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(space);
if (it != localCache_.end()) {
auto tagIt = it->second->tagIndexes_.find(tagID);
if (tagIt != it->second->tagIndexes_.end()) {
return Status::OK();
} else {
return Status::IndexNotFound();
}
}
return Status::SpaceNotFound();
}
Status MetaClient::checkEdgeIndexed(GraphSpaceID space, EdgeType edgeType) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(space);
if (it != localCache_.end()) {
auto edgeIt = it->second->edgeIndexes_.find(edgeType);
if (edgeIt != it->second->edgeIndexes_.end()) {
return Status::OK();
} else {
return Status::IndexNotFound();
}
}
return Status::SpaceNotFound();
}
std::unordered_map<HostAddr, std::vector<PartitionID>>
MetaClient::reverse(const PartsAlloc& parts) {
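// Invert the part -> hosts mapping into a host -> parts mapping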
std::unordered_map<HostAddr, std::vector<PartitionID>> hosts;
for (auto& partHost : parts) {
for (auto& h : partHost.second) {
hosts[h].emplace_back(partHost.first);
}
}
return hosts;
}
template<typename Request,
typename RemoteFunc,
typename RespGenerator,
typename RpcResponse,
typename Response>
void MetaClient::getResponse(Request req,
RemoteFunc remoteFunc,
RespGenerator respGen,
folly::Promise<StatusOr<Response>> pro,
bool toLeader,
int32_t retry,
int32_t retryLimit) {
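// Send the request to the current leader (or any active metad). On an RPC
// exception or a leader change, refresh the target host and retry up to
// retryLimit times, waiting FLAGS_meta_client_retry_interval_secs between attempts.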
time::Duration duration;
auto* evb = ioThreadPool_->getEventBase();
HostAddr host;
{
folly::RWSpinLock::ReadHolder holder(&hostLock_);
host = toLeader ? leader_ : active_;
}
folly::via(evb, [host, evb, req = std::move(req), remoteFunc = std::move(remoteFunc),
respGen = std::move(respGen), pro = std::move(pro),
toLeader, retry, retryLimit, duration, this] () mutable {
auto client = clientsMan_->client(host, evb, false, FLAGS_meta_client_timeout_ms);
VLOG(1) << "Send request to meta " << host;
remoteFunc(client, req).via(evb)
.then([host, req = std::move(req), remoteFunc = std::move(remoteFunc),
respGen = std::move(respGen), pro = std::move(pro), toLeader, retry,
retryLimit, evb, duration, this] (folly::Try<RpcResponse>&& t) mutable {
// exception occurred during RPC
if (t.hasException()) {
if (toLeader) {
updateLeader();
} else {
updateActive();
}
if (retry < retryLimit) {
evb->runAfterDelay([req = std::move(req), remoteFunc = std::move(remoteFunc),
respGen = std::move(respGen), pro = std::move(pro),
toLeader, retry, retryLimit, this] () mutable {
getResponse(std::move(req),
std::move(remoteFunc),
std::move(respGen),
std::move(pro),
toLeader,
retry + 1,
retryLimit);
}, FLAGS_meta_client_retry_interval_secs * 1000);
return;
} else {
LOG(ERROR) << "Send request to " << host << ", exceed retry limit";
stats::Stats::addStatsValue(stats_.get(), false, duration.elapsedInUSec());
pro.setValue(Status::Error(folly::stringPrintf("RPC failure in MetaClient: %s",
t.exception().what().c_str())));
}
return;
}
auto&& resp = t.value();
if (resp.code == cpp2::ErrorCode::SUCCEEDED) {
// succeeded
stats::Stats::addStatsValue(stats_.get(), true, duration.elapsedInUSec());
pro.setValue(respGen(std::move(resp)));
return;
} else if (resp.code == cpp2::ErrorCode::E_LEADER_CHANGED) {
HostAddr leader(resp.get_leader().get_ip(), resp.get_leader().get_port());
updateLeader(leader);
if (retry < retryLimit) {
evb->runAfterDelay([req = std::move(req), remoteFunc = std::move(remoteFunc),
respGen = std::move(respGen), pro = std::move(pro),
toLeader, retry, retryLimit, this] () mutable {
getResponse(std::move(req),
std::move(remoteFunc),
std::move(respGen),
std::move(pro),
toLeader,
retry + 1,
retryLimit);
}, FLAGS_meta_client_retry_interval_secs * 1000);
return;
}
}
stats::Stats::addStatsValue(stats_.get(),
resp.code == cpp2::ErrorCode::SUCCEEDED,
duration.elapsedInUSec());
pro.setValue(this->handleResponse(resp));
}); // then
}); // via
}
std::vector<HostAddr> MetaClient::to(const std::vector<nebula::cpp2::HostAddr>& tHosts) {
std::vector<HostAddr> hosts;
hosts.resize(tHosts.size());
std::transform(tHosts.begin(), tHosts.end(), hosts.begin(), [](const auto& h) {
return HostAddr(h.get_ip(), h.get_port());
});
return hosts;
}
std::vector<SpaceIdName> MetaClient::toSpaceIdName(const std::vector<cpp2::IdName>& tIdNames) {
std::vector<SpaceIdName> idNames;
idNames.resize(tIdNames.size());
std::transform(tIdNames.begin(), tIdNames.end(), idNames.begin(), [](const auto& tin) {
return SpaceIdName(tin.id.get_space_id(), tin.name);
});
return idNames;
}
template<typename RESP>
Status MetaClient::handleResponse(const RESP& resp) {
switch (resp.get_code()) {
case cpp2::ErrorCode::SUCCEEDED:
return Status::OK();
case cpp2::ErrorCode::E_EXISTED:
return Status::Error("existed!");
case cpp2::ErrorCode::E_NOT_FOUND:
return Status::Error("not existed!");
case cpp2::ErrorCode::E_NO_HOSTS:
return Status::Error("no hosts!");
case cpp2::ErrorCode::E_CONFIG_IMMUTABLE:
return Status::Error("Config immutable");
case cpp2::ErrorCode::E_CONFLICT:
return Status::Error("conflict!");
case cpp2::ErrorCode::E_WRONGCLUSTER:
return Status::Error("wrong cluster!");
case cpp2::ErrorCode::E_LEADER_CHANGED:
return Status::LeaderChanged("Leader changed!");
case cpp2::ErrorCode::E_BALANCED:
return Status::Error("The cluster is balanced!");
case cpp2::ErrorCode::E_BALANCER_RUNNING:
return Status::Error("The balancer is running!");
case cpp2::ErrorCode::E_BAD_BALANCE_PLAN:
return Status::Error("Bad balance plan!");
case cpp2::ErrorCode::E_NO_RUNNING_BALANCE_PLAN:
return Status::Error("No running balance plan!");
case cpp2::ErrorCode::E_NO_VALID_HOST:
return Status::Error("No valid host hold the partition");
case cpp2::ErrorCode::E_CORRUPTTED_BALANCE_PLAN:
return Status::Error("No corrupted blance plan");
case cpp2::ErrorCode::E_INVALID_PARTITION_NUM:
return Status::Error("No valid partition_num");
case cpp2::ErrorCode::E_INVALID_REPLICA_FACTOR:
return Status::Error("No valid replica_factor");
case cpp2::ErrorCode::E_INVALID_CHARSET:
return Status::Error("No valid charset");
case cpp2::ErrorCode::E_INVALID_COLLATE:
return Status::Error("No valid collate");
case cpp2::ErrorCode::E_CHARSET_COLLATE_NOT_MATCH:
return Status::Error("Charset and collate not match");
case cpp2::ErrorCode::E_INVALID_PASSWORD:
return Status::Error("Invalid password");
case cpp2::ErrorCode::E_IMPROPER_ROLE:
return Status::Error("Improper role");
default:
return Status::Error("Unknown code %d", static_cast<int32_t>(resp.get_code()));
}
}
PartsMap MetaClient::doGetPartsMap(const HostAddr& host,
const LocalCache& localCache) {
PartsMap partMap;
for (auto it = localCache.begin(); it != localCache.end(); it++) {
auto spaceId = it->first;
auto& cache = it->second;
auto partsIt = cache->partsOnHost_.find(host);
if (partsIt != cache->partsOnHost_.end()) {
for (auto& partId : partsIt->second) {
auto partAllocIter = cache->partsAlloc_.find(partId);
CHECK(partAllocIter != cache->partsAlloc_.end());
auto& partM = partMap[spaceId][partId];
partM.spaceId_ = spaceId;
partM.partId_ = partId;
partM.peers_ = partAllocIter->second;
}
}
}
return partMap;
}
void MetaClient::diff(const LocalCache& oldCache, const LocalCache& newCache) {
folly::RWSpinLock::WriteHolder holder(listenerLock_);
if (listener_ == nullptr) {
VLOG(3) << "Listener is null!";
return;
}
auto newPartsMap = doGetPartsMap(options_.localHost_, newCache);
auto oldPartsMap = doGetPartsMap(options_.localHost_, oldCache);
VLOG(1) << "Let's check if any new parts added/updated for " << options_.localHost_;
for (auto it = newPartsMap.begin(); it != newPartsMap.end(); it++) {
auto spaceId = it->first;
const auto& newParts = it->second;
auto oldIt = oldPartsMap.find(spaceId);
if (oldIt == oldPartsMap.end()) {
LOG(INFO) << "SpaceId " << spaceId << " was added!";
listener_->onSpaceAdded(spaceId);
for (auto partIt = newParts.begin(); partIt != newParts.end(); partIt++) {
listener_->onPartAdded(partIt->second);
}
} else {
const auto& oldParts = oldIt->second;
for (auto partIt = newParts.begin(); partIt != newParts.end(); partIt++) {
auto oldPartIt = oldParts.find(partIt->first);
if (oldPartIt == oldParts.end()) {
VLOG(1) << "SpaceId " << spaceId << ", partId "
<< partIt->first << " was added!";
listener_->onPartAdded(partIt->second);
} else {
const auto& oldPartMeta = oldPartIt->second;
const auto& newPartMeta = partIt->second;
if (oldPartMeta != newPartMeta) {
VLOG(1) << "SpaceId " << spaceId
<< ", partId " << partIt->first << " was updated!";
listener_->onPartUpdated(newPartMeta);
}
}
}
}
}
VLOG(1) << "Let's check if any old parts removed....";
for (auto it = oldPartsMap.begin(); it != oldPartsMap.end(); it++) {
auto spaceId = it->first;
const auto& oldParts = it->second;
auto newIt = newPartsMap.find(spaceId);
if (newIt == newPartsMap.end()) {
LOG(INFO) << "SpaceId " << spaceId << " was removed!";
for (auto partIt = oldParts.begin(); partIt != oldParts.end(); partIt++) {
listener_->onPartRemoved(spaceId, partIt->first);
}
listener_->onSpaceRemoved(spaceId);
} else {
const auto& newParts = newIt->second;
for (auto partIt = oldParts.begin(); partIt != oldParts.end(); partIt++) {
auto newPartIt = newParts.find(partIt->first);
if (newPartIt == newParts.end()) {
VLOG(1) << "SpaceId " << spaceId
<< ", partId " << partIt->first << " was removed!";
listener_->onPartRemoved(spaceId, partIt->first);
}
}
}
}
}
/// ================================== public methods =================================
folly::Future<StatusOr<GraphSpaceID>> MetaClient::createSpace(SpaceDesc spaceDesc,
bool ifNotExists) {
cpp2::SpaceProperties properties;
properties.set_space_name(std::move(spaceDesc.spaceName_));
properties.set_partition_num(spaceDesc.partNum_);
properties.set_replica_factor(spaceDesc.replicaFactor_);
properties.set_charset_name(std::move(spaceDesc.charsetName_));
properties.set_collate_name(std::move(spaceDesc.collationName_));
cpp2::CreateSpaceReq req;
req.set_properties(std::move(properties));
req.set_if_not_exists(ifNotExists);
folly::Promise<StatusOr<GraphSpaceID>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createSpace(request);
}, [] (cpp2::ExecResp&& resp) -> GraphSpaceID {
return resp.get_id().get_space_id();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<SpaceIdName>>> MetaClient::listSpaces() {
cpp2::ListSpacesReq req;
folly::Promise<StatusOr<std::vector<SpaceIdName>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listSpaces(request);
}, [this] (cpp2::ListSpacesResp&& resp) -> decltype(auto) {
return this->toSpaceIdName(resp.get_spaces());
}, std::move(promise));
return future;
}
folly::Future<StatusOr<cpp2::AdminJobResult>>
MetaClient::submitJob(cpp2::AdminJobOp op, std::vector<std::string> paras) {
cpp2::AdminJobReq req;
req.set_op(op);
req.set_paras(std::move(paras));
folly::Promise<StatusOr<cpp2::AdminJobResult>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_runAdminJob(request);
}, [] (cpp2::AdminJobResp&& resp) -> decltype(auto) {
return resp.get_result();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<cpp2::SpaceItem>>
MetaClient::getSpace(std::string name) {
cpp2::GetSpaceReq req;
req.set_space_name(std::move(name));
folly::Promise<StatusOr<cpp2::SpaceItem>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getSpace(request);
}, [] (cpp2::GetSpaceResp&& resp) -> decltype(auto) {
return std::move(resp).get_item();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>> MetaClient::dropSpace(std::string name, const bool ifExists) {
cpp2::DropSpaceReq req;
req.set_space_name(std::move(name));
req.set_if_exists(ifExists);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropSpace(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::HostItem>>> MetaClient::listHosts() {
cpp2::ListHostsReq req;
folly::Promise<StatusOr<std::vector<cpp2::HostItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listHosts(request);
}, [] (cpp2::ListHostsResp&& resp) -> decltype(auto) {
return resp.hosts;
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::vector<cpp2::PartItem>>>
MetaClient::listParts(GraphSpaceID spaceId, std::vector<PartitionID> partIds) {
cpp2::ListPartsReq req;
req.set_space_id(spaceId);
req.set_part_ids(std::move(partIds));
folly::Promise<StatusOr<std::vector<cpp2::PartItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listParts(request);
}, [] (cpp2::ListPartsResp&& resp) -> decltype(auto) {
return resp.parts;
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::unordered_map<PartitionID, std::vector<HostAddr>>>>
MetaClient::getPartsAlloc(GraphSpaceID spaceId) {
cpp2::GetPartsAllocReq req;
req.set_space_id(spaceId);
folly::Promise<StatusOr<std::unordered_map<PartitionID, std::vector<HostAddr>>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getPartsAlloc(request);
}, [this] (cpp2::GetPartsAllocResp&& resp) -> decltype(auto) {
std::unordered_map<PartitionID, std::vector<HostAddr>> parts;
for (auto it = resp.parts.begin(); it != resp.parts.end(); it++) {
parts.emplace(it->first, to(it->second));
}
return parts;
}, std::move(promise));
return future;
}
StatusOr<GraphSpaceID>
MetaClient::getSpaceIdByNameFromCache(const std::string& name) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceIndexByName_.find(name);
if (it != spaceIndexByName_.end()) {
return it->second;
}
return Status::SpaceNotFound();
}
StatusOr<TagID> MetaClient::getTagIDByNameFromCache(const GraphSpaceID& space,
const std::string& name) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceTagIndexByName_.find(std::make_pair(space, name));
if (it == spaceTagIndexByName_.end()) {
std::string error = folly::stringPrintf("TagName `%s' is nonexistent", name.c_str());
return Status::Error(std::move(error));
}
return it->second;
}
StatusOr<std::string> MetaClient::getTagNameByIdFromCache(const GraphSpaceID& space,
const TagID& tagId) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceTagIndexById_.find(std::make_pair(space, tagId));
if (it == spaceTagIndexById_.end()) {
std::string error = folly::stringPrintf("TagID `%d' is nonexistent", tagId);
return Status::Error(std::move(error));
}
return it->second;
}
StatusOr<EdgeType> MetaClient::getEdgeTypeByNameFromCache(const GraphSpaceID& space,
const std::string& name) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceEdgeIndexByName_.find(std::make_pair(space, name));
if (it == spaceEdgeIndexByName_.end()) {
std::string error = folly::stringPrintf("EdgeName `%s' is nonexistent", name.c_str());
return Status::Error(std::move(error));
}
return it->second;
}
StatusOr<std::string> MetaClient::getEdgeNameByTypeFromCache(const GraphSpaceID& space,
const EdgeType edgeType) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceEdgeIndexByType_.find(std::make_pair(space, edgeType));
if (it == spaceEdgeIndexByType_.end()) {
std::string error = folly::stringPrintf("EdgeType `%d' is nonexistent", edgeType);
return Status::Error(std::move(error));
}
return it->second;
}
StatusOr<std::vector<std::string>> MetaClient::getAllEdgeFromCache(const GraphSpaceID& space) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceAllEdgeMap_.find(space);
if (it == spaceAllEdgeMap_.end()) {
std::string error = folly::stringPrintf("SpaceId `%d' is nonexistent", space);
return Status::Error(std::move(error));
}
return it->second;
}
folly::Future<StatusOr<bool>>
MetaClient::multiPut(std::string segment,
std::vector<std::pair<std::string, std::string>> pairs) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| pairs.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::MultiPutReq req;
std::vector<nebula::cpp2::Pair> data;
for (auto& element : pairs) {
nebula::cpp2::Pair pair;
pair.set_key(std::move(element.first));
pair.set_value(std::move(element.second));
data.emplace_back(std::move(pair));
}
req.set_segment(std::move(segment));
req.set_pairs(std::move(data));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_multiPut(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::string>>
MetaClient::get(std::string segment, std::string key) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| key.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::GetReq req;
req.set_segment(std::move(segment));
req.set_key(std::move(key));
folly::Promise<StatusOr<std::string>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_get(request);
}, [] (cpp2::GetResp&& resp) -> std::string {
return resp.get_value();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::vector<std::string>>>
MetaClient::multiGet(std::string segment, std::vector<std::string> keys) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| keys.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::MultiGetReq req;
req.set_segment(std::move(segment));
req.set_keys(std::move(keys));
folly::Promise<StatusOr<std::vector<std::string>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_multiGet(request);
}, [] (cpp2::MultiGetResp&& resp) -> std::vector<std::string> {
return resp.get_values();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::vector<std::string>>>
MetaClient::scan(std::string segment, std::string start, std::string end) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| start.empty() || end.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::ScanReq req;
req.set_segment(std::move(segment));
req.set_start(std::move(start));
req.set_end(std::move(end));
folly::Promise<StatusOr<std::vector<std::string>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_scan(request);
}, [] (cpp2::ScanResp&& resp) -> std::vector<std::string> {
return resp.get_values();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::remove(std::string segment, std::string key) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| key.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::RemoveReq req;
req.set_segment(std::move(segment));
req.set_key(std::move(key));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_remove(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::removeRange(std::string segment, std::string start, std::string end) {
if (!nebula::meta::MetaCommon::checkSegment(segment)
|| start.empty() || end.empty()) {
return Status::Error("arguments invalid!");
}
cpp2::RemoveRangeReq req;
req.set_segment(std::move(segment));
req.set_start(std::move(start));
req.set_end(std::move(end));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_removeRange(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
PartsMap MetaClient::getPartsMapFromCache(const HostAddr& host) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
return doGetPartsMap(host, localCache_);
}
StatusOr<PartMeta> MetaClient::getPartMetaFromCache(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
if (it == localCache_.end()) {
return Status::Error("Space not found, spaceid: %d", spaceId);
}
auto& cache = it->second;
auto partAllocIter = cache->partsAlloc_.find(partId);
if (partAllocIter == cache->partsAlloc_.end()) {
return Status::Error("Part not found in cache, spaceid: %d, partid: %d", spaceId, partId);
}
PartMeta pm;
pm.spaceId_ = spaceId;
pm.partId_ = partId;
pm.peers_ = partAllocIter->second;
return pm;
}
Status MetaClient::checkPartExistInCache(const HostAddr& host,
GraphSpaceID spaceId,
PartitionID partId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
if (it != localCache_.end()) {
auto partsIt = it->second->partsOnHost_.find(host);
if (partsIt != it->second->partsOnHost_.end()) {
            for (auto& pId : partsIt->second) {
                if (pId == partId) {
                    return Status::OK();
                }
            }
            // The host serves this space but not this particular part.
            return Status::PartNotFound();
} else {
return Status::PartNotFound();
}
}
return Status::SpaceNotFound();
}
Status MetaClient::checkSpaceExistInCache(const HostAddr& host,
GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
if (it != localCache_.end()) {
auto partsIt = it->second->partsOnHost_.find(host);
if (partsIt != it->second->partsOnHost_.end() && !partsIt->second.empty()) {
return Status::OK();
} else {
return Status::PartNotFound();
}
}
return Status::SpaceNotFound();
}
StatusOr<int32_t> MetaClient::partsNum(GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
if (it == localCache_.end()) {
return Status::Error("Space not found, spaceid: %d", spaceId);
}
return it->second->partsAlloc_.size();
}
folly::Future<StatusOr<TagID>> MetaClient::createTagSchema(GraphSpaceID spaceId,
std::string name,
nebula::cpp2::Schema schema,
bool ifNotExists) {
cpp2::CreateTagReq req;
req.set_space_id(spaceId);
req.set_tag_name(std::move(name));
req.set_schema(std::move(schema));
req.set_if_not_exists(ifNotExists);
folly::Promise<StatusOr<TagID>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createTag(request);
}, [] (cpp2::ExecResp&& resp) -> TagID {
return resp.get_id().get_tag_id();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<TagID>>
MetaClient::alterTagSchema(GraphSpaceID spaceId,
std::string name,
std::vector<cpp2::AlterSchemaItem> items,
nebula::cpp2::SchemaProp schemaProp) {
cpp2::AlterTagReq req;
req.set_space_id(spaceId);
req.set_tag_name(std::move(name));
req.set_tag_items(std::move(items));
req.set_schema_prop(std::move(schemaProp));
folly::Promise<StatusOr<TagID>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_alterTag(request);
}, [] (cpp2::ExecResp&& resp) -> TagID {
return resp.get_id().get_tag_id();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::TagItem>>>
MetaClient::listTagSchemas(GraphSpaceID spaceId) {
cpp2::ListTagsReq req;
req.set_space_id(spaceId);
folly::Promise<StatusOr<std::vector<cpp2::TagItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listTags(request);
}, [] (cpp2::ListTagsResp&& resp) -> decltype(auto){
return std::move(resp).get_tags();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::dropTagSchema(int32_t spaceId, std::string tagName, const bool ifExists) {
cpp2::DropTagReq req;
req.set_space_id(spaceId);
req.set_tag_name(std::move(tagName));
req.set_if_exists(ifExists);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropTag(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<nebula::cpp2::Schema>>
MetaClient::getTagSchema(int32_t spaceId, std::string name, int64_t version) {
cpp2::GetTagReq req;
req.set_space_id(spaceId);
req.set_tag_name(std::move(name));
req.set_version(version);
folly::Promise<StatusOr<nebula::cpp2::Schema>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getTag(request);
}, [] (cpp2::GetTagResp&& resp) -> nebula::cpp2::Schema {
return std::move(resp).get_schema();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<EdgeType>> MetaClient::createEdgeSchema(GraphSpaceID spaceId,
std::string name,
nebula::cpp2::Schema schema,
bool ifNotExists) {
cpp2::CreateEdgeReq req;
req.set_space_id(spaceId);
req.set_edge_name(std::move(name));
req.set_schema(schema);
req.set_if_not_exists(ifNotExists);
folly::Promise<StatusOr<EdgeType>> promise;
auto future = promise.getFuture();
getResponse(
std::move(req),
[](auto client, auto request) { return client->future_createEdge(request); },
[](cpp2::ExecResp&& resp) -> EdgeType { return resp.get_id().get_edge_type(); },
std::move(promise),
true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::alterEdgeSchema(GraphSpaceID spaceId,
std::string name,
std::vector<cpp2::AlterSchemaItem> items,
nebula::cpp2::SchemaProp schemaProp) {
cpp2::AlterEdgeReq req;
req.set_space_id(spaceId);
req.set_edge_name(std::move(name));
req.set_edge_items(std::move(items));
req.set_schema_prop(std::move(schemaProp));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_alterEdge(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::EdgeItem>>>
MetaClient::listEdgeSchemas(GraphSpaceID spaceId) {
cpp2::ListEdgesReq req;
req.set_space_id(spaceId);
folly::Promise<StatusOr<std::vector<cpp2::EdgeItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listEdges(request);
}, [] (cpp2::ListEdgesResp&& resp) -> decltype(auto) {
return std::move(resp).get_edges();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<nebula::cpp2::Schema>>
MetaClient::getEdgeSchema(GraphSpaceID spaceId, std::string name, SchemaVer version) {
cpp2::GetEdgeReq req;
req.set_space_id(spaceId);
req.set_edge_name(std::move(name));
req.set_version(version);
folly::Promise<StatusOr<nebula::cpp2::Schema>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getEdge(request);
}, [] (cpp2::GetEdgeResp&& resp) -> nebula::cpp2::Schema {
return std::move(resp).get_schema();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::dropEdgeSchema(GraphSpaceID spaceId, std::string name, const bool ifExists) {
cpp2::DropEdgeReq req;
req.set_space_id(spaceId);
req.set_edge_name(std::move(name));
req.set_if_exists(ifExists);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropEdge(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<IndexID>>
MetaClient::createTagIndex(GraphSpaceID spaceID,
std::string indexName,
std::string tagName,
std::vector<std::string> fields,
bool ifNotExists) {
cpp2::CreateTagIndexReq req;
req.set_space_id(spaceID);
req.set_index_name(std::move(indexName));
req.set_tag_name(std::move(tagName));
req.set_fields(std::move(fields));
req.set_if_not_exists(ifNotExists);
folly::Promise<StatusOr<IndexID>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createTagIndex(request);
}, [] (cpp2::ExecResp&& resp) -> IndexID {
return resp.get_id().get_index_id();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::dropTagIndex(GraphSpaceID spaceID,
std::string name,
bool ifExists) {
cpp2::DropTagIndexReq req;
req.set_space_id(spaceID);
req.set_index_name(std::move(name));
req.set_if_exists(ifExists);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropTagIndex(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<nebula::cpp2::IndexItem>>
MetaClient::getTagIndex(GraphSpaceID spaceID, std::string name) {
cpp2::GetTagIndexReq req;
req.set_space_id(spaceID);
req.set_index_name(std::move(name));
folly::Promise<StatusOr<nebula::cpp2::IndexItem>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getTagIndex(request);
}, [] (cpp2::GetTagIndexResp&& resp) -> nebula::cpp2::IndexItem {
return std::move(resp).get_item();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::vector<nebula::cpp2::IndexItem>>>
MetaClient::listTagIndexes(GraphSpaceID spaceID) {
cpp2::ListTagIndexesReq req;
req.set_space_id(spaceID);
folly::Promise<StatusOr<std::vector<nebula::cpp2::IndexItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listTagIndexes(request);
}, [] (cpp2::ListTagIndexesResp&& resp) -> std::vector<nebula::cpp2::IndexItem> {
return std::move(resp).get_items();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::rebuildTagIndex(GraphSpaceID spaceID,
std::string name,
bool isOffline) {
cpp2::RebuildIndexReq req;
req.set_space_id(spaceID);
req.set_index_name(std::move(name));
req.set_is_offline(isOffline);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_rebuildTagIndex(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::IndexStatus>>>
MetaClient::listTagIndexStatus(GraphSpaceID spaceID) {
cpp2::ListIndexStatusReq req;
req.set_space_id(spaceID);
folly::Promise<StatusOr<std::vector<cpp2::IndexStatus>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listTagIndexStatus(request);
}, [] (cpp2::ListIndexStatusResp&& resp) -> decltype(auto) {
return std::move(resp).get_statuses();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<IndexID>>
MetaClient::createEdgeIndex(GraphSpaceID spaceID,
std::string indexName,
std::string edgeName,
std::vector<std::string> fields,
bool ifNotExists) {
cpp2::CreateEdgeIndexReq req;
req.set_space_id(spaceID);
req.set_index_name(std::move(indexName));
req.set_edge_name(std::move(edgeName));
req.set_fields(std::move(fields));
req.set_if_not_exists(ifNotExists);
folly::Promise<StatusOr<IndexID>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createEdgeIndex(request);
}, [] (cpp2::ExecResp&& resp) -> IndexID {
return resp.get_id().get_index_id();
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::dropEdgeIndex(GraphSpaceID spaceID,
std::string name,
bool ifExists) {
cpp2::DropEdgeIndexReq req;
req.set_space_id(spaceID);
req.set_index_name(std::move(name));
req.set_if_exists(ifExists);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropEdgeIndex(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<nebula::cpp2::IndexItem>>
MetaClient::getEdgeIndex(GraphSpaceID spaceID, std::string name) {
cpp2::GetEdgeIndexReq req;
req.set_space_id(spaceID);
req.set_index_name(std::move(name));
folly::Promise<StatusOr<nebula::cpp2::IndexItem>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getEdgeIndex(request);
}, [] (cpp2::GetEdgeIndexResp&& resp) -> nebula::cpp2::IndexItem {
return std::move(resp).get_item();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::vector<nebula::cpp2::IndexItem>>>
MetaClient::listEdgeIndexes(GraphSpaceID spaceID) {
cpp2::ListEdgeIndexesReq req;
req.set_space_id(spaceID);
folly::Promise<StatusOr<std::vector<nebula::cpp2::IndexItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listEdgeIndexes(request);
}, [] (cpp2::ListEdgeIndexesResp&& resp) -> std::vector<nebula::cpp2::IndexItem> {
return std::move(resp).get_items();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::rebuildEdgeIndex(GraphSpaceID spaceID,
std::string name,
bool isOffline) {
cpp2::RebuildIndexReq req;
req.set_space_id(spaceID);
req.set_index_name(std::move(name));
req.set_is_offline(isOffline);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_rebuildEdgeIndex(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::IndexStatus>>>
MetaClient::listEdgeIndexStatus(GraphSpaceID spaceID) {
cpp2::ListIndexStatusReq req;
req.set_space_id(spaceID);
folly::Promise<StatusOr<std::vector<cpp2::IndexStatus>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listEdgeIndexStatus(request);
}, [] (cpp2::ListIndexStatusResp&& resp) -> decltype(auto) {
return std::move(resp).get_statuses();
}, std::move(promise));
return future;
}
StatusOr<std::shared_ptr<const SchemaProviderIf>>
MetaClient::getTagSchemaFromCache(GraphSpaceID spaceId, TagID tagID, SchemaVer ver) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto spaceIt = localCache_.find(spaceId);
if (spaceIt == localCache_.end()) {
LOG(ERROR) << "Space " << spaceId << " not found!";
return std::shared_ptr<const SchemaProviderIf>();
} else {
auto tagIt = spaceIt->second->tagSchemas_.find(std::make_pair(tagID, ver));
if (tagIt == spaceIt->second->tagSchemas_.end()) {
return std::shared_ptr<const SchemaProviderIf>();
} else {
return tagIt->second;
}
}
}
StatusOr<std::shared_ptr<const SchemaProviderIf>>
MetaClient::getEdgeSchemaFromCache(GraphSpaceID spaceId, EdgeType edgeType, SchemaVer ver) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto spaceIt = localCache_.find(spaceId);
if (spaceIt == localCache_.end()) {
LOG(ERROR) << "Space " << spaceId << " not found!";
return std::shared_ptr<const SchemaProviderIf>();
} else {
auto edgeIt = spaceIt->second->edgeSchemas_.find(std::make_pair(edgeType, ver));
if (edgeIt == spaceIt->second->edgeSchemas_.end()) {
LOG(ERROR) << "Space " << spaceId << ", EdgeType " << edgeType << ", version "
<< ver << " not found!";
return std::shared_ptr<const SchemaProviderIf>();
} else {
return edgeIt->second;
}
}
}
StatusOr<std::shared_ptr<nebula::cpp2::IndexItem>>
MetaClient::getTagIndexByNameFromCache(const GraphSpaceID space, const std::string& name) {
if (!ready_) {
return Status::Error("Not ready!");
}
std::pair<GraphSpaceID, std::string> key(space, name);
auto iter = tagNameIndexMap_.find(key);
if (iter == tagNameIndexMap_.end()) {
return Status::IndexNotFound();
}
auto indexID = iter->second;
auto itemStatus = getTagIndexFromCache(space, indexID);
if (!itemStatus.ok()) {
return itemStatus.status();
}
return itemStatus.value();
}
StatusOr<std::shared_ptr<nebula::cpp2::IndexItem>>
MetaClient::getEdgeIndexByNameFromCache(const GraphSpaceID space, const std::string& name) {
if (!ready_) {
return Status::Error("Not ready!");
}
std::pair<GraphSpaceID, std::string> key(space, name);
auto iter = edgeNameIndexMap_.find(key);
if (iter == edgeNameIndexMap_.end()) {
return Status::IndexNotFound();
}
auto indexID = iter->second;
auto itemStatus = getEdgeIndexFromCache(space, indexID);
if (!itemStatus.ok()) {
return itemStatus.status();
}
return itemStatus.value();
}
StatusOr<std::shared_ptr<nebula::cpp2::IndexItem>>
MetaClient::getTagIndexFromCache(GraphSpaceID spaceId, IndexID indexID) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto spaceIt = localCache_.find(spaceId);
if (spaceIt == localCache_.end()) {
LOG(ERROR) << "Space " << spaceId << " not found!";
return Status::SpaceNotFound();
} else {
auto iter = spaceIt->second->tagIndexes_.find(indexID);
if (iter == spaceIt->second->tagIndexes_.end()) {
LOG(ERROR) << "Space " << spaceId << ", Tag Index " << indexID << " not found!";
return Status::IndexNotFound();
} else {
return iter->second;
}
}
}
StatusOr<TagID>
MetaClient::getRelatedTagIDByIndexNameFromCache(const GraphSpaceID space,
const std::string& indexName) {
if (!ready_) {
return Status::Error("Not ready!");
}
auto indexRet = getTagIndexByNameFromCache(space, indexName);
if (!indexRet.ok()) {
LOG(ERROR) << "Index " << indexName << " Not Found";
return indexRet.status();
}
return indexRet.value()->get_schema_id().get_tag_id();
}
StatusOr<std::shared_ptr<nebula::cpp2::IndexItem>>
MetaClient::getEdgeIndexFromCache(GraphSpaceID spaceId, IndexID indexID) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto spaceIt = localCache_.find(spaceId);
if (spaceIt == localCache_.end()) {
VLOG(3) << "Space " << spaceId << " not found!";
return Status::SpaceNotFound();
} else {
auto iter = spaceIt->second->edgeIndexes_.find(indexID);
if (iter == spaceIt->second->edgeIndexes_.end()) {
VLOG(3) << "Space " << spaceId << ", Edge Index " << indexID << " not found!";
return Status::IndexNotFound();
} else {
return iter->second;
}
}
}
StatusOr<EdgeType>
MetaClient::getRelatedEdgeTypeByIndexNameFromCache(const GraphSpaceID space,
const std::string& indexName) {
if (!ready_) {
return Status::Error("Not ready!");
}
auto indexRet = getEdgeIndexByNameFromCache(space, indexName);
if (!indexRet.ok()) {
LOG(ERROR) << "Index " << indexName << " Not Found";
return indexRet.status();
}
return indexRet.value()->get_schema_id().get_edge_type();
}
StatusOr<std::vector<std::shared_ptr<nebula::cpp2::IndexItem>>>
MetaClient::getTagIndexesFromCache(GraphSpaceID spaceId) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto spaceIt = localCache_.find(spaceId);
if (spaceIt == localCache_.end()) {
VLOG(3) << "Space " << spaceId << " not found!";
return Status::SpaceNotFound();
} else {
        const auto& tagIndexes = spaceIt->second->tagIndexes_;
        std::vector<std::shared_ptr<nebula::cpp2::IndexItem>> items;
        items.reserve(tagIndexes.size());
        for (const auto& entry : tagIndexes) {
            items.emplace_back(entry.second);
        }
        return items;
}
}
StatusOr<std::vector<std::shared_ptr<nebula::cpp2::IndexItem>>>
MetaClient::getEdgeIndexesFromCache(GraphSpaceID spaceId) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto spaceIt = localCache_.find(spaceId);
if (spaceIt == localCache_.end()) {
VLOG(3) << "Space " << spaceId << " not found!";
return Status::SpaceNotFound();
} else {
        const auto& edgeIndexes = spaceIt->second->edgeIndexes_;
        std::vector<std::shared_ptr<nebula::cpp2::IndexItem>> items;
        items.reserve(edgeIndexes.size());
        for (const auto& entry : edgeIndexes) {
            items.emplace_back(entry.second);
        }
        return items;
}
}
const std::vector<HostAddr>& MetaClient::getAddresses() {
return addrs_;
}
std::vector<nebula::cpp2::RoleItem>
MetaClient::getRolesByUserFromCache(const std::string& user) {
auto iter = userRolesMap_.find(user);
if (iter == userRolesMap_.end()) {
return std::vector<nebula::cpp2::RoleItem>(0);
}
return iter->second;
}
bool MetaClient::authCheckFromCache(const std::string& account, const std::string& password) {
auto iter = userPasswordMap_.find(account);
if (iter == userPasswordMap_.end()) {
return false;
}
return iter->second == password;
}
StatusOr<SchemaVer> MetaClient::getLatestTagVersionFromCache(const GraphSpaceID& space,
const TagID& tagId) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceNewestTagVerMap_.find(std::make_pair(space, tagId));
if (it == spaceNewestTagVerMap_.end()) {
return Status::TagNotFound();
}
return it->second;
}
StatusOr<SchemaVer> MetaClient::getLatestEdgeVersionFromCache(const GraphSpaceID& space,
const EdgeType& edgeType) {
if (!ready_) {
return Status::Error("Not ready!");
}
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceNewestEdgeVerMap_.find(std::make_pair(space, edgeType));
if (it == spaceNewestEdgeVerMap_.end()) {
return Status::EdgeNotFound();
}
return it->second;
}
folly::Future<StatusOr<bool>> MetaClient::heartbeat() {
cpp2::HBReq req;
req.set_in_storaged(options_.inStoraged_);
if (options_.inStoraged_) {
nebula::cpp2::HostAddr thriftHost;
thriftHost.set_ip(options_.localHost_.first);
thriftHost.set_port(options_.localHost_.second);
req.set_host(std::move(thriftHost));
if (options_.clusterId_.load() == 0) {
options_.clusterId_ = ClusterIdMan::getClusterIdFromFile(FLAGS_cluster_id_path);
}
req.set_cluster_id(options_.clusterId_.load());
std::unordered_map<GraphSpaceID, std::vector<PartitionID>> leaderIds;
if (listener_ != nullptr) {
listener_->fetchLeaderInfo(leaderIds);
if (leaderIds_ != leaderIds) {
{
folly::RWSpinLock::WriteHolder holder(leaderIdsLock_);
leaderIds_.clear();
leaderIds_ = leaderIds;
}
req.set_leader_partIds(std::move(leaderIds));
}
} else {
req.set_leader_partIds(std::move(leaderIds));
}
}
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
VLOG(1) << "Send heartbeat to " << leader_ << ", clusterId " << req.get_cluster_id();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_heartBeat(request);
}, [this] (cpp2::HBResp&& resp) -> bool {
if (options_.inStoraged_ && options_.clusterId_.load() == 0) {
LOG(INFO) << "Persisit the cluster Id from metad " << resp.get_cluster_id();
if (ClusterIdMan::persistInFile(resp.get_cluster_id(),
FLAGS_cluster_id_path)) {
options_.clusterId_.store(resp.get_cluster_id());
} else {
LOG(FATAL) << "Can't persist the clusterId in file "
<< FLAGS_cluster_id_path;
}
}
metadLastUpdateTime_ = resp.get_last_update_time_in_ms();
VLOG(1) << "Metad last update time: " << metadLastUpdateTime_;
return true; // resp.code == cpp2::ErrorCode::SUCCEEDED
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::createUser(std::string account, std::string password, bool ifNotExists) {
cpp2::CreateUserReq req;
req.set_account(std::move(account));
req.set_encoded_pwd(std::move(password));
req.set_if_not_exists(ifNotExists);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createUser(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::dropUser(std::string account, bool ifExists) {
cpp2::DropUserReq req;
req.set_account(std::move(account));
req.set_if_exists(ifExists);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropUser(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::alterUser(std::string account, std::string password) {
cpp2::AlterUserReq req;
req.set_account(std::move(account));
req.set_encoded_pwd(std::move(password));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_alterUser(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::grantToUser(nebula::cpp2::RoleItem roleItem) {
cpp2::GrantRoleReq req;
req.set_role_item(std::move(roleItem));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_grantRole(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::revokeFromUser(nebula::cpp2::RoleItem roleItem) {
cpp2::RevokeRoleReq req;
req.set_role_item(std::move(roleItem));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_revokeRole(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::map<std::string, std::string>>>
MetaClient::listUsers() {
cpp2::ListUsersReq req;
folly::Promise<StatusOr<std::map<std::string, std::string>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listUsers(request);
}, [] (cpp2::ListUsersResp&& resp) -> decltype(auto) {
return std::move(resp).get_users();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::vector<nebula::cpp2::RoleItem>>>
MetaClient::listRoles(GraphSpaceID space) {
cpp2::ListRolesReq req;
req.set_space_id(std::move(space));
folly::Promise<StatusOr<std::vector<nebula::cpp2::RoleItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listRoles(request);
}, [] (cpp2::ListRolesResp&& resp) -> decltype(auto) {
return std::move(resp).get_roles();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::changePassword(std::string account,
std::string newPwd,
std::string oldPwd) {
cpp2::ChangePasswordReq req;
req.set_account(std::move(account));
req.set_new_encoded_pwd(std::move(newPwd));
req.set_old_encoded_pwd(std::move(oldPwd));
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_changePassword(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<nebula::cpp2::RoleItem>>>
MetaClient::getUserRoles(std::string account) {
cpp2::GetUserRolesReq req;
req.set_account(std::move(account));
folly::Promise<StatusOr<std::vector<nebula::cpp2::RoleItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getUserRoles(request);
}, [] (cpp2::ListRolesResp&& resp) -> decltype(auto) {
return std::move(resp).get_roles();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<int64_t>> MetaClient::balance(std::vector<HostAddr> hostDel,
bool isStop) {
cpp2::BalanceReq req;
if (!hostDel.empty()) {
std::vector<nebula::cpp2::HostAddr> tHostDel;
tHostDel.reserve(hostDel.size());
std::transform(hostDel.begin(), hostDel.end(),
std::back_inserter(tHostDel), [](const auto& h) {
nebula::cpp2::HostAddr th;
th.set_ip(h.first);
th.set_port(h.second);
return th;
});
req.set_host_del(std::move(tHostDel));
}
if (isStop) {
req.set_stop(isStop);
}
folly::Promise<StatusOr<int64_t>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_balance(request);
}, [] (cpp2::BalanceResp&& resp) -> int64_t {
return resp.id;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::BalanceTask>>>
MetaClient::showBalance(int64_t balanceId) {
cpp2::BalanceReq req;
req.set_id(balanceId);
folly::Promise<StatusOr<std::vector<cpp2::BalanceTask>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_balance(request);
}, [] (cpp2::BalanceResp&& resp) -> std::vector<cpp2::BalanceTask> {
return resp.tasks;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>> MetaClient::balanceLeader() {
cpp2::LeaderBalanceReq req;
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_leaderBalance(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::string>> MetaClient::getTagDefaultValue(GraphSpaceID spaceId,
TagID tagId,
const std::string& field) {
cpp2::GetReq req;
static std::string defaultKey = "__default__";
req.set_segment(defaultKey);
std::string key;
key.reserve(64);
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
key.append(reinterpret_cast<const char*>(&tagId), sizeof(TagID));
key.append(field);
req.set_key(std::move(key));
folly::Promise<StatusOr<std::string>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_get(request);
}, [] (cpp2::GetResp&& resp) -> std::string {
return resp.get_value();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<std::string>> MetaClient::getEdgeDefaultValue(GraphSpaceID spaceId,
EdgeType edgeType,
const std::string& field) {
cpp2::GetReq req;
static std::string defaultKey = "__default__";
req.set_segment(defaultKey);
std::string key;
key.reserve(64);
key.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
key.append(reinterpret_cast<const char*>(&edgeType), sizeof(EdgeType));
key.append(field);
req.set_key(std::move(key));
folly::Promise<StatusOr<std::string>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_get(request);
}, [] (cpp2::GetResp&& resp) -> std::string {
return resp.get_value();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::regConfig(const std::vector<cpp2::ConfigItem>& items) {
cpp2::RegConfigReq req;
req.set_items(items);
    folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_regConfig(request);
}, [] (cpp2::ExecResp&& resp) -> decltype(auto) {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::ConfigItem>>>
MetaClient::getConfig(const cpp2::ConfigModule& module, const std::string& name) {
cpp2::ConfigItem item;
item.set_module(module);
item.set_name(name);
cpp2::GetConfigReq req;
req.set_item(item);
folly::Promise<StatusOr<std::vector<cpp2::ConfigItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getConfig(request);
}, [] (cpp2::GetConfigResp&& resp) -> decltype(auto) {
return std::move(resp).get_items();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>>
MetaClient::setConfig(const cpp2::ConfigModule& module, const std::string& name,
const cpp2::ConfigType& type, const std::string& value) {
cpp2::ConfigItem item;
item.set_module(module);
item.set_name(name);
item.set_type(type);
item.set_value(value);
cpp2::SetConfigReq req;
req.set_item(item);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_setConfig(request);
}, [] (cpp2::ExecResp&& resp) -> decltype(auto) {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::ConfigItem>>>
MetaClient::listConfigs(const cpp2::ConfigModule& module) {
cpp2::ListConfigsReq req;
req.set_module(module);
folly::Promise<StatusOr<std::vector<cpp2::ConfigItem>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listConfigs(request);
}, [] (cpp2::ListConfigsResp&& resp) -> decltype(auto) {
return std::move(resp).get_items();
}, std::move(promise));
return future;
}
folly::Future<StatusOr<bool>> MetaClient::createSnapshot() {
cpp2::CreateSnapshotReq req;
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createSnapshot(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<bool>> MetaClient::dropSnapshot(const std::string& name) {
cpp2::DropSnapshotReq req;
req.set_name(name);
folly::Promise<StatusOr<bool>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_dropSnapshot(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
}, std::move(promise), true);
return future;
}
folly::Future<StatusOr<std::vector<cpp2::Snapshot>>> MetaClient::listSnapshots() {
cpp2::ListSnapshotsReq req;
folly::Promise<StatusOr<std::vector<cpp2::Snapshot>>> promise;
auto future = promise.getFuture();
getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listSnapshots(request);
}, [] (cpp2::ListSnapshotsResp&& resp) -> decltype(auto){
return std::move(resp).get_snapshots();
}, std::move(promise));
return future;
}
bool MetaClient::registerCfg() {
auto ret = regConfig(gflagsDeclared_).get();
if (ret.ok()) {
LOG(INFO) << "Register gflags ok " << gflagsDeclared_.size();
configReady_ = true;
}
return configReady_;
}
bool MetaClient::loadCfg() {
if (!configReady_ && !registerCfg()) {
return false;
}
    // Loading only the current module's config is enough.
auto ret = listConfigs(gflagsModule_).get();
if (ret.ok()) {
// if we load config from meta server successfully, update gflags and set configReady_
auto tItems = ret.value();
std::vector<ConfigItem> items;
for (const auto& tItem : tItems) {
items.emplace_back(toConfigItem(tItem));
}
MetaConfigMap metaConfigMap;
for (auto& item : items) {
std::pair<cpp2::ConfigModule, std::string> key = {item.module_, item.name_};
metaConfigMap.emplace(std::move(key), std::move(item));
}
{
        // For any configuration that is in meta, update the cache to replace the previous value.
folly::RWSpinLock::WriteHolder holder(configCacheLock_);
for (const auto& entry : metaConfigMap) {
auto& key = entry.first;
auto it = metaConfigMap_.find(key);
            if (it == metaConfigMap_.end() || entry.second.value_ != it->second.value_) {
updateGflagsValue(entry.second);
metaConfigMap_[key] = entry.second;
}
}
}
} else {
LOG(ERROR) << "Load configs failed: " << ret.status();
return false;
}
return true;
}
void MetaClient::updateGflagsValue(const ConfigItem& item) {
if (item.mode_ != cpp2::ConfigMode::MUTABLE) {
return;
}
std::string metaValue;
switch (item.type_) {
case cpp2::ConfigType::INT64:
metaValue = folly::to<std::string>(boost::get<int64_t>(item.value_));
break;
case cpp2::ConfigType::DOUBLE:
metaValue = folly::to<std::string>(boost::get<double>(item.value_));
break;
case cpp2::ConfigType::BOOL:
metaValue = boost::get<bool>(item.value_) ? "true" : "false";
break;
case cpp2::ConfigType::STRING:
case cpp2::ConfigType::NESTED:
metaValue = boost::get<std::string>(item.value_);
break;
}
std::string curValue;
if (!gflags::GetCommandLineOption(item.name_.c_str(), &curValue)) {
return;
} else if (curValue != metaValue) {
if (item.type_ == cpp2::ConfigType::NESTED && metaValue.empty()) {
// Be compatible with previous configuration
metaValue = "{}";
}
gflags::SetCommandLineOption(item.name_.c_str(), metaValue.c_str());
// TODO: we simply judge the rocksdb by nested type for now
if (listener_ != nullptr && item.type_ == cpp2::ConfigType::NESTED) {
updateNestedGflags(item.name_);
}
LOG(INFO) << "update " << item.name_ << " from " << curValue << " to " << metaValue;
}
}
void MetaClient::updateNestedGflags(const std::string& name) {
std::string json;
gflags::GetCommandLineOption(name.c_str(), &json);
// generate option string map
Configuration conf;
auto status = conf.parseFromString(json);
if (!status.ok()) {
LOG(ERROR) << "Parse nested gflags " << name << " failed";
return;
}
std::unordered_map<std::string, std::string> optionMap;
conf.forEachItem([&optionMap] (const std::string& key, const folly::dynamic& val) {
optionMap.emplace(key, val.asString());
});
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
for (const auto& spaceEntry : localCache_) {
listener_->onSpaceOptionUpdated(spaceEntry.first, optionMap);
}
}
ConfigItem MetaClient::toConfigItem(const cpp2::ConfigItem& item) {
VariantType value;
switch (item.get_type()) {
case cpp2::ConfigType::INT64:
value = *reinterpret_cast<const int64_t*>(item.get_value().data());
break;
case cpp2::ConfigType::BOOL:
value = *reinterpret_cast<const bool*>(item.get_value().data());
break;
case cpp2::ConfigType::DOUBLE:
value = *reinterpret_cast<const double*>(item.get_value().data());
break;
case cpp2::ConfigType::STRING:
case cpp2::ConfigType::NESTED:
value = item.get_value();
break;
}
return ConfigItem(item.get_module(), item.get_name(), item.get_type(), item.get_mode(), value);
}
Status MetaClient::refreshCache() {
auto ret = bgThread_->addTask(&MetaClient::loadData, this).get();
return ret ? Status::OK() : Status::Error("Load data failed");
}
StatusOr<LeaderMap> MetaClient::loadLeader() {
    // Return an error if loadData() has not been called yet.
if (!ready_) {
return Status::Error("Not ready!");
}
auto ret = listHosts().get();
if (!ret.ok()) {
return Status::Error("List hosts failed");
}
LeaderMap leaderMap;
auto hostItems = std::move(ret).value();
for (auto& item : hostItems) {
auto hostAddr = HostAddr(item.hostAddr.ip, item.hostAddr.port);
for (auto& spaceEntry : item.get_leader_parts()) {
auto spaceName = spaceEntry.first;
auto status = getSpaceIdByNameFromCache(spaceName);
if (!status.ok()) {
continue;
}
auto spaceId = status.value();
for (const auto& partId : spaceEntry.second) {
leaderMap[{spaceId, partId}] = hostAddr;
}
}
LOG(INFO) << "Load leader of " << hostAddr
<< " in " << item.get_leader_parts().size() << " space";
}
LOG(INFO) << "Load leader ok";
return leaderMap;
}
} // namespace meta
} // namespace nebula
| 1 | 29,003 | why not do it in `reclaimExpiredSessions`? and `pushSessionToCache` can delete | vesoft-inc-nebula | cpp |
@@ -0,0 +1,17 @@
+_base_ = './retinanet_regnetx-1.6GF_fpn_mstrain_640-800_3x_coco.py'
+model = dict(
+ pretrained='open-mmlab://regnetx_3.2gf',
+ backbone=dict(
+ _delete_=True,
+ type='RegNet',
+ arch='regnetx_3.2gf',
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=1,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ style='pytorch'),
+ neck=dict(
+ type='FPN',
+ in_channels=[96, 192, 432, 1008],
+ out_channels=256,
+ num_outs=5)) | 1 | 1 | 24,576 | out_channels/num_outs/type are unnecessary | open-mmlab-mmdetection | py |
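# A hedged sketch of the reviewer's suggestion above, not the merged file:
# because this config inherits the FPN neck from `_base_`, keys that do not
# change (type, out_channels, num_outs) can be dropped and only the RegNet
# channel widths restated. Assumes the base config already defines those keys.
_base_ = './retinanet_regnetx-1.6GF_fpn_mstrain_640-800_3x_coco.py'
model = dict(
    pretrained='open-mmlab://regnetx_3.2gf',
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(in_channels=[96, 192, 432, 1008]))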
|
@@ -6,6 +6,17 @@ let null_dict = null_dictionary || {}
// eslint-disable-next-line no-undef
assert_true(dict.X !== undefined && dict.doSomething !== undefined); // Testing successful object creation.
+
+/*
+  It seems that Object.values(dict) is not supported on JSC for non-static fields; it would be worth
+  revisiting this in the future.
+ verify_object_fields(Object.values(dict).length)
+ */
+
+// eslint-disable-next-line no-undef
+verify_object_fields(Object.keys(dict).length)
+
+
null_dict.hello(true); // Testing method call from object.
null_dict.alo(true); // Testing method call from object <again>.
| 1 | // eslint-disable-next-line no-undef,strict
let dict = dictionary || {}
// eslint-disable-next-line no-undef
let null_dict = null_dictionary || {}
// eslint-disable-next-line no-undef
assert_true(dict.X !== undefined && dict.doSomething !== undefined); // Testing successful object creation.
null_dict.hello(true); // Testing method call from object.
null_dict.alo(true); // Testing method call from object <again>.
dict.doSomething(28850);
/* Testing accessors. */
dict.X=666;
// eslint-disable-next-line no-undef
test_accessor(dict, 'X', 666);
assert_enumerate(JSON.stringify(dict))
/*
Testing exception mechanism.
*/
try {
dict.X = -1 // testing wrong value.
}catch(error){
assert_exception(error.message)
} | 1 | 20,469 | You can create an issue for it and put it on the backlog so we don't forget it. | realm-realm-js | js |
@@ -21,7 +21,11 @@ func (i *startContainerInterceptor) InterceptResponse(r *http.Response) error {
return err
}
- cidrs, ok := i.proxy.weaveCIDRsFromConfig(container.Config)
+ if !validNetworkMode(container.HostConfig) {
+ Debug.Printf("Ignoring container %s with --net=%s", container.ID, networkMode(container.HostConfig))
+ return nil
+ }
+ cidrs, ok := i.proxy.weaveCIDRsFromConfig(container.Config, container.HostConfig)
if !ok {
Debug.Print("No Weave CIDR, ignoring")
return nil | 1 | package proxy
import (
"errors"
"net/http"
"strings"
"github.com/fsouza/go-dockerclient"
. "github.com/weaveworks/weave/common"
)
type startContainerInterceptor struct{ proxy *Proxy }
func (i *startContainerInterceptor) InterceptRequest(r *http.Request) error {
return nil
}
func (i *startContainerInterceptor) InterceptResponse(r *http.Response) error {
container, err := inspectContainerInPath(i.proxy.client, r.Request.URL.Path)
if err != nil {
return err
}
cidrs, ok := i.proxy.weaveCIDRsFromConfig(container.Config)
if !ok {
Debug.Print("No Weave CIDR, ignoring")
return nil
}
Info.Printf("Attaching container %s with WEAVE_CIDR \"%s\" to weave network", container.ID, strings.Join(cidrs, " "))
args := []string{"attach"}
args = append(args, cidrs...)
args = append(args, "--or-die", container.ID)
if output, err := callWeave(args...); err != nil {
Warning.Printf("Attaching container %s to weave network failed: %s", container.ID, string(output))
return errors.New(string(output))
}
return i.proxy.client.KillContainer(docker.KillContainerOptions{ID: container.ID, Signal: docker.SIGUSR2})
}
| 1 | 10,201 | So now we are checking twice, both here and in `weaveCIDRsFromConfig`. Not great. I suggest changing the `ok` return of `weaveCIDRsFromConfig` to a messsage (or error?) instead, which we can then log. | weaveworks-weave | go |
@@ -436,11 +436,10 @@ def install(session, package, hash=None, version=None, tag=None):
if pkghash != hash_contents(response_contents):
raise CommandException("Mismatched hash. Try again.")
- pkgobj = store.create_package(owner, pkg, PackageFormat.HDF5)
+ pkgobj = store.create_package(owner, pkg)
try:
pkgobj.install(response_contents, response_urls)
except PackageException as ex:
- pkgobj.clear_contents()
raise CommandException("Failed to install the package: %s" % ex)
def access_list(session, package): | 1 | # -*- coding: utf-8 -*-
"""
Command line parsing and command dispatch
"""
from __future__ import print_function
from builtins import input
import argparse
import json
import os
import stat
import sys
import time
import webbrowser
import pandas as pd
import requests
from packaging.version import Version
from .build import build_package, generate_build_file, BuildException
from .const import LATEST_TAG
from .core import (hash_contents, GroupNode, TableNode, FileNode,
decode_node, encode_node, PackageFormat)
from .package import PackageException
from .store import PackageStore, ls_packages
from .util import BASE_DIR, FileWithReadProgress
HEADERS = {"Content-Type": "application/json", "Accept": "application/json"}
QUILT_PKG_URL = os.environ.get('QUILT_PKG_URL', 'https://pkg.quiltdata.com')
AUTH_FILE_NAME = "auth.json"
class CommandException(Exception):
"""
Exception class for all command-related failures.
"""
pass
def _update_auth(refresh_token):
response = requests.post("%s/api/token" % QUILT_PKG_URL, data=dict(
refresh_token=refresh_token
))
if response.status_code != requests.codes.ok:
raise CommandException("Authentication error: %s" % response.status_code)
data = response.json()
error = data.get('error')
if error is not None:
raise CommandException("Failed to log in: %s" % error)
return dict(
refresh_token=data['refresh_token'],
access_token=data['access_token'],
expires_at=data['expires_at']
)
def _save_auth(auth):
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
file_path = os.path.join(BASE_DIR, AUTH_FILE_NAME)
with open(file_path, 'w') as fd:
os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR)
json.dump(auth, fd)
def _handle_response(resp, **kwargs):
if resp.status_code == requests.codes.unauthorized:
raise CommandException("Authentication failed. Run `quilt login` again.")
elif not resp.ok:
try:
data = resp.json()
raise CommandException(data['message'])
except ValueError:
raise CommandException("Unexpected failure: error %s" % resp.status_code)
def create_session():
"""
Creates a session object to be used for `push`, `install`, etc.
It reads the credentials, possibly gets an updated access token,
and sets the request headers.
"""
file_path = os.path.join(BASE_DIR, AUTH_FILE_NAME)
if os.path.exists(file_path):
with open(file_path) as fd:
auth = json.load(fd)
# If the access token expires within a minute, update it.
if auth['expires_at'] < time.time() + 60:
try:
auth = _update_auth(auth['refresh_token'])
except CommandException as ex:
raise CommandException(
"Failed to update the access token (%s). Run `quilt login` again." % ex
)
_save_auth(auth)
else:
# The auth file doesn't exist, probably because the
# user hasn't run quilt login yet.
auth = None
session = requests.Session()
session.hooks.update(dict(
response=_handle_response
))
session.headers.update({
"Content-Type": "application/json",
"Accept": "application/json",
})
if auth is not None:
session.headers["Authorization"] = "Bearer %s" % auth['access_token']
return session
def _parse_package(name):
try:
owner, pkg = name.split('/')
if not owner or not pkg:
# Make sure they're not empty.
raise ValueError
except ValueError:
raise CommandException("Specify package as owner/package_name.")
return owner, pkg
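def _example_parse_package():
    """Hypothetical sketch, not part of the upstream module: how
    _parse_package behaves on well-formed and malformed names."""
    assert _parse_package("alice/wine") == ("alice", "wine")
    for bad in ["alice", "alice/", "/wine", "a/b/c"]:
        try:
            _parse_package(bad)
        except CommandException as ex:
            print("%r -> %s" % (bad, ex))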
def login():
"""
Authenticate.
"""
login_url = "%s/login" % QUILT_PKG_URL
print("Launching a web browser...")
print("If that didn't work, please visit the following URL: %s" % login_url)
# Open the browser. Get rid of stdout while launching the browser to prevent
    # Chrome/Firefox from outputting garbage over the code prompt.
devnull = os.open(os.devnull, os.O_RDWR)
old_stdout = os.dup(1)
os.dup2(devnull, 1)
try:
webbrowser.open(login_url)
finally:
os.close(devnull)
os.dup2(old_stdout, 1)
os.close(old_stdout)
print()
refresh_token = input("Enter the code from the webpage: ")
# Get an access token (and a new refresh token).
# Technically, we could have the user enter both tokens - but it doesn't
# really matter, and this lets us verify that the token actually works.
auth = _update_auth(refresh_token)
_save_auth(auth)
def logout():
"""
Become anonymous. Useful for testing.
"""
auth_file = os.path.join(BASE_DIR, AUTH_FILE_NAME)
# TODO revoke refresh token (without logging out of web sessions)
if os.path.exists(auth_file):
os.remove(auth_file)
else:
print("Already logged out.")
def build(package, path, directory=None):
"""
Compile a Quilt data package
"""
owner, pkg = _parse_package(package)
if directory:
        buildpath = generate_build_file(directory)
else:
buildpath = path
try:
build_package(owner, pkg, buildpath)
print("Built %s/%s successfully." % (owner, pkg))
except BuildException as ex:
raise CommandException("Failed to build the package: %s" % ex)
def log(session, package):
"""
List all of the changes to a package on the server.
"""
owner, pkg = _parse_package(package)
response = session.get(
"{url}/api/log/{owner}/{pkg}/".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg
)
)
format_str = "%-64s %-19s %s"
print(format_str % ("Hash", "Created", "Author"))
for entry in response.json()['logs']:
# TODO: convert "created" to local time.
print(format_str % (entry['hash'], entry['created'], entry['author']))
def push(session, package):
"""
Push a Quilt data package to the server
"""
owner, pkg = _parse_package(package)
store = PackageStore()
pkgobj = store.get_package(owner, pkg)
if pkgobj is None:
raise CommandException("Package {owner}/{pkg} not found.".format(owner=owner, pkg=pkg))
pkghash = pkgobj.get_hash()
print("Uploading package metadata...")
response = session.put(
"{url}/api/package/{owner}/{pkg}/{hash}".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg,
hash=pkghash
),
data=json.dumps(dict(
contents=pkgobj.get_contents(),
description="" # TODO
), default=encode_node)
)
dataset = response.json()
upload_urls = dataset['upload_urls']
headers = {
'Content-Encoding': 'gzip'
}
total = len(upload_urls)
for idx, (objhash, url) in enumerate(upload_urls.items()):
# Create a temporary gzip'ed file.
print("Uploading object %d/%d..." % (idx + 1, total))
with pkgobj.tempfile(objhash) as temp_file:
with FileWithReadProgress(temp_file) as temp_file_with_progress:
response = requests.put(url, data=temp_file_with_progress, headers=headers)
if not response.ok:
raise CommandException("Upload failed: error %s" % response.status_code)
print("Updating the 'latest' tag...")
# Set the "latest" tag.
response = session.put(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg,
tag=LATEST_TAG
),
data=json.dumps(dict(
hash=pkghash
))
)
assert response.ok # other responses handled by _handle_response
url = "https://quiltdata.com/package/%s/%s" % (owner, pkg)
print("Success! Visit the package page here: %s" % url)
def version_list(session, package):
"""
List the versions of a package.
"""
owner, pkg = _parse_package(package)
response = session.get(
"{url}/api/version/{owner}/{pkg}/".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg
)
)
for version in response.json()['versions']:
print("%s: %s" % (version['version'], version['hash']))
def version_add(session, package, version, pkghash):
"""
Add a new version for a given package hash.
Version format needs to follow PEP 440.
Versions are permanent - once created, they cannot be modified or deleted.
"""
owner, pkg = _parse_package(package)
try:
Version(version)
except ValueError:
url = "https://www.python.org/dev/peps/pep-0440/#examples-of-compliant-version-schemes"
raise CommandException(
"Invalid version format; see %s" % url
)
answer = input("Versions cannot be modified or deleted; are you sure? (y/n) ")
if answer.lower() != 'y':
return
session.put(
"{url}/api/version/{owner}/{pkg}/{version}".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg,
version=version
),
data=json.dumps(dict(
hash=pkghash
))
)
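def _example_pep440_check():
    """Hypothetical sketch, not part of the upstream module: which strings
    survive the Version(...) validation used by version_add above. The
    command catches ValueError, so anything raising here would surface as
    a CommandException."""
    for candidate in ["1.0.0", "2.0.0rc1", "1.0.post1", "not-a-version"]:
        try:
            Version(candidate)
            print("%s -> accepted" % candidate)
        except ValueError:
            print("%s -> rejected (not PEP 440)" % candidate)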
def tag_list(session, package):
"""
List the tags of a package.
"""
owner, pkg = _parse_package(package)
response = session.get(
"{url}/api/tag/{owner}/{pkg}/".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg
)
)
for tag in response.json()['tags']:
print("%s: %s" % (tag['tag'], tag['hash']))
def tag_add(session, package, tag, pkghash):
"""
Add a new tag for a given package hash.
Unlike versions, tags can have an arbitrary format, and can be modified
and deleted.
When a package is pushed, it gets the "latest" tag.
"""
owner, pkg = _parse_package(package)
session.put(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg,
tag=tag
),
data=json.dumps(dict(
hash=pkghash
))
)
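def _example_tag_workflow():
    """Hypothetical sketch, not part of the upstream module: tags can be
    re-pointed and deleted, versions cannot (see the docstrings above).
    The package name and hash are made up; `quilt login` is assumed."""
    session = create_session()
    pkghash = "0" * 64  # placeholder package hash
    tag_add(session, "alice/wine", "stable", pkghash)     # create or re-point a tag
    tag_list(session, "alice/wine")                       # e.g. latest, stable
    tag_remove(session, "alice/wine", "stable")           # tags are deletable
    version_add(session, "alice/wine", "1.0.0", pkghash)  # permanent once confirmed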
def tag_remove(session, package, tag):
"""
Delete a tag.
"""
owner, pkg = _parse_package(package)
session.delete(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg,
tag=tag
)
)
def install(session, package, hash=None, version=None, tag=None):
"""
Download a Quilt data package from the server and install locally.
At most one of `hash`, `version`, or `tag` can be given. If none are
given, `tag` defaults to "latest".
"""
if hash is version is tag is None:
tag = LATEST_TAG
assert [hash, version, tag].count(None) == 2
owner, pkg = _parse_package(package)
store = PackageStore()
existing_pkg = store.get_package(owner, pkg)
if existing_pkg is not None:
print("{owner}/{pkg} already installed.".format(owner=owner, pkg=pkg))
overwrite = input("Overwrite? (y/n) ")
if overwrite.lower() != 'y':
return
if version is not None:
response = session.get(
"{url}/api/version/{owner}/{pkg}/{version}".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg,
version=version
)
)
pkghash = response.json()['hash']
elif tag is not None:
response = session.get(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg,
tag=tag
)
)
pkghash = response.json()['hash']
else:
pkghash = hash
assert pkghash is not None
response = session.get(
"{url}/api/package/{owner}/{pkg}/{hash}".format(
url=QUILT_PKG_URL,
owner=owner,
pkg=pkg,
hash=pkghash
)
)
assert response.ok # other responses handled by _handle_response
dataset = response.json(object_hook=decode_node)
response_urls = dataset['urls']
response_contents = dataset['contents']
# Verify contents hash
if pkghash != hash_contents(response_contents):
raise CommandException("Mismatched hash. Try again.")
pkgobj = store.create_package(owner, pkg, PackageFormat.HDF5)
try:
pkgobj.install(response_contents, response_urls)
except PackageException as ex:
pkgobj.clear_contents()
raise CommandException("Failed to install the package: %s" % ex)
def access_list(session, package):
"""
Print list of users who can access a package.
"""
owner, pkg = _parse_package(package)
lookup_url = "{url}/api/access/{owner}/{pkg}".format(url=QUILT_PKG_URL, owner=owner, pkg=pkg)
response = session.get(lookup_url)
data = response.json()
users = data['users']
print('\n'.join(users))
def access_add(session, package, user):
"""
Add access
"""
owner, pkg = _parse_package(package)
session.put("%s/api/access/%s/%s/%s" % (QUILT_PKG_URL, owner, pkg, user))
def access_remove(session, package, user):
"""
Remove access
"""
owner, pkg = _parse_package(package)
session.delete("%s/api/access/%s/%s/%s" % (QUILT_PKG_URL, owner, pkg, user))
def ls():
"""
List all installed Quilt data packages
"""
store = PackageStore()
for pkg_dir in store.find_package_dirs():
print("%s" % pkg_dir)
packages = ls_packages(pkg_dir)
for idx, (owner, pkg) in enumerate(packages):
prefix = u"└── " if idx == len(packages) - 1 else u"├── "
print("%s%s/%s" % (prefix, owner, pkg))
def inspect(package):
"""
Inspect package details
"""
owner, pkg = _parse_package(package)
store = PackageStore()
pkgobj = store.get_package(owner, pkg)
if pkgobj is None:
raise CommandException("Package {owner}/{pkg} not found.".format(owner=owner, pkg=pkg))
def _print_children(children, prefix, path):
for idx, (name, child) in enumerate(children):
if idx == len(children) - 1:
new_prefix = u"└─"
new_child_prefix = u" "
else:
new_prefix = u"├─"
new_child_prefix = u"│ "
_print_node(child, prefix + new_prefix, prefix + new_child_prefix, name, path)
def _print_node(node, prefix, child_prefix, name, path):
name_prefix = u"─ "
if isinstance(node, GroupNode):
children = list(node.children.items())
if children:
name_prefix = u"┬ "
print(prefix + name_prefix + name)
_print_children(children, child_prefix, path + name)
elif isinstance(node, TableNode):
fullname = "/".join([path, name])
df = pkgobj.get(fullname)
assert isinstance(df, pd.DataFrame)
info = "shape %s, type \"%s\"" % (df.shape, df.dtypes)
print(prefix + name_prefix + ": " + info)
elif isinstance(node, FileNode):
fullname = "/".join([path, name])
print(prefix + name_prefix + name)
else:
assert False, "node=%s type=%s" % (node, type(node))
print(pkgobj.get_path())
_print_children(children=pkgobj.get_contents().children.items(), prefix='', path='')
def main():
"""
Build and run parser
"""
parser = argparse.ArgumentParser(description="Quilt Command Line")
parser.set_defaults(need_session=True)
subparsers = parser.add_subparsers(title="Commands", dest='cmd')
subparsers.required = True
login_p = subparsers.add_parser("login")
login_p.set_defaults(func=login, need_session=False)
logout_p = subparsers.add_parser("logout")
logout_p.set_defaults(func=logout, need_session=False)
log_p = subparsers.add_parser("log")
log_p.add_argument("package", type=str, help="Owner/Package Name")
log_p.set_defaults(func=log)
build_p = subparsers.add_parser("build")
build_p.add_argument("package", type=str, help="Owner/Package Name")
buildpath_group = build_p.add_mutually_exclusive_group(required=True)
buildpath_group.add_argument("-d", "--directory", type=str, help="Source file directory")
buildpath_group.add_argument("path", type=str, nargs='?', help="Path to the Yaml build file")
build_p.set_defaults(func=build, need_session=False)
push_p = subparsers.add_parser("push")
push_p.add_argument("package", type=str, help="Owner/Package Name")
push_p.set_defaults(func=push)
version_p = subparsers.add_parser("version")
version_subparsers = version_p.add_subparsers(title="version", dest='cmd')
version_subparsers.required = True
version_list_p = version_subparsers.add_parser("list")
version_list_p.add_argument("package", type=str, help="Owner/Package Name")
version_list_p.set_defaults(func=version_list)
version_add_p = version_subparsers.add_parser("add")
version_add_p.add_argument("package", type=str, help="Owner/Package Name")
version_add_p.add_argument("version", type=str, help="Version")
version_add_p.add_argument("pkghash", type=str, help="Package hash")
version_add_p.set_defaults(func=version_add)
tag_p = subparsers.add_parser("tag")
tag_subparsers = tag_p.add_subparsers(title="Tag", dest='cmd')
tag_subparsers.required = True
tag_list_p = tag_subparsers.add_parser("list")
tag_list_p.add_argument("package", type=str, help="Owner/Package Name")
tag_list_p.set_defaults(func=tag_list)
tag_add_p = tag_subparsers.add_parser("add")
tag_add_p.add_argument("package", type=str, help="Owner/Package Name")
tag_add_p.add_argument("tag", type=str, help="Tag name")
tag_add_p.add_argument("pkghash", type=str, help="Package hash")
tag_add_p.set_defaults(func=tag_add)
tag_remove_p = tag_subparsers.add_parser("remove")
tag_remove_p.add_argument("package", type=str, help="Owner/Package Name")
tag_remove_p.add_argument("tag", type=str, help="Tag name")
tag_remove_p.set_defaults(func=tag_remove)
install_p = subparsers.add_parser("install")
install_p.add_argument("package", type=str, help="Owner/Package Name")
install_p.set_defaults(func=install)
install_group = install_p.add_mutually_exclusive_group()
install_group.add_argument("-x", "--hash", type=str, help="Package hash")
install_group.add_argument("-v", "--version", type=str, help="Package version")
install_group.add_argument("-t", "--tag", type=str, help="Package tag - defaults to 'latest'")
access_p = subparsers.add_parser("access")
access_subparsers = access_p.add_subparsers(title="Access", dest='cmd')
access_subparsers.required = True
access_list_p = access_subparsers.add_parser("list")
access_list_p.add_argument("package", type=str, help="Owner/Package Name")
access_list_p.set_defaults(func=access_list)
access_add_p = access_subparsers.add_parser("add")
access_add_p.add_argument("package", type=str, help="Owner/Package Name")
access_add_p.add_argument("user", type=str, help="User to add")
access_add_p.set_defaults(func=access_add)
access_remove_p = access_subparsers.add_parser("remove")
access_remove_p.add_argument("package", type=str, help="Owner/Package Name")
access_remove_p.add_argument("user", type=str, help="User to remove")
access_remove_p.set_defaults(func=access_remove)
ls_p = subparsers.add_parser("ls")
ls_p.set_defaults(func=ls, need_session=False)
inspect_p = subparsers.add_parser("inspect")
inspect_p.add_argument("package", type=str, help="Owner/Package Name")
inspect_p.set_defaults(func=inspect, need_session=False)
args = parser.parse_args()
# Convert argparse.Namespace into dict and clean it up.
# We can then pass it directly to the helper function.
kwargs = vars(args)
del kwargs['cmd']
func = kwargs.pop('func')
try:
# Create a session if needed.
if kwargs.pop('need_session'):
kwargs['session'] = create_session()
func(**kwargs)
return 0
except CommandException as ex:
print(ex, file=sys.stderr)
return 1
except requests.exceptions.ConnectionError as ex:
print("Failed to connect: %s" % ex, file=sys.stderr)
return 1
| 1 | 14,926 | An alternative to setting format to the default in Package.__init__ would be to set it in create_package. I think we can assume all packages are created by create_package, but not necessarily by build_package. | quiltdata-quilt | py |
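A minimal sketch of the suggestion in the review above, with the default-format decision moved from Package.__init__ into create_package. The class shapes, parameter names, and the HDF5 default below are stand-ins inferred from the surrounding quilt code, not the project's actual implementation.

from enum import Enum


class PackageFormat(Enum):
    HDF5 = "hdf5"


class Package:
    def __init__(self, owner, name, fmt):
        # __init__ no longer invents a default; it stores what it is given.
        self.owner = owner
        self.name = name
        self.format = fmt


class PackageStore:
    def create_package(self, owner, name, fmt=None):
        # Per the review, every package is assumed to come through here,
        # so this is the single place the default format is applied.
        if fmt is None:
            fmt = PackageFormat.HDF5
        return Package(owner, name, fmt)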
@@ -34,10 +34,17 @@ type MiningBlock struct {
RemoteTxs types.TransactionsStream
}
+// In case we are proposing during proof-of-stake and we are supplied with header fields already
+type PresetHeaderFields struct {
+ Timestamp uint64
+ Random common.Hash
+}
+
type MiningState struct {
MiningConfig *params.MiningConfig
PendingResultCh chan *types.Block
MiningResultCh chan *types.Block
+ ProposeBlockCh chan *types.Block
MiningBlock *MiningBlock
}
| 1 | package stagedsync
import (
"bytes"
"context"
"errors"
"fmt"
"math/big"
"time"
mapset "github.com/deckarep/golang-set"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/txpool"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/debug"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/eth/ethutils"
"github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/log/v3"
)
type MiningBlock struct {
Header *types.Header
Uncles []*types.Header
Txs []types.Transaction
Receipts types.Receipts
LocalTxs types.TransactionsStream
RemoteTxs types.TransactionsStream
}
type MiningState struct {
MiningConfig *params.MiningConfig
PendingResultCh chan *types.Block
MiningResultCh chan *types.Block
MiningBlock *MiningBlock
}
func NewMiningState(cfg *params.MiningConfig) MiningState {
return MiningState{
MiningConfig: cfg,
PendingResultCh: make(chan *types.Block, 1),
MiningResultCh: make(chan *types.Block, 1),
MiningBlock: &MiningBlock{},
}
}
type MiningCreateBlockCfg struct {
db kv.RwDB
miner MiningState
chainConfig params.ChainConfig
engine consensus.Engine
txPool2 *txpool.TxPool
txPool2DB kv.RoDB
tmpdir string
}
func StageMiningCreateBlockCfg(db kv.RwDB, miner MiningState, chainConfig params.ChainConfig, engine consensus.Engine, txPool2 *txpool.TxPool, txPool2DB kv.RoDB, tmpdir string) MiningCreateBlockCfg {
return MiningCreateBlockCfg{
db: db,
miner: miner,
chainConfig: chainConfig,
engine: engine,
txPool2: txPool2,
txPool2DB: txPool2DB,
tmpdir: tmpdir,
}
}
// SpawnMiningCreateBlockStage
//TODO:
// - resubmitAdjustCh - variable is not implemented
func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBlockCfg, quit <-chan struct{}) (err error) {
current := cfg.miner.MiningBlock
txPoolLocals := []common.Address{} //txPoolV2 has no concept of local addresses (yet?)
coinbase := cfg.miner.MiningConfig.Etherbase
const (
// staleThreshold is the maximum depth of the acceptable stale block.
staleThreshold = 7
)
if cfg.miner.MiningConfig.Etherbase == (common.Address{}) {
return fmt.Errorf("refusing to mine without etherbase")
}
logPrefix := s.LogPrefix()
executionAt, err := s.ExecutionAt(tx)
if err != nil {
return fmt.Errorf("getting last executed block: %w", err)
}
parent := rawdb.ReadHeaderByNumber(tx, executionAt)
if parent == nil { // todo: how to return error and don't stop Erigon?
		return fmt.Errorf("[%s] Empty block: %d", logPrefix, executionAt)
}
blockNum := executionAt + 1
var txs []types.Transaction
if err = cfg.txPool2DB.View(context.Background(), func(tx kv.Tx) error {
txSlots := txpool.TxsRlp{}
if err := cfg.txPool2.Best(200, &txSlots, tx); err != nil {
return err
}
txs, err = types.DecodeTransactions(txSlots.Txs)
if err != nil {
return fmt.Errorf("decode rlp of pending txs: %w", err)
}
var sender common.Address
for i := range txs {
copy(sender[:], txSlots.Senders.At(i))
txs[i].SetSender(sender)
}
return nil
}); err != nil {
return err
}
current.RemoteTxs = types.NewTransactionsFixedOrder(txs)
// txpool v2 - doesn't prioritise local txs over remote
current.LocalTxs = types.NewTransactionsFixedOrder(nil)
log.Debug(fmt.Sprintf("[%s] Candidate txs", logPrefix), "amount", len(txs))
localUncles, remoteUncles, err := readNonCanonicalHeaders(tx, blockNum, cfg.engine, coinbase, txPoolLocals)
if err != nil {
return err
}
chain := ChainReader{Cfg: cfg.chainConfig, Db: tx}
var GetBlocksFromHash = func(hash common.Hash, n int) (blocks []*types.Block) {
number := rawdb.ReadHeaderNumber(tx, hash)
if number == nil {
return nil
}
for i := 0; i < n; i++ {
block := rawdb.ReadBlock(tx, hash, *number)
if block == nil {
break
}
blocks = append(blocks, block)
hash = block.ParentHash()
*number--
}
return
}
type envT struct {
signer *types.Signer
ancestors mapset.Set // ancestor set (used for checking uncle parent validity)
family mapset.Set // family set (used for checking uncle invalidity)
uncles mapset.Set // uncle set
}
env := &envT{
signer: types.MakeSigner(&cfg.chainConfig, blockNum),
ancestors: mapset.NewSet(),
family: mapset.NewSet(),
uncles: mapset.NewSet(),
}
// re-written miner/worker.go:commitNewWork
timestamp := time.Now().Unix()
if parent.Time >= uint64(timestamp) {
timestamp = int64(parent.Time + 1)
}
num := parent.Number
header := &types.Header{
ParentHash: parent.Hash(),
Number: num.Add(num, common.Big1),
GasLimit: core.CalcGasLimit(parent.GasUsed, parent.GasLimit, cfg.miner.MiningConfig.GasFloor, cfg.miner.MiningConfig.GasCeil),
Extra: cfg.miner.MiningConfig.ExtraData,
Time: uint64(timestamp),
}
// Set baseFee and GasLimit if we are on an EIP-1559 chain
if cfg.chainConfig.IsLondon(header.Number.Uint64()) {
header.Eip1559 = true
header.BaseFee = misc.CalcBaseFee(&cfg.chainConfig, parent)
if !cfg.chainConfig.IsLondon(parent.Number.Uint64()) {
parentGasLimit := parent.GasLimit * params.ElasticityMultiplier
header.GasLimit = core.CalcGasLimit(parent.GasUsed, parentGasLimit, cfg.miner.MiningConfig.GasFloor, cfg.miner.MiningConfig.GasCeil)
}
}
log.Info(fmt.Sprintf("[%s] Start mine", logPrefix), "block", executionAt+1, "baseFee", header.BaseFee, "gasLimit", header.GasLimit)
// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
//if w.isRunning() {
header.Coinbase = coinbase
//}
if err = cfg.engine.Prepare(chain, header); err != nil {
log.Error("Failed to prepare header for mining",
"err", err,
"headerNumber", header.Number.Uint64(),
"headerRoot", header.Root.String(),
"headerParentHash", header.ParentHash.String(),
"parentNumber", parent.Number.Uint64(),
"parentHash", parent.Hash().String(),
"callers", debug.Callers(10))
return err
}
	// If we care about the DAO hard-fork, check whether to override the extra-data or not
if daoBlock := cfg.chainConfig.DAOForkBlock; daoBlock != nil {
// Check whether the block is among the fork extra-override range
limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
// Depending whether we support or oppose the fork, override differently
if cfg.chainConfig.DAOForkSupport {
header.Extra = libcommon.Copy(params.DAOForkBlockExtra)
} else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
}
}
}
// analog of miner.Worker.updateSnapshot
var makeUncles = func(proposedUncles mapset.Set) []*types.Header {
var uncles []*types.Header
proposedUncles.Each(func(item interface{}) bool {
hash, ok := item.(common.Hash)
if !ok {
return false
}
uncle, exist := localUncles[hash]
if !exist {
uncle, exist = remoteUncles[hash]
}
if !exist {
return false
}
uncles = append(uncles, uncle)
return false
})
return uncles
}
// when 08 is processed ancestors contain 07 (quick block)
for _, ancestor := range GetBlocksFromHash(parent.Hash(), 7) {
for _, uncle := range ancestor.Uncles() {
env.family.Add(uncle.Hash())
}
env.family.Add(ancestor.Hash())
env.ancestors.Add(ancestor.Hash())
}
commitUncle := func(env *envT, uncle *types.Header) error {
hash := uncle.Hash()
if env.uncles.Contains(hash) {
return errors.New("uncle not unique")
}
if parent.Hash() == uncle.ParentHash {
return errors.New("uncle is sibling")
}
if !env.ancestors.Contains(uncle.ParentHash) {
return errors.New("uncle's parent unknown")
}
if env.family.Contains(hash) {
return errors.New("uncle already included")
}
env.uncles.Add(uncle.Hash())
return nil
}
// Accumulate the miningUncles for the env block
// Prefer to locally generated uncle
uncles := make([]*types.Header, 0, 2)
for _, blocks := range []map[common.Hash]*types.Header{localUncles, remoteUncles} {
// Clean up stale uncle blocks first
for hash, uncle := range blocks {
if uncle.Number.Uint64()+staleThreshold <= header.Number.Uint64() {
delete(blocks, hash)
}
}
for hash, uncle := range blocks {
if len(uncles) == 2 {
break
}
if err = commitUncle(env, uncle); err != nil {
log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
} else {
log.Trace("Committing new uncle to block", "hash", hash)
uncles = append(uncles, uncle)
}
}
}
current.Header = header
current.Uncles = makeUncles(env.uncles)
return nil
}
func readNonCanonicalHeaders(tx kv.Tx, blockNum uint64, engine consensus.Engine, coinbase common.Address, txPoolLocals []common.Address) (localUncles, remoteUncles map[common.Hash]*types.Header, err error) {
localUncles, remoteUncles = map[common.Hash]*types.Header{}, map[common.Hash]*types.Header{}
nonCanonicalBlocks, err := rawdb.ReadHeadersByNumber(tx, blockNum)
if err != nil {
return
}
for _, u := range nonCanonicalBlocks {
if ethutils.IsLocalBlock(engine, coinbase, txPoolLocals, u) {
localUncles[u.Hash()] = u
} else {
remoteUncles[u.Hash()] = u
}
}
return
}
| 1 | 23,050 | Let's add fee recipient as well. | ledgerwatch-erigon | go |
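A compilable sketch of the request above ("Let's add fee recipient as well"), assuming the new field would sit next to Timestamp and Random in PresetHeaderFields. Hash and Address are local stand-ins for erigon's common.Hash and common.Address, and the field name FeeRecipient is an assumption.

package main

import "fmt"

type Hash [32]byte    // stand-in for common.Hash
type Address [20]byte // stand-in for common.Address

// PresetHeaderFields as it might look with the fee recipient added.
type PresetHeaderFields struct {
	Timestamp    uint64
	Random       Hash
	FeeRecipient Address
}

func main() {
	p := PresetHeaderFields{Timestamp: 1234567890}
	fmt.Printf("timestamp=%d feeRecipient=%x\n", p.Timestamp, p.FeeRecipient)
}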
@@ -239,6 +239,7 @@ class FileDownloadTarget(_DownloadTarget):
def __init__(self, filename):
# pylint: disable=super-init-not-called
self.filename = filename
+ # pylint: enable=super-init-not-called
def suggested_filename(self):
return os.path.basename(self.filename) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Shared QtWebKit/QtWebEngine code for downloads."""
import re
import sys
import html
import os.path
import collections
import functools
import pathlib
import tempfile
import sip
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QObject, QModelIndex,
QTimer, QAbstractListModel)
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.config import config
from qutebrowser.utils import (usertypes, standarddir, utils, message, log,
qtutils)
ModelRole = usertypes.enum('ModelRole', ['item'], start=Qt.UserRole,
is_int=True)
# Remember the last used directory
last_used_directory = None
# All REFRESH_INTERVAL milliseconds, speeds will be recalculated and downloads
# redrawn.
_REFRESH_INTERVAL = 500
class UnsupportedAttribute:
"""Class which is used to create attributes which are not supported.
This is used for attributes like "fileobj" for downloads which are not
    supported with QtWebEngine.
"""
pass
class UnsupportedOperationError(Exception):
"""Raised when an operation is not supported with the given backend."""
def download_dir():
"""Get the download directory to use."""
directory = config.val.downloads.location.directory
remember_dir = config.val.downloads.location.remember
if remember_dir and last_used_directory is not None:
ddir = last_used_directory
elif directory is None:
ddir = standarddir.download()
else:
ddir = directory
try:
os.makedirs(ddir)
except FileExistsError:
pass
return ddir
def immediate_download_path(prompt_download_directory=None):
"""Try to get an immediate download path without asking the user.
If that's possible, we return a path immediately. If not, None is returned.
Args:
prompt_download_directory: If this is something else than None, it
will overwrite the
downloads.location.prompt setting.
"""
if prompt_download_directory is None:
prompt_download_directory = config.val.downloads.location.prompt
if not prompt_download_directory:
return download_dir()
def _path_suggestion(filename):
"""Get the suggested file path.
Args:
filename: The filename to use if included in the suggestion.
"""
suggestion = config.val.downloads.location.suggestion
if suggestion == 'path':
# add trailing '/' if not present
return os.path.join(download_dir(), '')
elif suggestion == 'filename':
return filename
elif suggestion == 'both':
return os.path.join(download_dir(), filename)
else: # pragma: no cover
raise ValueError("Invalid suggestion value {}!".format(suggestion))
def create_full_filename(basename, filename):
"""Create a full filename based on the given basename and filename.
Args:
basename: The basename to use if filename is a directory.
filename: The path to a folder or file where you want to save.
Return:
The full absolute path, or None if filename creation was not possible.
"""
# Remove chars which can't be encoded in the filename encoding.
# See https://github.com/qutebrowser/qutebrowser/issues/427
encoding = sys.getfilesystemencoding()
filename = utils.force_encoding(filename, encoding)
basename = utils.force_encoding(basename, encoding)
if os.path.isabs(filename) and os.path.isdir(filename):
# We got an absolute directory from the user, so we save it under
# the default filename in that directory.
return os.path.join(filename, basename)
elif os.path.isabs(filename):
# We got an absolute filename from the user, so we save it under
# that filename.
return filename
return None
def get_filename_question(*, suggested_filename, url, parent=None):
"""Get a Question object for a download-path.
Args:
suggested_filename: The "default"-name that is pre-entered as path.
url: The URL the download originated from.
parent: The parent of the question (a QObject).
"""
encoding = sys.getfilesystemencoding()
suggested_filename = utils.force_encoding(suggested_filename, encoding)
q = usertypes.Question(parent)
q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format(
html.escape(url.toDisplayString()))
q.mode = usertypes.PromptMode.download
q.completed.connect(q.deleteLater)
q.default = _path_suggestion(suggested_filename)
return q
def transform_path(path):
r"""Do platform-specific transformations, like changing E: to E:\.
Returns None if the path is invalid on the current platform.
"""
if not utils.is_windows:
return path
path = utils.expand_windows_drive(path)
# Drive dependent working directories are not supported, e.g.
# E:filename is invalid
if re.match(r'[A-Z]:[^\\]', path, re.IGNORECASE):
return None
# Paths like COM1, ...
# See https://github.com/qutebrowser/qutebrowser/issues/82
if pathlib.Path(path).is_reserved():
return None
return path
def suggested_fn_from_title(url_path, title=None):
"""Suggest a filename depending on the URL extension and page title.
Args:
url_path: a string with the URL path
title: the page title string
Return:
The download filename based on the title, or None if the extension is
not found in the whitelist (or if there is no page title).
"""
ext_whitelist = [".html", ".htm", ".php", ""]
_, ext = os.path.splitext(url_path)
if ext.lower() in ext_whitelist and title:
suggested_fn = utils.sanitize_filename(title)
if not suggested_fn.lower().endswith((".html", ".htm")):
suggested_fn += ".html"
else:
suggested_fn = None
return suggested_fn
class NoFilenameError(Exception):
"""Raised when we can't find out a filename in DownloadTarget."""
# Where a download should be saved
class _DownloadTarget:
"""Abstract base class for different download targets."""
def __init__(self):
raise NotImplementedError
def suggested_filename(self):
"""Get the suggested filename for this download target."""
raise NotImplementedError
class FileDownloadTarget(_DownloadTarget):
"""Save the download to the given file.
Attributes:
filename: Filename where the download should be saved.
"""
def __init__(self, filename):
# pylint: disable=super-init-not-called
self.filename = filename
def suggested_filename(self):
return os.path.basename(self.filename)
def __str__(self):
return self.filename
class FileObjDownloadTarget(_DownloadTarget):
"""Save the download to the given file-like object.
Attributes:
fileobj: File-like object where the download should be written to.
"""
def __init__(self, fileobj):
# pylint: disable=super-init-not-called
self.fileobj = fileobj
def suggested_filename(self):
try:
return self.fileobj.name
except AttributeError:
raise NoFilenameError
def __str__(self):
try:
return 'file object at {}'.format(self.fileobj.name)
except AttributeError:
return 'anonymous file object'
class OpenFileDownloadTarget(_DownloadTarget):
"""Save the download in a temp dir and directly open it.
Attributes:
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default application.
If no `{}` is found, the filename is appended to the cmdline.
"""
def __init__(self, cmdline=None):
# pylint: disable=super-init-not-called
self.cmdline = cmdline
def suggested_filename(self):
raise NoFilenameError
def __str__(self):
return 'temporary file'
class DownloadItemStats(QObject):
"""Statistics (bytes done, total bytes, time, etc.) about a download.
Class attributes:
SPEED_AVG_WINDOW: How many seconds of speed data to average to
estimate the remaining time.
Attributes:
done: How many bytes there are already downloaded.
total: The total count of bytes. None if the total is unknown.
speed: The current download speed, in bytes per second.
_speed_avg: A rolling average of speeds.
_last_done: The count of bytes which where downloaded when calculating
the speed the last time.
"""
SPEED_AVG_WINDOW = 30
def __init__(self, parent=None):
super().__init__(parent)
self.total = None
self.done = 0
self.speed = 0
self._last_done = 0
samples = int(self.SPEED_AVG_WINDOW * (1000 / _REFRESH_INTERVAL))
self._speed_avg = collections.deque(maxlen=samples)
def update_speed(self):
"""Recalculate the current download speed.
The caller needs to guarantee this is called all _REFRESH_INTERVAL ms.
"""
if self.done is None:
# this can happen for very fast downloads, e.g. when actually
# opening a file
return
delta = self.done - self._last_done
self.speed = delta * 1000 / _REFRESH_INTERVAL
self._speed_avg.append(self.speed)
self._last_done = self.done
def finish(self):
"""Set the download stats as finished."""
self.done = self.total
def percentage(self):
"""The current download percentage, or None if unknown."""
if self.done == self.total:
return 100
elif self.total == 0 or self.total is None:
return None
else:
return 100 * self.done / self.total
def remaining_time(self):
"""The remaining download time in seconds, or None."""
if self.total is None or not self._speed_avg:
# No average yet or we don't know the total size.
return None
remaining_bytes = self.total - self.done
avg = sum(self._speed_avg) / len(self._speed_avg)
if avg == 0:
# Download stalled
return None
else:
return remaining_bytes / avg
@pyqtSlot('qint64', 'qint64')
def on_download_progress(self, bytes_done, bytes_total):
"""Update local variables when the download progress changed.
Args:
bytes_done: How many bytes are downloaded.
bytes_total: How many bytes there are to download in total.
"""
if bytes_total in [0, -1]: # QtWebEngine, QtWebKit
bytes_total = None
self.done = bytes_done
self.total = bytes_total
class AbstractDownloadItem(QObject):
"""Shared QtNetwork/QtWebEngine part of a download item.
Attributes:
done: Whether the download is finished.
stats: A DownloadItemStats object.
index: The index of the download in the view.
successful: Whether the download has completed successfully.
error_msg: The current error message, or None
fileobj: The file object to download the file to.
raw_headers: The headers sent by the server.
_filename: The filename of the download.
_dead: Whether the Download has _die()'d.
Signals:
data_changed: The downloads metadata changed.
finished: The download was finished.
cancelled: The download was cancelled.
error: An error with the download occurred.
arg: The error message as string.
remove_requested: Emitted when the removal of this download was
requested.
"""
data_changed = pyqtSignal()
finished = pyqtSignal()
error = pyqtSignal(str)
cancelled = pyqtSignal()
remove_requested = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self.done = False
self.stats = DownloadItemStats(self)
self.index = 0
self.error_msg = None
self.basename = '???'
self.successful = False
self.fileobj = UnsupportedAttribute()
self.raw_headers = UnsupportedAttribute()
self._filename = None
self._dead = False
def __repr__(self):
return utils.get_repr(self, basename=self.basename)
def __str__(self):
"""Get the download as a string.
Example: foo.pdf [699.2kB/s|0.34|16%|4.253/25.124]
"""
speed = utils.format_size(self.stats.speed, suffix='B/s')
down = utils.format_size(self.stats.done, suffix='B')
perc = self.stats.percentage()
remaining = self.stats.remaining_time()
if self.error_msg is None:
errmsg = ""
else:
errmsg = " - {}".format(self.error_msg)
if all(e is None for e in [perc, remaining, self.stats.total]):
return ('{index}: {name} [{speed:>10}|{down}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
down=down, errmsg=errmsg))
perc = round(perc)
if remaining is None:
remaining = '?'
else:
remaining = utils.format_seconds(remaining)
total = utils.format_size(self.stats.total, suffix='B')
if self.done:
return ('{index}: {name} [{perc:>2}%|{total}]{errmsg}'.format(
index=self.index, name=self.basename, perc=perc,
total=total, errmsg=errmsg))
else:
return ('{index}: {name} [{speed:>10}|{remaining:>5}|{perc:>2}%|'
'{down}/{total}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
remaining=remaining, perc=perc, down=down,
total=total, errmsg=errmsg))
def _do_die(self):
"""Do cleanup steps after a download has died."""
raise NotImplementedError
def _die(self, msg):
"""Abort the download and emit an error."""
assert not self.successful
# Prevent actions if calling _die() twice.
#
# For QtWebKit, this might happen if the error handler correctly
# connects, and the error occurs in _init_reply between
# reply.error.connect and the reply.error() check. In this case, the
# connected error handlers will be called twice, once via the direct
# error.emit() and once here in _die(). The stacks look like this then:
#
# <networkmanager error.emit> -> on_reply_error -> _die ->
# self.error.emit()
#
# and
#
# [_init_reply -> <single shot timer> ->] <lambda in _init_reply> ->
# self.error.emit()
#
# which may lead to duplicate error messages (and failing tests)
if self._dead:
return
self._dead = True
self._do_die()
self.error_msg = msg
self.stats.finish()
self.error.emit(msg)
self.done = True
self.data_changed.emit()
def get_status_color(self, position):
"""Choose an appropriate color for presenting the download's status.
Args:
position: The color type requested, can be 'fg' or 'bg'.
"""
assert position in ["fg", "bg"]
# pylint: disable=bad-config-option
start = getattr(config.val.colors.downloads.start, position)
stop = getattr(config.val.colors.downloads.stop, position)
system = getattr(config.val.colors.downloads.system, position)
error = getattr(config.val.colors.downloads.error, position)
# pylint: enable=bad-config-option
if self.error_msg is not None:
assert not self.successful
return error
elif self.stats.percentage() is None:
return start
else:
return utils.interpolate_color(start, stop,
self.stats.percentage(), system)
def _do_cancel(self):
"""Actual cancel implementation."""
raise NotImplementedError
@pyqtSlot()
def cancel(self, *, remove_data=True):
"""Cancel the download.
Args:
remove_data: Whether to remove the downloaded data.
"""
self._do_cancel()
log.downloads.debug("cancelled")
if remove_data:
self.delete()
self.done = True
self.finished.emit()
self.data_changed.emit()
@pyqtSlot()
def remove(self):
"""Remove the download from the model."""
self.remove_requested.emit()
def delete(self):
"""Delete the downloaded file."""
try:
if self._filename is not None and os.path.exists(self._filename):
os.remove(self._filename)
log.downloads.debug("Deleted {}".format(self._filename))
else:
log.downloads.debug("Not deleting {}".format(self._filename))
except OSError:
log.downloads.exception("Failed to remove partial file")
@pyqtSlot()
def retry(self):
"""Retry a failed download."""
raise NotImplementedError
@pyqtSlot()
def try_retry(self):
"""Try to retry a download and show an error if it's unsupported."""
try:
self.retry()
except UnsupportedOperationError as e:
message.error(str(e))
def _get_open_filename(self):
"""Get the filename to open a download.
Returns None if no suitable filename was found.
"""
raise NotImplementedError
@pyqtSlot()
def open_file(self, cmdline=None):
"""Open the downloaded file.
Args:
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default
application or `downloads.open_dispatcher` if set. If no
`{}` is found, the filename is appended to the cmdline.
"""
assert self.successful
filename = self._get_open_filename()
if filename is None: # pragma: no cover
log.downloads.error("No filename to open the download!")
return
# By using a singleshot timer, we ensure that we return fast. This
# is important on systems where process creation takes long, as
# otherwise the prompt might hang around and cause bugs
# (see issue #2296)
QTimer.singleShot(0, lambda: utils.open_file(filename, cmdline))
def _ensure_can_set_filename(self, filename):
"""Make sure we can still set a filename."""
raise NotImplementedError
def _after_set_filename(self):
"""Finish initialization based on self._filename."""
raise NotImplementedError
def _ask_confirm_question(self, title, msg):
"""Ask a confirmation question for the download."""
raise NotImplementedError
def _set_fileobj(self, fileobj, *, autoclose=True):
"""Set a file object to save the download to.
Not supported by QtWebEngine.
Args:
fileobj: The file object to download to.
autoclose: Close the file object automatically when it's done.
"""
raise NotImplementedError
def _set_tempfile(self, fileobj):
"""Set a temporary file when opening the download."""
raise NotImplementedError
def _set_filename(self, filename, *, force_overwrite=False,
remember_directory=True):
"""Set the filename to save the download to.
Args:
filename: The full filename to save the download to.
None: special value to stop the download.
force_overwrite: Force overwriting existing files.
remember_directory: If True, remember the directory for future
downloads.
"""
global last_used_directory
filename = os.path.expanduser(filename)
self._ensure_can_set_filename(filename)
self._filename = create_full_filename(self.basename, filename)
if self._filename is None:
# We only got a filename (without directory) or a relative path
# from the user, so we append that to the default directory and
# try again.
self._filename = create_full_filename(
self.basename, os.path.join(download_dir(), filename))
# At this point, we have a misconfigured XDG_DOWNLOAD_DIR, as
        # download_dir() + filename is still not an absolute path.
# The config value is checked for "absoluteness", but
# ~/.config/user-dirs.dirs may be misconfigured and a non-absolute path
# may be set for XDG_DOWNLOAD_DIR
if self._filename is None:
message.error(
"XDG_DOWNLOAD_DIR points to a relative path - please check"
" your ~/.config/user-dirs.dirs. The download is saved in"
" your home directory.",
)
# fall back to $HOME as download_dir
self._filename = create_full_filename(self.basename,
os.path.expanduser('~'))
self.basename = os.path.basename(self._filename)
if remember_directory:
last_used_directory = os.path.dirname(self._filename)
log.downloads.debug("Setting filename to {}".format(filename))
if force_overwrite:
self._after_set_filename()
elif os.path.isfile(self._filename):
# The file already exists, so ask the user if it should be
# overwritten.
txt = "<b>{}</b> already exists. Overwrite?".format(
html.escape(self._filename))
self._ask_confirm_question("Overwrite existing file?", txt)
# FIFO, device node, etc. Make sure we want to do this
elif (os.path.exists(self._filename) and
not os.path.isdir(self._filename)):
txt = ("<b>{}</b> already exists and is a special file. Write to "
"it anyways?".format(html.escape(self._filename)))
self._ask_confirm_question("Overwrite special file?", txt)
else:
self._after_set_filename()
def _open_if_successful(self, cmdline):
"""Open the downloaded file, but only if it was successful.
Args:
cmdline: Passed to DownloadItem.open_file().
"""
if not self.successful:
log.downloads.debug("{} finished but not successful, not opening!"
.format(self))
return
self.open_file(cmdline)
def set_target(self, target):
"""Set the target for a given download.
Args:
target: The DownloadTarget for this download.
"""
if isinstance(target, FileObjDownloadTarget):
self._set_fileobj(target.fileobj, autoclose=False)
elif isinstance(target, FileDownloadTarget):
self._set_filename(target.filename)
elif isinstance(target, OpenFileDownloadTarget):
try:
fobj = temp_download_manager.get_tmpfile(self.basename)
except OSError as exc:
msg = "Download error: {}".format(exc)
message.error(msg)
self.cancel()
return
self.finished.connect(
functools.partial(self._open_if_successful, target.cmdline))
self._set_tempfile(fobj)
else: # pragma: no cover
raise ValueError("Unsupported download target: {}".format(target))
class AbstractDownloadManager(QObject):
"""Backend-independent download manager code.
Attributes:
downloads: A list of active DownloadItems.
_networkmanager: A NetworkManager for generic downloads.
Signals:
begin_remove_row: Emitted before downloads are removed.
end_remove_row: Emitted after downloads are removed.
begin_insert_row: Emitted before downloads are inserted.
end_insert_row: Emitted after downloads are inserted.
data_changed: Emitted when the data of the model changed.
The argument is the index of the changed download
"""
begin_remove_row = pyqtSignal(int)
end_remove_row = pyqtSignal()
begin_insert_row = pyqtSignal(int)
end_insert_row = pyqtSignal()
data_changed = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.downloads = []
self._update_timer = usertypes.Timer(self, 'download-update')
self._update_timer.timeout.connect(self._update_gui)
self._update_timer.setInterval(_REFRESH_INTERVAL)
def __repr__(self):
return utils.get_repr(self, downloads=len(self.downloads))
@pyqtSlot()
def _update_gui(self):
"""Periodical GUI update of all items."""
assert self.downloads
for dl in self.downloads:
dl.stats.update_speed()
self.data_changed.emit(-1)
def _init_item(self, download, auto_remove, suggested_filename):
"""Initialize a newly created DownloadItem."""
download.cancelled.connect(download.remove)
download.remove_requested.connect(functools.partial(
self._remove_item, download))
delay = config.val.downloads.remove_finished
if delay > -1:
download.finished.connect(
lambda: QTimer.singleShot(delay, download.remove))
elif auto_remove:
download.finished.connect(download.remove)
download.data_changed.connect(
functools.partial(self._on_data_changed, download))
download.error.connect(self._on_error)
download.basename = suggested_filename
idx = len(self.downloads)
download.index = idx + 1 # "Human readable" index
self.begin_insert_row.emit(idx)
self.downloads.append(download)
self.end_insert_row.emit()
if not self._update_timer.isActive():
self._update_timer.start()
@pyqtSlot(AbstractDownloadItem)
def _on_data_changed(self, download):
"""Emit data_changed signal when download data changed."""
try:
idx = self.downloads.index(download)
except ValueError:
# download has been deleted in the meantime
return
self.data_changed.emit(idx)
@pyqtSlot(str)
def _on_error(self, msg):
"""Display error message on download errors."""
message.error("Download error: {}".format(msg))
@pyqtSlot(AbstractDownloadItem)
def _remove_item(self, download):
"""Remove a given download."""
if sip.isdeleted(self):
# https://github.com/qutebrowser/qutebrowser/issues/1242
return
try:
idx = self.downloads.index(download)
except ValueError:
# already removed
return
self.begin_remove_row.emit(idx)
del self.downloads[idx]
self.end_remove_row.emit()
download.deleteLater()
self._update_indexes()
if not self.downloads:
self._update_timer.stop()
log.downloads.debug("Removed download {}".format(download))
def _update_indexes(self):
"""Update indexes of all DownloadItems."""
for i, d in enumerate(self.downloads, 1):
d.index = i
self.data_changed.emit(-1)
def _init_filename_question(self, question, download):
"""Set up an existing filename question with a download."""
question.answered.connect(download.set_target)
question.cancelled.connect(download.cancel)
download.cancelled.connect(question.abort)
download.error.connect(question.abort)
class DownloadModel(QAbstractListModel):
"""A list model showing downloads."""
def __init__(self, qtnetwork_manager, webengine_manager=None, parent=None):
super().__init__(parent)
self._qtnetwork_manager = qtnetwork_manager
self._webengine_manager = webengine_manager
qtnetwork_manager.data_changed.connect(
functools.partial(self._on_data_changed, webengine=False))
qtnetwork_manager.begin_insert_row.connect(
functools.partial(self._on_begin_insert_row, webengine=False))
qtnetwork_manager.begin_remove_row.connect(
functools.partial(self._on_begin_remove_row, webengine=False))
qtnetwork_manager.end_insert_row.connect(self.endInsertRows)
qtnetwork_manager.end_remove_row.connect(self.endRemoveRows)
if webengine_manager is not None:
webengine_manager.data_changed.connect(
functools.partial(self._on_data_changed, webengine=True))
webengine_manager.begin_insert_row.connect(
functools.partial(self._on_begin_insert_row, webengine=True))
webengine_manager.begin_remove_row.connect(
functools.partial(self._on_begin_remove_row, webengine=True))
webengine_manager.end_insert_row.connect(self.endInsertRows)
webengine_manager.end_remove_row.connect(self.endRemoveRows)
def _all_downloads(self):
"""Combine downloads from both downloaders."""
if self._webengine_manager is None:
return self._qtnetwork_manager.downloads[:]
else:
return (self._qtnetwork_manager.downloads +
self._webengine_manager.downloads)
def __len__(self):
return len(self._all_downloads())
def __iter__(self):
return iter(self._all_downloads())
def __getitem__(self, idx):
return self._all_downloads()[idx]
def _on_begin_insert_row(self, idx, webengine=False):
log.downloads.debug("_on_begin_insert_row with idx {}, "
"webengine {}".format(idx, webengine))
if idx == -1:
self.beginInsertRows(QModelIndex(), 0, -1)
return
assert idx >= 0, idx
if webengine:
idx += len(self._qtnetwork_manager.downloads)
self.beginInsertRows(QModelIndex(), idx, idx)
def _on_begin_remove_row(self, idx, webengine=False):
log.downloads.debug("_on_begin_remove_row with idx {}, "
"webengine {}".format(idx, webengine))
if idx == -1:
self.beginRemoveRows(QModelIndex(), 0, -1)
return
assert idx >= 0, idx
if webengine:
idx += len(self._qtnetwork_manager.downloads)
self.beginRemoveRows(QModelIndex(), idx, idx)
def _on_data_changed(self, idx, *, webengine):
"""Called when a downloader's data changed.
Args:
            idx: The changed index as int, or -1 for all indices.
            webengine: If True, the QtNetwork download count is added to the
                index.
"""
if idx == -1:
start_index = self.index(0, 0)
end_index = self.last_index()
else:
if webengine:
idx += len(self._qtnetwork_manager.downloads)
start_index = self.index(idx, 0)
end_index = self.index(idx, 0)
qtutils.ensure_valid(start_index)
qtutils.ensure_valid(end_index)
self.dataChanged.emit(start_index, end_index)
def _raise_no_download(self, count):
"""Raise an exception that the download doesn't exist.
Args:
count: The index of the download
"""
if not count:
raise cmdexc.CommandError("There's no download!")
raise cmdexc.CommandError("There's no download {}!".format(count))
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_cancel(self, all_=False, count=0):
"""Cancel the last/[count]th download.
Args:
all_: Cancel all running downloads
count: The index of the download to cancel.
"""
downloads = self._all_downloads()
if all_:
for download in downloads:
if not download.done:
download.cancel()
else:
try:
download = downloads[count - 1]
except IndexError:
self._raise_no_download(count)
if download.done:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is already done!"
.format(count))
download.cancel()
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_delete(self, count=0):
"""Delete the last/[count]th download from disk.
Args:
count: The index of the download to delete.
"""
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.successful:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!".format(count))
download.delete()
download.remove()
log.downloads.debug("deleted download {}".format(download))
@cmdutils.register(instance='download-model', scope='window', maxsplit=0)
@cmdutils.argument('count', count=True)
def download_open(self, cmdline: str = None, count=0):
"""Open the last/[count]th download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
count: The index of the download to open.
"""
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.successful:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!".format(count))
download.open_file(cmdline)
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_retry(self, count=0):
"""Retry the first failed/[count]th download.
Args:
count: The index of the download to retry.
"""
if count:
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if download.successful or not download.done:
raise cmdexc.CommandError("Download {} did not fail!".format(
count))
else:
to_retry = [d for d in self if d.done and not d.successful]
if not to_retry:
raise cmdexc.CommandError("No failed downloads!")
else:
download = to_retry[0]
download.try_retry()
def can_clear(self):
"""Check if there are finished downloads to clear."""
return any(download.done for download in self)
@cmdutils.register(instance='download-model', scope='window')
def download_clear(self):
"""Remove all finished downloads from the list."""
for download in self:
if download.done:
download.remove()
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_remove(self, all_=False, count=0):
"""Remove the last/[count]th download from the list.
Args:
all_: Remove all finished downloads.
count: The index of the download to remove.
"""
if all_:
self.download_clear()
else:
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.done:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!"
.format(count))
download.remove()
def running_downloads(self):
"""Return the amount of still running downloads.
Return:
The number of unfinished downloads.
"""
return sum(1 for download in self if not download.done)
def last_index(self):
"""Get the last index in the model.
Return:
A (possibly invalid) QModelIndex.
"""
idx = self.index(self.rowCount() - 1)
return idx
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Simple constant header."""
if (section == 0 and orientation == Qt.Horizontal and
role == Qt.DisplayRole):
return "Downloads"
else:
return ""
def data(self, index, role):
"""Download data from DownloadManager."""
if not index.isValid():
return None
if index.parent().isValid() or index.column() != 0:
return None
item = self[index.row()]
if role == Qt.DisplayRole:
data = str(item)
elif role == Qt.ForegroundRole:
data = item.get_status_color('fg')
elif role == Qt.BackgroundRole:
data = item.get_status_color('bg')
elif role == ModelRole.item:
data = item
elif role == Qt.ToolTipRole:
if item.error_msg is None:
data = None
else:
return item.error_msg
else:
data = None
return data
def flags(self, index):
"""Override flags so items aren't selectable.
The default would be Qt.ItemIsEnabled | Qt.ItemIsSelectable.
"""
if not index.isValid():
return Qt.ItemFlags()
return Qt.ItemIsEnabled | Qt.ItemNeverHasChildren
def rowCount(self, parent=QModelIndex()):
"""Get count of active downloads."""
if parent.isValid():
# We don't have children
return 0
return len(self)
class TempDownloadManager:
"""Manager to handle temporary download files.
    The downloads are saved to a temporary location and then opened with
    the system's standard application. The temporary files are deleted when
    qutebrowser is shut down.
Attributes:
files: A list of NamedTemporaryFiles of downloaded items.
"""
def __init__(self):
self.files = []
self._tmpdir = None
def cleanup(self):
"""Clean up any temporary files."""
if self._tmpdir is not None:
try:
self._tmpdir.cleanup()
except OSError:
log.misc.exception("Failed to clean up temporary download "
"directory")
self._tmpdir = None
def _get_tmpdir(self):
"""Return the temporary directory that is used for downloads.
The directory is created lazily on first access.
Return:
The tempfile.TemporaryDirectory that is used.
"""
if self._tmpdir is None:
self._tmpdir = tempfile.TemporaryDirectory(
prefix='qutebrowser-downloads-')
return self._tmpdir
def get_tmpfile(self, suggested_name):
"""Return a temporary file in the temporary downloads directory.
The files are kept as long as qutebrowser is running and automatically
cleaned up at program exit.
Args:
suggested_name: str of the "suggested"/original filename. Used as a
                            suffix, so any file extensions are preserved.
Return:
A tempfile.NamedTemporaryFile that should be used to save the file.
"""
tmpdir = self._get_tmpdir()
encoding = sys.getfilesystemencoding()
suggested_name = utils.force_encoding(suggested_name, encoding)
# Make sure that the filename is not too long
suggested_name = utils.elide_filename(suggested_name, 50)
fobj = tempfile.NamedTemporaryFile(dir=tmpdir.name, delete=False,
suffix=suggested_name)
self.files.append(fobj)
return fobj
temp_download_manager = TempDownloadManager()
| 1 | 19,409 | No need for those with `super-init-not-called`: pylint already limits a disable comment inside a function to that function, and the suppression is needed for the entire function anyway. | qutebrowser-qutebrowser | py |
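A small self-contained illustration of the review comment above: a "# pylint: disable=..." comment placed inside a function body only suppresses the message until the end of that function, so the added "enable" line in the diff is redundant.

import os.path


class _DownloadTarget:
    def __init__(self):
        raise NotImplementedError


class FileDownloadTarget(_DownloadTarget):
    def __init__(self, filename):
        # pylint: disable=super-init-not-called
        self.filename = filename
        # No matching "# pylint: enable=..." is needed: a disable comment
        # inside a function body stops applying at the end of that function.

    def suggested_filename(self):
        return os.path.basename(self.filename)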
@@ -35,6 +35,7 @@ public interface CapabilityType {
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
+ String APPLICATION_NAME = "applicationName";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String HAS_NATIVE_EVENTS = "nativeEvents"; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
/**
* Commonly seen remote webdriver capabilities.
*/
public interface CapabilityType {
String BROWSER_NAME = "browserName";
String PLATFORM = "platform";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
String VERSION = "version";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled";
String SUPPORTS_LOCATION_CONTEXT = "locationContextEnabled";
String SUPPORTS_APPLICATION_CACHE = "applicationCacheEnabled";
String SUPPORTS_NETWORK_CONNECTION = "networkConnectionEnabled";
String SUPPORTS_FINDING_BY_CSS = "cssSelectorsEnabled";
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String HAS_NATIVE_EVENTS = "nativeEvents";
String UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour";
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String LOGGING_PREFS = "loggingPrefs";
String ENABLE_PROFILING_CAPABILITY = "webdriver.logging.profiler.enabled";
/**
* @deprecated Use PAGE_LOAD_STRATEGY instead
*/
@Deprecated
String PAGE_LOADING_STRATEGY = "pageLoadingStrategy";
String PAGE_LOAD_STRATEGY = "pageLoadStrategy";
/**
* Moved InternetExplorer specific CapabilityTypes into InternetExplorerDriver.java for consistency
*/
@Deprecated
String ENABLE_PERSISTENT_HOVERING = "enablePersistentHover";
interface ForSeleniumServer {
String AVOIDING_PROXY = "avoidProxy";
String ONLY_PROXYING_SELENIUM_TRAFFIC = "onlyProxySeleniumTraffic";
String PROXYING_EVERYTHING = "proxyEverything";
String PROXY_PAC = "proxy_pac";
String ENSURING_CLEAN_SESSION = "ensureCleanSession";
}
}
| 1 | 13,114 | I think there's another spot for this in DefaultCapabilityMatcher. | SeleniumHQ-selenium | java |
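A hypothetical sketch of the follow-up above; this is not Grid's real DefaultCapabilityMatcher, only an illustration of why a matcher that keeps its own list of capability keys would also need to learn the new "applicationName" key.

import java.util.Arrays;
import java.util.List;
import java.util.Map;

class CapabilityMatcherSketch {

    // "applicationName" added alongside keys the matcher already considered.
    private final List<String> toConsider =
        Arrays.asList("browserName", "version", "platform", "applicationName");

    boolean matches(Map<String, Object> node, Map<String, Object> requested) {
        for (String key : toConsider) {
            Object wanted = requested.get(key);
            if (wanted != null && !wanted.equals(node.get(key))) {
                return false;
            }
        }
        return true;
    }
}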
@@ -55,6 +55,15 @@ class ProposalDecorator < Draper::Decorator
"#{number_approved} of #{total_approvers} approved."
end
+ def table_waiting_text
+ actionable_step = currently_awaiting_steps.first
+ if actionable_step
+ actionable_step.decorate.waiting_text
+ else
+ I18n.t("decorators.steps.approval.status.waiting")
+ end
+ end
+
def step_text_for_user(key, user)
step = existing_approval_for(user)
klass = step.class.name.demodulize.downcase.to_sym | 1 | class ProposalDecorator < Draper::Decorator
delegate_all
def number_approved
object.individual_steps.approved.count
end
def total_approvers
object.individual_steps.count
end
def steps_by_status
# Override default scope
object.individual_steps.with_users.reorder(
# http://stackoverflow.com/a/6332081/358804
<<-SQL
CASE steps.status
WHEN 'approved' THEN 1
WHEN 'actionable' THEN 2
ELSE 3
END
SQL
)
end
def steps_in_list_order
if object.flow == 'linear'
object.individual_steps.with_users
else
self.steps_by_status
end
end
def display_status
if object.pending?
'pending approval'
else
object.status
end
end
def generate_status_message
if object.steps.non_pending.empty?
progress_status_message
else
completed_status_message
end
end
def completed_status_message
"All #{number_approved} of #{total_approvers} approvals have been received. Please move forward with the purchase of ##{object.public_id}."
end
def progress_status_message
"#{number_approved} of #{total_approvers} approved."
end
def step_text_for_user(key, user)
step = existing_approval_for(user)
klass = step.class.name.demodulize.downcase.to_sym
scope = [:decorators, :steps, klass]
I18n.t(key, scope: scope)
end
end
| 1 | 16,126 | this name seems pretty vague -- thoughts on a more descriptive method name? | 18F-C2 | rb |
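One possible answer to the naming question above, shown as a sketch; current_step_waiting_text is only a suggested rename, not the name the project adopted, and the body is unchanged from the diff.

# Suggested, more descriptive name for table_waiting_text; the body is the
# same as in the diff above.
def current_step_waiting_text
  actionable_step = currently_awaiting_steps.first
  if actionable_step
    actionable_step.decorate.waiting_text
  else
    I18n.t("decorators.steps.approval.status.waiting")
  end
end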
@@ -1,4 +1,4 @@
-// <copyright file="MeterFactory.cs" company="OpenTelemetry Authors">
+// <copyright file="MeterFactory.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); | 1 | // <copyright file="MeterFactory.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Threading;
using OpenTelemetry.Metrics.Export;
namespace OpenTelemetry.Metrics.Configuration
{
public class MeterFactory : MeterFactoryBase
{
// TODO: make MeterFactory IDisposable to call Dispose on Exporter/Controller.
private readonly object lck = new object();
private readonly Dictionary<MeterRegistryKey, MeterSdk> meterRegistry = new Dictionary<MeterRegistryKey, MeterSdk>();
private readonly MetricProcessor metricProcessor;
private readonly MetricExporter metricExporter;
private readonly PushMetricController pushMetricController;
private readonly TimeSpan defaultPushInterval = TimeSpan.FromSeconds(60);
private MeterSdk defaultMeter;
private MeterFactory(MeterBuilder meterBuilder)
{
this.metricProcessor = meterBuilder.MetricProcessor ?? new NoOpMetricProcessor();
this.metricExporter = meterBuilder.MetricExporter ?? new NoOpMetricExporter();
// We only have PushMetricController now with only configurable thing being the push interval
this.pushMetricController = new PushMetricController(
this.meterRegistry,
this.metricProcessor,
this.metricExporter,
meterBuilder.MetricPushInterval == default(TimeSpan) ? this.defaultPushInterval : meterBuilder.MetricPushInterval,
new CancellationTokenSource());
this.defaultMeter = new MeterSdk(string.Empty, this.metricProcessor);
this.meterRegistry.Add(new MeterRegistryKey(string.Empty, null), this.defaultMeter);
}
public static MeterFactory Create(Action<MeterBuilder> configure)
{
if (configure == null)
{
throw new ArgumentNullException(nameof(configure));
}
var builder = new MeterBuilder();
configure(builder);
return new MeterFactory(builder);
}
public override Meter GetMeter(string name, string version = null)
{
if (string.IsNullOrEmpty(name))
{
return this.defaultMeter;
}
lock (this.lck)
{
var key = new MeterRegistryKey(name, version);
if (!this.meterRegistry.TryGetValue(key, out var meter))
{
meter = this.defaultMeter = new MeterSdk(name, this.metricProcessor);
this.meterRegistry.Add(key, meter);
}
return meter;
}
}
private static IEnumerable<KeyValuePair<string, string>> CreateLibraryResourceLabels(string name, string version)
{
var labels = new Dictionary<string, string> { { "name", name } };
if (!string.IsNullOrEmpty(version))
{
labels.Add("version", version);
}
return labels;
}
internal readonly struct MeterRegistryKey
{
private readonly string name;
private readonly string version;
internal MeterRegistryKey(string name, string version)
{
this.name = name;
this.version = version;
}
}
}
}
| 1 | 13,985 | what changed in this line? | open-telemetry-opentelemetry-dotnet | .cs |
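
The removed and added lines render identically, so the change is most likely an invisible character, for example a UTF-8 byte-order mark at the start of the file; that is an assumption here, the diff itself does not show it. A minimal C# check that would reveal such a difference (the file path is illustrative):

// Dump the first bytes of the file; a UTF-8 BOM shows up as EF BB BF.
using System;
using System.IO;

class ByteDump
{
    static void Main()
    {
        byte[] bytes = File.ReadAllBytes("MeterFactory.cs");
        int n = Math.Min(bytes.Length, 8);
        for (int i = 0; i < n; i++)
        {
            Console.Write($"{bytes[i]:X2} ");
        }
        Console.WriteLine();
    }
}
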
@@ -43,13 +43,16 @@ import { isZeroReport } from '../../modules/search-console/util/is-zero-report';
import sumObjectListValue from '../../util/sum-object-list-value';
const { useSelect } = Data;
+// reportArgs is declared in this higher scope so that it can be used by hasData.
+let reportArgs;
+
const AdminBarClicks = ( { className } ) => {
const url = useSelect( ( select ) => select( CORE_SITE ).getCurrentEntityURL() );
const { compareStartDate, endDate } = useSelect( ( select ) => select( CORE_USER ).getDateRangeDates( {
compare: true,
offsetDays: DATE_RANGE_OFFSET,
} ) );
- const reportArgs = {
+ reportArgs = {
startDate: compareStartDate,
endDate,
dimensions: 'date',
 | 1 | /**
* Admin Bar Clicks component.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import classnames from 'classnames';
import PropTypes from 'prop-types';
/**
* WordPress dependencies
*/
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import DataBlock from '../DataBlock';
import PreviewBlock from '../PreviewBlock';
import ReportError from '../ReportError';
import ReportZero from '../ReportZero';
import { STORE_NAME as CORE_USER } from '../../googlesitekit/datastore/user/constants';
import { STORE_NAME as CORE_SITE } from '../../googlesitekit/datastore/site/constants';
import { STORE_NAME as MODULES_SEARCH_CONSOLE, DATE_RANGE_OFFSET } from '../../modules/search-console/datastore/constants';
import { calculateChange } from '../../util';
import { isZeroReport } from '../../modules/search-console/util/is-zero-report';
import sumObjectListValue from '../../util/sum-object-list-value';
const { useSelect } = Data;
const AdminBarClicks = ( { className } ) => {
const url = useSelect( ( select ) => select( CORE_SITE ).getCurrentEntityURL() );
const { compareStartDate, endDate } = useSelect( ( select ) => select( CORE_USER ).getDateRangeDates( {
compare: true,
offsetDays: DATE_RANGE_OFFSET,
} ) );
const reportArgs = {
startDate: compareStartDate,
endDate,
dimensions: 'date',
url,
};
const searchConsoleData = useSelect( ( select ) => select( MODULES_SEARCH_CONSOLE ).getReport( reportArgs ) );
const hasFinishedResolution = useSelect( ( select ) => select( MODULES_SEARCH_CONSOLE ).hasFinishedResolution( 'getReport', [ reportArgs ] ) );
const error = useSelect( ( select ) => select( MODULES_SEARCH_CONSOLE ).getErrorForSelector( 'getReport', [ reportArgs ] ) );
if ( ! hasFinishedResolution ) {
return (
<div className={ classnames(
'mdc-layout-grid__cell',
className,
) }>
<PreviewBlock width="auto" height="59px" />
</div>
);
}
if ( error ) {
return <ReportError moduleSlug="search-console" error={ error } />;
}
if ( isZeroReport( searchConsoleData ) ) {
return <ReportZero moduleSlug="search-console" />;
}
// Split the data in two chunks.
const half = Math.floor( searchConsoleData.length / 2 );
const latestData = searchConsoleData.slice( half );
const olderData = searchConsoleData.slice( 0, half );
const totalClicks = sumObjectListValue( latestData, 'clicks' );
const totalOlderClicks = sumObjectListValue( olderData, 'clicks' );
const totalClicksChange = calculateChange( totalOlderClicks, totalClicks );
return (
<div className={ classnames(
'mdc-layout-grid__cell',
className,
) }>
<DataBlock
className="overview-total-clicks"
title={ __( 'Total Clicks', 'google-site-kit' ) }
datapoint={ totalClicks }
change={ totalClicksChange }
changeDataUnit="%"
/>
</div>
);
};
AdminBarClicks.propTypes = {
className: PropTypes.string,
};
AdminBarClicks.defaultProps = {
className: 'mdc-layout-grid__cell--span-2-tablet mdc-layout-grid__cell--span-3-desktop',
};
export default AdminBarClicks;
| 1 | 35,316 | Instead of doing this let's add another function similar to `hasZeroData` for selecting the `reportArgs` since this is all sourced from selected values. Then `hasZeroData` can use this internally, as well as the component itself. Since this function would be internal just for the purpose of avoiding duplication, we don't need to expose it on the component the same way though. | google-site-kit-wp | js |
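
A sketch of the refactor the reviewer suggests, built from the selectors already used in the component above; the helper name `selectReportArgs` is invented for illustration:

// Hypothetical helper deriving reportArgs from registry state, so both
// the component and a hasZeroData-style function can share it without a
// module-level variable.
const selectReportArgs = ( select ) => {
	const url = select( CORE_SITE ).getCurrentEntityURL();
	const { compareStartDate, endDate } = select( CORE_USER ).getDateRangeDates( {
		compare: true,
		offsetDays: DATE_RANGE_OFFSET,
	} );
	return {
		startDate: compareStartDate,
		endDate,
		dimensions: 'date',
		url,
	};
};

// In the component: const reportArgs = useSelect( selectReportArgs );
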
@@ -38,7 +38,6 @@ class ExceptionListener
return $event->getException()->getMessage();
}
- /** @var \JavierEguiluz\Bundle\EasyAdminBundle\Exception\BaseException */
$exception = $event->getException();
$exceptionClassName = basename(str_replace('\\', '/', get_class($exception)));
| 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\Listener;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\BaseException;
use Symfony\Component\HttpKernel\Event\GetResponseForExceptionEvent;
class ExceptionListener
{
private $templating;
private $debug;
private $exceptionTemplates = array(
'ForbiddenActionException' => '@EasyAdmin/error/forbidden_action.html.twig',
'NoEntitiesConfigurationException' => '@EasyAdmin/error/no_entities.html.twig',
'UndefinedEntityException' => '@EasyAdmin/error/undefined_entity.html.twig',
'EntityNotFoundException' => '@EasyAdmin/error/entity_not_found.html.twig',
);
public function __construct($templating, $debug)
{
$this->templating = $templating;
$this->debug = $debug;
}
public function onKernelException(GetResponseForExceptionEvent $event)
{
// in 'dev' environment, don't override Symfony's exception pages
if (true === $this->debug) {
return $event->getException()->getMessage();
}
/** @var \JavierEguiluz\Bundle\EasyAdminBundle\Exception\BaseException */
$exception = $event->getException();
$exceptionClassName = basename(str_replace('\\', '/', get_class($exception)));
if (!$exception instanceof BaseException || !array_key_exists($exceptionClassName, $this->exceptionTemplates)) {
return;
}
$templatePath = $this->exceptionTemplates[$exceptionClassName];
$parameters = array_merge($exception->getParameters(), array('message' => $exception->getMessage()));
$response = $this->templating->renderResponse($templatePath, $parameters);
$event->setResponse($response);
}
}
| 1 | 9,156 | This line was useful for auto-completion, I think it should be re-added with specifying the var name (`$exception`) and simplifying the FQCN. | EasyCorp-EasyAdminBundle | php |
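
What the reviewer asks for would look roughly like this; the `use` statement for `BaseException` already exists at the top of the file, so the short class name suffices:

/** @var BaseException $exception */
$exception = $event->getException();
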
@@ -228,10 +228,16 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request, extr
outreq.Write(backendConn)
+ errCh := make(chan error, 1)
go func() {
- io.Copy(backendConn, conn) // write tcp stream to backend.
+ _, err := io.Copy(backendConn, conn) // write tcp stream to backend.
+ errCh <- err
}()
- io.Copy(conn, backendConn) // read tcp stream from backend.
+ go func() {
+ _, err := io.Copy(conn, backendConn) // read tcp stream from backend.
+ errCh <- err
+ }()
+ <-errCh
} else {
defer res.Body.Close()
for _, h := range hopHeaders {
 | 1 | // This file is adapted from code in the net/http/httputil
// package of the Go standard library, which is by the
// Go Authors, and bears this copyright and license info:
//
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// This file has been modified from the standard lib to
// meet the needs of the application.
package proxy
import (
"crypto/tls"
"io"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
)
// onExitFlushLoop is a callback set by tests to detect the state of the
// flushLoop() goroutine.
var onExitFlushLoop func()
// ReverseProxy is an HTTP Handler that takes an incoming request and
// sends it to another server, proxying the response back to the
// client.
type ReverseProxy struct {
// Director must be a function which modifies
// the request into a new request to be sent
// using Transport. Its response is then copied
// back to the original client unmodified.
Director func(*http.Request)
// The transport used to perform proxy requests.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
// FlushInterval specifies the flush interval
// to flush to the client while copying the
// response body.
// If zero, no periodic flushing is done.
FlushInterval time.Duration
}
func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/")
switch {
case aslash && bslash:
return a + b[1:]
case !aslash && !bslash:
return a + "/" + b
}
return a + b
}
// Though the relevant directive prefix is just "unix:", url.Parse
// will - assuming the regular URL scheme - add additional slashes
// as if "unix" was a request protocol.
// What we need is just the path, so if "unix:/var/run/www.socket"
// was the proxy directive, the parsed hostName would be
// "unix:///var/run/www.socket", hence the ambiguous trimming.
func socketDial(hostName string) func(network, addr string) (conn net.Conn, err error) {
return func(network, addr string) (conn net.Conn, err error) {
return net.Dial("unix", hostName[len("unix://"):])
}
}
// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites
// URLs to the scheme, host, and base path provided in target. If the
// target's path is "/base" and the incoming request was for "/dir",
// the target request will be for /base/dir.
// Without logic: target's path is "/", incoming is "/api/messages",
// without is "/api", then the target request will be for /messages.
func NewSingleHostReverseProxy(target *url.URL, without string) *ReverseProxy {
targetQuery := target.RawQuery
director := func(req *http.Request) {
if target.Scheme == "unix" {
// to make Dial work with unix URL,
// scheme and host have to be faked
req.URL.Scheme = "http"
req.URL.Host = "socket"
} else {
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
}
req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
if targetQuery == "" || req.URL.RawQuery == "" {
req.URL.RawQuery = targetQuery + req.URL.RawQuery
} else {
req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
}
// Trims the path of the socket from the URL path.
// This is done because req.URL passed to your proxied service
// will have the full path of the socket file prefixed to it.
// Calling /test on a server that proxies requests to
// unix:/var/run/www.socket will thus set the requested path
// to /var/run/www.socket/test, rendering paths useless.
if target.Scheme == "unix" {
// See comment on socketDial for the trim
socketPrefix := target.String()[len("unix://"):]
req.URL.Path = strings.TrimPrefix(req.URL.Path, socketPrefix)
}
// We are then safe to remove the `without` prefix.
if without != "" {
req.URL.Path = strings.TrimPrefix(req.URL.Path, without)
}
}
rp := &ReverseProxy{Director: director, FlushInterval: 250 * time.Millisecond} // flushing good for streaming & server-sent events
if target.Scheme == "unix" {
rp.Transport = &http.Transport{
Dial: socketDial(target.String()),
}
}
return rp
}
func copyHeader(dst, src http.Header) {
for k, vv := range src {
for _, v := range vv {
dst.Add(k, v)
}
}
}
// Hop-by-hop headers. These are removed when sent to the backend.
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
var hopHeaders = []string{
"Connection",
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"Te", // canonicalized version of "TE"
"Trailers",
"Transfer-Encoding",
"Upgrade",
}
// InsecureTransport is used to facilitate HTTPS proxying
// when it is OK for upstream to be using a bad certificate,
// since this transport skips verification.
var InsecureTransport http.RoundTripper = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request, extraHeaders http.Header) error {
transport := p.Transport
if transport == nil {
transport = http.DefaultTransport
}
outreq := new(http.Request)
*outreq = *req // includes shallow copies of maps, but okay
p.Director(outreq)
outreq.Proto = "HTTP/1.1"
outreq.ProtoMajor = 1
outreq.ProtoMinor = 1
outreq.Close = false
// Remove hop-by-hop headers to the backend. Especially
// important is "Connection" because we want a persistent
// connection, regardless of what the client sent to us. This
// is modifying the same underlying map from req (shallow
// copied above) so we only copy it if necessary.
copiedHeaders := false
for _, h := range hopHeaders {
if outreq.Header.Get(h) != "" {
if !copiedHeaders {
outreq.Header = make(http.Header)
copyHeader(outreq.Header, req.Header)
copiedHeaders = true
}
outreq.Header.Del(h)
}
}
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
// If we aren't the first proxy retain prior
// X-Forwarded-For information as a comma+space
// separated list and fold multiple headers into one.
if prior, ok := outreq.Header["X-Forwarded-For"]; ok {
clientIP = strings.Join(prior, ", ") + ", " + clientIP
}
outreq.Header.Set("X-Forwarded-For", clientIP)
}
if extraHeaders != nil {
for k, v := range extraHeaders {
outreq.Header[k] = v
}
}
res, err := transport.RoundTrip(outreq)
if err != nil {
return err
}
if res.StatusCode == http.StatusSwitchingProtocols && strings.ToLower(res.Header.Get("Upgrade")) == "websocket" {
res.Body.Close()
hj, ok := rw.(http.Hijacker)
if !ok {
return nil
}
conn, _, err := hj.Hijack()
if err != nil {
return err
}
defer conn.Close()
backendConn, err := net.Dial("tcp", outreq.URL.Host)
if err != nil {
return err
}
defer backendConn.Close()
outreq.Write(backendConn)
go func() {
io.Copy(backendConn, conn) // write tcp stream to backend.
}()
io.Copy(conn, backendConn) // read tcp stream from backend.
} else {
defer res.Body.Close()
for _, h := range hopHeaders {
res.Header.Del(h)
}
copyHeader(rw.Header(), res.Header)
rw.WriteHeader(res.StatusCode)
p.copyResponse(rw, res.Body)
}
return nil
}
func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
if p.FlushInterval != 0 {
if wf, ok := dst.(writeFlusher); ok {
mlw := &maxLatencyWriter{
dst: wf,
latency: p.FlushInterval,
done: make(chan bool),
}
go mlw.flushLoop()
defer mlw.stop()
dst = mlw
}
}
io.Copy(dst, src)
}
type writeFlusher interface {
io.Writer
http.Flusher
}
type maxLatencyWriter struct {
dst writeFlusher
latency time.Duration
lk sync.Mutex // protects Write + Flush
done chan bool
}
func (m *maxLatencyWriter) Write(p []byte) (int, error) {
m.lk.Lock()
defer m.lk.Unlock()
return m.dst.Write(p)
}
func (m *maxLatencyWriter) flushLoop() {
t := time.NewTicker(m.latency)
defer t.Stop()
for {
select {
case <-m.done:
if onExitFlushLoop != nil {
onExitFlushLoop()
}
return
case <-t.C:
m.lk.Lock()
m.dst.Flush()
m.lk.Unlock()
}
}
}
func (m *maxLatencyWriter) stop() { m.done <- true }
| 1 | 7,983 | This looks like you're not collecting everything from the error channel. | caddyserver-caddy | go |
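
One way to collect both results, sketched against the variable names in the patch; this is an illustration, not the committed fix. After the first copy returns, closing both connections unblocks the second io.Copy so its error can be drained as well:

// Sized for both senders so neither goroutine blocks on send.
errCh := make(chan error, 2)
go func() {
	_, err := io.Copy(backendConn, conn) // write tcp stream to backend.
	errCh <- err
}()
go func() {
	_, err := io.Copy(conn, backendConn) // read tcp stream from backend.
	errCh <- err
}()
err := <-errCh // one direction finished
conn.Close()   // unblock the other io.Copy
backendConn.Close()
if err2 := <-errCh; err == nil {
	err = err2 // keep the second error if the first was nil
}
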
@@ -1,10 +1,10 @@
class CheckoutMailer < BaseMailer
def receipt(checkout)
- @checkout = checkout
+ @plan = checkout.plan
mail(
- to: @checkout.user_email,
- subject: "Your receipt for #{@checkout.plan_name}"
+ to: checkout.user_email,
+ subject: "Your receipt for #{@plan.name}"
)
end
end
 | 1 | class CheckoutMailer < BaseMailer
def receipt(checkout)
@checkout = checkout
mail(
to: @checkout.user_email,
subject: "Your receipt for #{@checkout.plan_name}"
)
end
end
| 1 | 14,023 | This currently violates the Law of Demeter. Using `checkout.plan_name` is the quick resolution. | thoughtbot-upcase | rb |
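
The quick resolution the reviewer describes, assuming Checkout has a plan association; ActiveSupport's delegate with a prefix generates #plan_name:

# Sketch: Checkout exposes plan_name itself, so callers stop reaching
# through checkout.plan for the name.
class Checkout
  delegate :name, to: :plan, prefix: true # provides #plan_name
end

# In the mailer: subject: "Your receipt for #{checkout.plan_name}"
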
@@ -77,7 +77,7 @@ class Image implements EntityFileUploadInterface
* @param string $entityName
* @param int $entityId
* @param string|null $type
- * @param string $temporaryFilename
+ * @param string|null $temporaryFilename
*/
public function __construct($entityName, $entityId, $type, $temporaryFilename)
{
 | 1 | <?php
namespace Shopsys\FrameworkBundle\Component\Image;
use DateTime;
use Doctrine\ORM\Mapping as ORM;
use Shopsys\FrameworkBundle\Component\FileUpload\EntityFileUploadInterface;
use Shopsys\FrameworkBundle\Component\FileUpload\FileForUpload;
use Shopsys\FrameworkBundle\Component\FileUpload\FileNamingConvention;
use Shopsys\FrameworkBundle\Component\Image\Config\ImageConfig;
/**
* @ORM\Table(name="images", indexes={@ORM\Index(columns={"entity_name", "entity_id", "type"})})
* @ORM\Entity
*/
class Image implements EntityFileUploadInterface
{
const UPLOAD_KEY = 'image';
/**
* @var int
*
* @ORM\Column(type="integer")
* @ORM\Id
* @ORM\GeneratedValue(strategy="IDENTITY")
*/
protected $id;
/**
* @var string
*
* @ORM\Column(type="string", length=100)
*/
protected $entityName;
/**
* @var int
*
* @ORM\Column(type="integer")
*/
protected $entityId;
/**
* @var string
*
* @ORM\Column(type="string", length=100, nullable=true)
*/
protected $type;
/**
* @var string
*
* @ORM\Column(type="string", length=5)
*/
protected $extension;
/**
* @var int
*
* @ORM\Column(type="integer", nullable=true)
*/
protected $position;
/**
* @var \Datetime
*
* @ORM\Column(type="datetime")
*/
protected $modifiedAt;
/**
* @var string|null
*/
protected $temporaryFilename;
/**
* @param string $entityName
* @param int $entityId
* @param string|null $type
* @param string $temporaryFilename
*/
public function __construct($entityName, $entityId, $type, $temporaryFilename)
{
$this->entityName = $entityName;
$this->entityId = $entityId;
$this->type = $type;
$this->setTemporaryFilename($temporaryFilename);
}
/**
* @return \Shopsys\FrameworkBundle\Component\FileUpload\FileForUpload[]
*/
public function getTemporaryFilesForUpload()
{
$files = [];
if ($this->temporaryFilename !== null) {
$files[self::UPLOAD_KEY] = new FileForUpload(
$this->temporaryFilename,
true,
$this->entityName,
$this->type . '/' . ImageConfig::ORIGINAL_SIZE_NAME,
FileNamingConvention::TYPE_ID
);
}
return $files;
}
/**
* @param string $key
* @param string $originalFilename
*/
public function setFileAsUploaded($key, $originalFilename)
{
if ($key === self::UPLOAD_KEY) {
$this->extension = pathinfo($originalFilename, PATHINFO_EXTENSION);
} else {
throw new \Shopsys\FrameworkBundle\Component\FileUpload\Exception\InvalidFileKeyException($key);
}
}
/**
* @param string|null $temporaryFilename
*/
public function setTemporaryFilename($temporaryFilename)
{
$this->temporaryFilename = $temporaryFilename;
// workaround: Entity must be changed so that preUpdate and postUpdate are called
$this->modifiedAt = new DateTime();
}
/**
* @param int $position
*/
public function setPosition($position)
{
$this->position = $position;
}
/**
* @return string
*/
public function getFilename()
{
return $this->id . '.' . $this->extension;
}
/**
* @return int
*/
public function getId()
{
return $this->id;
}
/**
* @return string
*/
public function getEntityName()
{
return $this->entityName;
}
/**
* @return int
*/
public function getEntityId()
{
return $this->entityId;
}
/**
* @return string|null
*/
public function getType()
{
return $this->type;
}
/**
* @return string
*/
public function getExtension()
{
return $this->extension;
}
/**
* @return \DateTime
*/
public function getModifiedAt()
{
return $this->modifiedAt;
}
}
| 1 | 9,925 | is there any scenario when `$temporaryFilename` can be null? | shopsys-shopsys | php |
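
The class itself suggests the answer is yes: setTemporaryFilename() is annotated string|null and getTemporaryFilesForUpload() guards against null, so a null filename plausibly means "no pending upload". A hypothetical call site:

// Null temporary filename: nothing is queued for upload.
$image = new Image('product', 42, null, null);
$files = $image->getTemporaryFilesForUpload(); // returns an empty array
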
@@ -171,6 +171,7 @@ class FlinkTypeToType extends FlinkTypeVisitor<Type> {
}
@Override
+ @SuppressWarnings("ReferenceEquality")
public Type visit(RowType rowType) {
List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(rowType.getFieldCount());
boolean isRoot = root == rowType;
 | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.flink.table.types.logical.ArrayType;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.BinaryType;
import org.apache.flink.table.types.logical.BooleanType;
import org.apache.flink.table.types.logical.CharType;
import org.apache.flink.table.types.logical.DateType;
import org.apache.flink.table.types.logical.DecimalType;
import org.apache.flink.table.types.logical.DoubleType;
import org.apache.flink.table.types.logical.FloatType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LocalZonedTimestampType;
import org.apache.flink.table.types.logical.MapType;
import org.apache.flink.table.types.logical.MultisetType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.SmallIntType;
import org.apache.flink.table.types.logical.TimeType;
import org.apache.flink.table.types.logical.TimestampType;
import org.apache.flink.table.types.logical.TinyIntType;
import org.apache.flink.table.types.logical.VarBinaryType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
class FlinkTypeToType extends FlinkTypeVisitor<Type> {
private final RowType root;
private int nextId;
FlinkTypeToType(RowType root) {
this.root = root;
// the root struct's fields use the first ids
this.nextId = root.getFieldCount();
}
private int getNextId() {
int next = nextId;
nextId += 1;
return next;
}
@Override
public Type visit(CharType charType) {
return Types.StringType.get();
}
@Override
public Type visit(VarCharType varCharType) {
return Types.StringType.get();
}
@Override
public Type visit(BooleanType booleanType) {
return Types.BooleanType.get();
}
@Override
public Type visit(BinaryType binaryType) {
return Types.FixedType.ofLength(binaryType.getLength());
}
@Override
public Type visit(VarBinaryType varBinaryType) {
return Types.BinaryType.get();
}
@Override
public Type visit(DecimalType decimalType) {
return Types.DecimalType.of(decimalType.getPrecision(), decimalType.getScale());
}
@Override
public Type visit(TinyIntType tinyIntType) {
return Types.IntegerType.get();
}
@Override
public Type visit(SmallIntType smallIntType) {
return Types.IntegerType.get();
}
@Override
public Type visit(IntType intType) {
return Types.IntegerType.get();
}
@Override
public Type visit(BigIntType bigIntType) {
return Types.LongType.get();
}
@Override
public Type visit(FloatType floatType) {
return Types.FloatType.get();
}
@Override
public Type visit(DoubleType doubleType) {
return Types.DoubleType.get();
}
@Override
public Type visit(DateType dateType) {
return Types.DateType.get();
}
@Override
public Type visit(TimeType timeType) {
return Types.TimeType.get();
}
@Override
public Type visit(TimestampType timestampType) {
return Types.TimestampType.withoutZone();
}
@Override
public Type visit(LocalZonedTimestampType localZonedTimestampType) {
return Types.TimestampType.withZone();
}
@Override
public Type visit(ArrayType arrayType) {
Type elementType = arrayType.getElementType().accept(this);
if (arrayType.getElementType().isNullable()) {
return Types.ListType.ofOptional(getNextId(), elementType);
} else {
return Types.ListType.ofRequired(getNextId(), elementType);
}
}
@Override
public Type visit(MultisetType multisetType) {
Type elementType = multisetType.getElementType().accept(this);
return Types.MapType.ofRequired(getNextId(), getNextId(), elementType, Types.IntegerType.get());
}
@Override
public Type visit(MapType mapType) {
// keys in map are not allowed to be null.
Type keyType = mapType.getKeyType().accept(this);
Type valueType = mapType.getValueType().accept(this);
if (mapType.getValueType().isNullable()) {
return Types.MapType.ofOptional(getNextId(), getNextId(), keyType, valueType);
} else {
return Types.MapType.ofRequired(getNextId(), getNextId(), keyType, valueType);
}
}
@Override
public Type visit(RowType rowType) {
List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(rowType.getFieldCount());
boolean isRoot = root == rowType;
List<Type> types = rowType.getFields().stream()
.map(f -> f.getType().accept(this))
.collect(Collectors.toList());
for (int i = 0; i < rowType.getFieldCount(); i++) {
int id = isRoot ? i : getNextId();
RowType.RowField field = rowType.getFields().get(i);
String name = field.getName();
String comment = field.getDescription().orElse(null);
if (field.getType().isNullable()) {
newFields.add(Types.NestedField.optional(id, name, types.get(i), comment));
} else {
newFields.add(Types.NestedField.required(id, name, types.get(i), comment));
}
}
return Types.StructType.of(newFields);
}
}
| 1 | 38,578 | this is for the `boolean isRoot = root == rowType` check, which seems to be on purpose, but maybe you could double check whether using ref. equality here is still wanted? Same for `SparkTypeToType` | apache-iceberg | java |
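
If the identity comparison is intentional, a speculative alternative that avoids the warning altogether is to track root-ness with a flag instead of comparing references; this is a sketch, not the merged change, and it assumes visiting always starts at the root:

// Hypothetical: mark the first RowType visited as the root. Children are
// visited only after this flag flips, so they correctly get isRoot=false.
private boolean visitedRoot = false;

@Override
public Type visit(RowType rowType) {
  boolean isRoot = !visitedRoot; // replaces: root == rowType
  visitedRoot = true;
  // ... the rest of the method proceeds unchanged ...
}
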
@@ -260,6 +260,7 @@ static void roots_drag_icon_handle_surface_commit(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, surface_commit);
+ roots_drag_icon_update_position(icon);
roots_drag_icon_damage_whole(icon);
}
| 1 | #define _POSIX_C_SOURCE 199309L
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <wayland-server.h>
#include <wlr/config.h>
#include <wlr/types/wlr_idle.h>
#include <wlr/types/wlr_layer_shell.h>
#include <wlr/types/wlr_xcursor_manager.h>
#include <wlr/util/log.h>
#include "rootston/cursor.h"
#include "rootston/input.h"
#include "rootston/keyboard.h"
#include "rootston/seat.h"
#include "rootston/xcursor.h"
static void handle_keyboard_key(struct wl_listener *listener, void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, keyboard_key);
struct roots_desktop *desktop = keyboard->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, keyboard->seat->seat);
struct wlr_event_keyboard_key *event = data;
roots_keyboard_handle_key(keyboard, event);
}
static void handle_keyboard_modifiers(struct wl_listener *listener,
void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, keyboard_modifiers);
struct roots_desktop *desktop = keyboard->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, keyboard->seat->seat);
roots_keyboard_handle_modifiers(keyboard);
}
static void handle_cursor_motion(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, motion);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_motion *event = data;
roots_cursor_handle_motion(cursor, event);
}
static void handle_cursor_motion_absolute(struct wl_listener *listener,
void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, motion_absolute);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_motion_absolute *event = data;
roots_cursor_handle_motion_absolute(cursor, event);
}
static void handle_cursor_button(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, button);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_button *event = data;
roots_cursor_handle_button(cursor, event);
}
static void handle_cursor_axis(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, axis);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_axis *event = data;
roots_cursor_handle_axis(cursor, event);
}
static void handle_touch_down(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_down);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_down *event = data;
roots_cursor_handle_touch_down(cursor, event);
}
static void handle_touch_up(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_up);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_up *event = data;
roots_cursor_handle_touch_up(cursor, event);
}
static void handle_touch_motion(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_motion);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_motion *event = data;
roots_cursor_handle_touch_motion(cursor, event);
}
static void handle_tool_axis(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, tool_axis);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_tablet_tool_axis *event = data;
roots_cursor_handle_tool_axis(cursor, event);
}
static void handle_tool_tip(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, tool_tip);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_tablet_tool_tip *event = data;
roots_cursor_handle_tool_tip(cursor, event);
}
static void handle_request_set_cursor(struct wl_listener *listener,
void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, request_set_cursor);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_seat_pointer_request_set_cursor_event *event = data;
roots_cursor_handle_request_set_cursor(cursor, event);
}
static void seat_reset_device_mappings(struct roots_seat *seat,
struct wlr_input_device *device) {
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_config *config = seat->input->config;
wlr_cursor_map_input_to_output(cursor, device, NULL);
struct roots_device_config *dconfig;
if ((dconfig = roots_config_get_device(config, device))) {
wlr_cursor_map_input_to_region(cursor, device, dconfig->mapped_box);
}
}
static void seat_set_device_output_mappings(struct roots_seat *seat,
struct wlr_input_device *device, struct wlr_output *output) {
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_config *config = seat->input->config;
struct roots_device_config *dconfig =
roots_config_get_device(config, device);
const char *mapped_output = NULL;
if (dconfig != NULL) {
mapped_output = dconfig->mapped_output;
}
if (mapped_output == NULL) {
mapped_output = device->output_name;
}
if (mapped_output && strcmp(mapped_output, output->name) == 0) {
wlr_cursor_map_input_to_output(cursor, device, output);
}
}
void roots_seat_configure_cursor(struct roots_seat *seat) {
struct roots_config *config = seat->input->config;
struct roots_desktop *desktop = seat->input->server->desktop;
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_pointer *pointer;
struct roots_touch *touch;
struct roots_tablet_tool *tablet_tool;
struct roots_output *output;
// reset mappings
wlr_cursor_map_to_output(cursor, NULL);
wl_list_for_each(pointer, &seat->pointers, link) {
seat_reset_device_mappings(seat, pointer->device);
}
wl_list_for_each(touch, &seat->touch, link) {
seat_reset_device_mappings(seat, touch->device);
}
wl_list_for_each(tablet_tool, &seat->tablet_tools, link) {
seat_reset_device_mappings(seat, tablet_tool->device);
}
// configure device to output mappings
const char *mapped_output = NULL;
struct roots_cursor_config *cc =
roots_config_get_cursor(config, seat->seat->name);
if (cc != NULL) {
mapped_output = cc->mapped_output;
}
wl_list_for_each(output, &desktop->outputs, link) {
if (mapped_output &&
strcmp(mapped_output, output->wlr_output->name) == 0) {
wlr_cursor_map_to_output(cursor, output->wlr_output);
}
wl_list_for_each(pointer, &seat->pointers, link) {
seat_set_device_output_mappings(seat, pointer->device,
output->wlr_output);
}
wl_list_for_each(tablet_tool, &seat->tablet_tools, link) {
seat_set_device_output_mappings(seat, tablet_tool->device,
output->wlr_output);
}
wl_list_for_each(touch, &seat->touch, link) {
seat_set_device_output_mappings(seat, touch->device,
output->wlr_output);
}
}
}
static void roots_seat_init_cursor(struct roots_seat *seat) {
seat->cursor = roots_cursor_create(seat);
if (!seat->cursor) {
return;
}
seat->cursor->seat = seat;
struct wlr_cursor *wlr_cursor = seat->cursor->cursor;
struct roots_desktop *desktop = seat->input->server->desktop;
wlr_cursor_attach_output_layout(wlr_cursor, desktop->layout);
roots_seat_configure_cursor(seat);
roots_seat_configure_xcursor(seat);
// add input signals
wl_signal_add(&wlr_cursor->events.motion, &seat->cursor->motion);
seat->cursor->motion.notify = handle_cursor_motion;
wl_signal_add(&wlr_cursor->events.motion_absolute,
&seat->cursor->motion_absolute);
seat->cursor->motion_absolute.notify = handle_cursor_motion_absolute;
wl_signal_add(&wlr_cursor->events.button, &seat->cursor->button);
seat->cursor->button.notify = handle_cursor_button;
wl_signal_add(&wlr_cursor->events.axis, &seat->cursor->axis);
seat->cursor->axis.notify = handle_cursor_axis;
wl_signal_add(&wlr_cursor->events.touch_down, &seat->cursor->touch_down);
seat->cursor->touch_down.notify = handle_touch_down;
wl_signal_add(&wlr_cursor->events.touch_up, &seat->cursor->touch_up);
seat->cursor->touch_up.notify = handle_touch_up;
wl_signal_add(&wlr_cursor->events.touch_motion,
&seat->cursor->touch_motion);
seat->cursor->touch_motion.notify = handle_touch_motion;
wl_signal_add(&wlr_cursor->events.tablet_tool_axis,
&seat->cursor->tool_axis);
seat->cursor->tool_axis.notify = handle_tool_axis;
wl_signal_add(&wlr_cursor->events.tablet_tool_tip, &seat->cursor->tool_tip);
seat->cursor->tool_tip.notify = handle_tool_tip;
wl_signal_add(&seat->seat->events.request_set_cursor,
&seat->cursor->request_set_cursor);
seat->cursor->request_set_cursor.notify = handle_request_set_cursor;
}
static void roots_drag_icon_handle_surface_commit(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, surface_commit);
roots_drag_icon_damage_whole(icon);
}
static void roots_drag_icon_handle_map(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, map);
roots_drag_icon_damage_whole(icon);
}
static void roots_drag_icon_handle_destroy(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, destroy);
roots_drag_icon_damage_whole(icon);
wl_list_remove(&icon->link);
wl_list_remove(&icon->surface_commit.link);
wl_list_remove(&icon->map.link);
wl_list_remove(&icon->destroy.link);
free(icon);
}
static void roots_seat_handle_new_drag_icon(struct wl_listener *listener,
void *data) {
struct roots_seat *seat = wl_container_of(listener, seat, new_drag_icon);
struct wlr_drag_icon *wlr_drag_icon = data;
struct roots_drag_icon *icon = calloc(1, sizeof(struct roots_drag_icon));
if (icon == NULL) {
return;
}
icon->seat = seat;
icon->wlr_drag_icon = wlr_drag_icon;
icon->surface_commit.notify = roots_drag_icon_handle_surface_commit;
wl_signal_add(&wlr_drag_icon->surface->events.commit, &icon->surface_commit);
icon->map.notify = roots_drag_icon_handle_map;
wl_signal_add(&wlr_drag_icon->events.map, &icon->map);
icon->destroy.notify = roots_drag_icon_handle_destroy;
wl_signal_add(&wlr_drag_icon->events.destroy, &icon->destroy);
wl_list_insert(&seat->drag_icons, &icon->link);
}
void roots_drag_icon_update_position(struct roots_drag_icon *icon) {
roots_drag_icon_damage_whole(icon);
struct wlr_drag_icon *wlr_icon = icon->wlr_drag_icon;
struct roots_seat *seat = icon->seat;
struct wlr_cursor *cursor = seat->cursor->cursor;
if (wlr_icon->is_pointer) {
icon->x = cursor->x + wlr_icon->sx;
icon->y = cursor->y + wlr_icon->sy;
} else {
struct wlr_touch_point *point =
wlr_seat_touch_get_point(seat->seat, wlr_icon->touch_id);
if (point == NULL) {
return;
}
icon->x = seat->touch_x + wlr_icon->sx;
icon->y = seat->touch_y + wlr_icon->sy;
}
roots_drag_icon_damage_whole(icon);
}
void roots_drag_icon_damage_whole(struct roots_drag_icon *icon) {
struct roots_output *output;
wl_list_for_each(output, &icon->seat->input->server->desktop->outputs,
link) {
output_damage_whole_drag_icon(output, icon);
}
}
static void seat_view_destroy(struct roots_seat_view *seat_view);
static void roots_seat_handle_destroy(struct wl_listener *listener,
void *data) {
struct roots_seat *seat = wl_container_of(listener, seat, destroy);
// TODO: probably more to be freed here
wl_list_remove(&seat->destroy.link);
struct roots_seat_view *view, *nview;
wl_list_for_each_safe(view, nview, &seat->views, link) {
seat_view_destroy(view);
}
}
void roots_seat_destroy(struct roots_seat *seat) {
roots_seat_handle_destroy(&seat->destroy, seat->seat);
wlr_seat_destroy(seat->seat);
}
struct roots_seat *roots_seat_create(struct roots_input *input, char *name) {
struct roots_seat *seat = calloc(1, sizeof(struct roots_seat));
if (!seat) {
return NULL;
}
wl_list_init(&seat->keyboards);
wl_list_init(&seat->pointers);
wl_list_init(&seat->touch);
wl_list_init(&seat->tablet_tools);
wl_list_init(&seat->views);
wl_list_init(&seat->drag_icons);
seat->input = input;
seat->seat = wlr_seat_create(input->server->wl_display, name);
if (!seat->seat) {
free(seat);
return NULL;
}
roots_seat_init_cursor(seat);
if (!seat->cursor) {
wlr_seat_destroy(seat->seat);
free(seat);
return NULL;
}
wl_list_insert(&input->seats, &seat->link);
seat->new_drag_icon.notify = roots_seat_handle_new_drag_icon;
wl_signal_add(&seat->seat->events.new_drag_icon, &seat->new_drag_icon);
seat->destroy.notify = roots_seat_handle_destroy;
wl_signal_add(&seat->seat->events.destroy, &seat->destroy);
return seat;
}
static void seat_update_capabilities(struct roots_seat *seat) {
uint32_t caps = 0;
if (!wl_list_empty(&seat->keyboards)) {
caps |= WL_SEAT_CAPABILITY_KEYBOARD;
}
if (!wl_list_empty(&seat->pointers) || !wl_list_empty(&seat->tablet_tools)) {
caps |= WL_SEAT_CAPABILITY_POINTER;
}
if (!wl_list_empty(&seat->touch)) {
caps |= WL_SEAT_CAPABILITY_TOUCH;
}
wlr_seat_set_capabilities(seat->seat, caps);
// Hide cursor if seat doesn't have pointer capability
if ((caps & WL_SEAT_CAPABILITY_POINTER) == 0) {
wlr_cursor_set_image(seat->cursor->cursor, NULL, 0, 0, 0, 0, 0, 0);
} else {
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
seat->cursor->default_xcursor, seat->cursor->cursor);
}
}
static void handle_keyboard_destroy(struct wl_listener *listener, void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, device_destroy);
struct roots_seat *seat = keyboard->seat;
wl_list_remove(&keyboard->device_destroy.link);
wl_list_remove(&keyboard->keyboard_key.link);
wl_list_remove(&keyboard->keyboard_modifiers.link);
roots_keyboard_destroy(keyboard);
seat_update_capabilities(seat);
}
static void seat_add_keyboard(struct roots_seat *seat,
struct wlr_input_device *device) {
assert(device->type == WLR_INPUT_DEVICE_KEYBOARD);
struct roots_keyboard *keyboard =
roots_keyboard_create(device, seat->input);
if (keyboard == NULL) {
wlr_log(L_ERROR, "could not allocate keyboard for seat");
return;
}
keyboard->seat = seat;
wl_list_insert(&seat->keyboards, &keyboard->link);
keyboard->device_destroy.notify = handle_keyboard_destroy;
wl_signal_add(&keyboard->device->events.destroy, &keyboard->device_destroy);
keyboard->keyboard_key.notify = handle_keyboard_key;
wl_signal_add(&keyboard->device->keyboard->events.key,
&keyboard->keyboard_key);
keyboard->keyboard_modifiers.notify = handle_keyboard_modifiers;
wl_signal_add(&keyboard->device->keyboard->events.modifiers,
&keyboard->keyboard_modifiers);
wlr_seat_set_keyboard(seat->seat, device);
}
static void handle_pointer_destroy(struct wl_listener *listener, void *data) {
struct roots_pointer *pointer =
wl_container_of(listener, pointer, device_destroy);
struct roots_seat *seat = pointer->seat;
wl_list_remove(&pointer->link);
wlr_cursor_detach_input_device(seat->cursor->cursor, pointer->device);
wl_list_remove(&pointer->device_destroy.link);
free(pointer);
seat_update_capabilities(seat);
}
static void seat_add_pointer(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_pointer *pointer = calloc(sizeof(struct roots_pointer), 1);
if (!pointer) {
wlr_log(L_ERROR, "could not allocate pointer for seat");
return;
}
device->data = pointer;
pointer->device = device;
pointer->seat = seat;
wl_list_insert(&seat->pointers, &pointer->link);
pointer->device_destroy.notify = handle_pointer_destroy;
wl_signal_add(&pointer->device->events.destroy, &pointer->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
static void handle_touch_destroy(struct wl_listener *listener, void *data) {
struct roots_pointer *touch =
wl_container_of(listener, touch, device_destroy);
struct roots_seat *seat = touch->seat;
wl_list_remove(&touch->link);
wlr_cursor_detach_input_device(seat->cursor->cursor, touch->device);
wl_list_remove(&touch->device_destroy.link);
free(touch);
seat_update_capabilities(seat);
}
static void seat_add_touch(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_touch *touch = calloc(sizeof(struct roots_touch), 1);
if (!touch) {
wlr_log(L_ERROR, "could not allocate touch for seat");
return;
}
device->data = touch;
touch->device = device;
touch->seat = seat;
wl_list_insert(&seat->touch, &touch->link);
touch->device_destroy.notify = handle_touch_destroy;
wl_signal_add(&touch->device->events.destroy, &touch->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
static void seat_add_tablet_pad(struct roots_seat *seat,
struct wlr_input_device *device) {
// TODO
}
static void handle_tablet_tool_destroy(struct wl_listener *listener,
void *data) {
struct roots_tablet_tool *tablet_tool =
wl_container_of(listener, tablet_tool, device_destroy);
struct roots_seat *seat = tablet_tool->seat;
wlr_cursor_detach_input_device(seat->cursor->cursor, tablet_tool->device);
wl_list_remove(&tablet_tool->device_destroy.link);
wl_list_remove(&tablet_tool->link);
free(tablet_tool);
seat_update_capabilities(seat);
}
static void seat_add_tablet_tool(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_tablet_tool *tablet_tool =
calloc(sizeof(struct roots_tablet_tool), 1);
if (!tablet_tool) {
wlr_log(L_ERROR, "could not allocate tablet_tool for seat");
return;
}
device->data = tablet_tool;
tablet_tool->device = device;
tablet_tool->seat = seat;
wl_list_insert(&seat->tablet_tools, &tablet_tool->link);
tablet_tool->device_destroy.notify = handle_tablet_tool_destroy;
wl_signal_add(&tablet_tool->device->events.destroy,
&tablet_tool->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
void roots_seat_add_device(struct roots_seat *seat,
struct wlr_input_device *device) {
switch (device->type) {
case WLR_INPUT_DEVICE_KEYBOARD:
seat_add_keyboard(seat, device);
break;
case WLR_INPUT_DEVICE_POINTER:
seat_add_pointer(seat, device);
break;
case WLR_INPUT_DEVICE_TOUCH:
seat_add_touch(seat, device);
break;
case WLR_INPUT_DEVICE_TABLET_PAD:
seat_add_tablet_pad(seat, device);
break;
case WLR_INPUT_DEVICE_TABLET_TOOL:
seat_add_tablet_tool(seat, device);
break;
}
seat_update_capabilities(seat);
}
void roots_seat_configure_xcursor(struct roots_seat *seat) {
const char *cursor_theme = NULL;
struct roots_cursor_config *cc =
roots_config_get_cursor(seat->input->config, seat->seat->name);
if (cc != NULL) {
cursor_theme = cc->theme;
if (cc->default_image != NULL) {
seat->cursor->default_xcursor = cc->default_image;
}
}
if (!seat->cursor->xcursor_manager) {
seat->cursor->xcursor_manager =
wlr_xcursor_manager_create(cursor_theme, ROOTS_XCURSOR_SIZE);
if (seat->cursor->xcursor_manager == NULL) {
wlr_log(L_ERROR, "Cannot create XCursor manager for theme %s",
cursor_theme);
return;
}
}
struct roots_output *output;
wl_list_for_each(output, &seat->input->server->desktop->outputs, link) {
float scale = output->wlr_output->scale;
if (wlr_xcursor_manager_load(seat->cursor->xcursor_manager, scale)) {
wlr_log(L_ERROR, "Cannot load xcursor theme for output '%s' "
"with scale %f", output->wlr_output->name, scale);
}
}
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
seat->cursor->default_xcursor, seat->cursor->cursor);
wlr_cursor_warp(seat->cursor->cursor, NULL, seat->cursor->cursor->x,
seat->cursor->cursor->y);
}
bool roots_seat_has_meta_pressed(struct roots_seat *seat) {
struct roots_keyboard *keyboard;
wl_list_for_each(keyboard, &seat->keyboards, link) {
if (!keyboard->config->meta_key) {
continue;
}
uint32_t modifiers =
wlr_keyboard_get_modifiers(keyboard->device->keyboard);
if ((modifiers ^ keyboard->config->meta_key) == 0) {
return true;
}
}
return false;
}
struct roots_view *roots_seat_get_focus(struct roots_seat *seat) {
if (!seat->has_focus || wl_list_empty(&seat->views)) {
return NULL;
}
struct roots_seat_view *seat_view =
wl_container_of(seat->views.next, seat_view, link);
return seat_view->view;
}
static void seat_view_destroy(struct roots_seat_view *seat_view) {
struct roots_seat *seat = seat_view->seat;
if (seat_view->view == roots_seat_get_focus(seat)) {
seat->has_focus = false;
seat->cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
}
if (seat_view == seat->cursor->pointer_view) {
seat->cursor->pointer_view = NULL;
}
wl_list_remove(&seat_view->view_unmap.link);
wl_list_remove(&seat_view->view_destroy.link);
wl_list_remove(&seat_view->link);
free(seat_view);
// Focus first view
if (!wl_list_empty(&seat->views)) {
struct roots_seat_view *first_seat_view = wl_container_of(
seat->views.next, first_seat_view, link);
roots_seat_set_focus(seat, first_seat_view->view);
}
}
static void seat_view_handle_unmap(struct wl_listener *listener, void *data) {
struct roots_seat_view *seat_view =
wl_container_of(listener, seat_view, view_unmap);
seat_view_destroy(seat_view);
}
static void seat_view_handle_destroy(struct wl_listener *listener, void *data) {
struct roots_seat_view *seat_view =
wl_container_of(listener, seat_view, view_destroy);
seat_view_destroy(seat_view);
}
static struct roots_seat_view *seat_add_view(struct roots_seat *seat,
struct roots_view *view) {
struct roots_seat_view *seat_view =
calloc(1, sizeof(struct roots_seat_view));
if (seat_view == NULL) {
return NULL;
}
seat_view->seat = seat;
seat_view->view = view;
wl_list_insert(seat->views.prev, &seat_view->link);
seat_view->view_unmap.notify = seat_view_handle_unmap;
wl_signal_add(&view->events.unmap, &seat_view->view_unmap);
seat_view->view_destroy.notify = seat_view_handle_destroy;
wl_signal_add(&view->events.destroy, &seat_view->view_destroy);
return seat_view;
}
struct roots_seat_view *roots_seat_view_from_view(
struct roots_seat *seat, struct roots_view *view) {
if (view == NULL) {
return NULL;
}
bool found = false;
struct roots_seat_view *seat_view = NULL;
wl_list_for_each(seat_view, &seat->views, link) {
if (seat_view->view == view) {
found = true;
break;
}
}
if (!found) {
seat_view = seat_add_view(seat, view);
if (seat_view == NULL) {
wlr_log(L_ERROR, "Allocation failed");
return NULL;
}
}
return seat_view;
}
bool roots_seat_allow_input(struct roots_seat *seat,
struct wl_resource *resource) {
return !seat->exclusive_client ||
wl_resource_get_client(resource) == seat->exclusive_client;
}
void roots_seat_set_focus(struct roots_seat *seat, struct roots_view *view) {
if (view && !roots_seat_allow_input(seat, view->wlr_surface->resource)) {
return;
}
// Make sure the view will be rendered on top of others, even if it's
// already focused in this seat
if (view != NULL) {
wl_list_remove(&view->link);
wl_list_insert(&seat->input->server->desktop->views, &view->link);
}
bool unfullscreen = true;
#ifdef WLR_HAS_XWAYLAND
if (view && view->type == ROOTS_XWAYLAND_VIEW &&
view->xwayland_surface->override_redirect) {
unfullscreen = false;
}
#endif
if (view && unfullscreen) {
struct roots_desktop *desktop = view->desktop;
struct roots_output *output;
struct wlr_box box;
view_get_box(view, &box);
wl_list_for_each(output, &desktop->outputs, link) {
if (output->fullscreen_view &&
output->fullscreen_view != view &&
wlr_output_layout_intersects(
desktop->layout,
output->wlr_output, &box)) {
view_set_fullscreen(output->fullscreen_view,
false, NULL);
}
}
}
struct roots_view *prev_focus = roots_seat_get_focus(seat);
if (view == prev_focus) {
return;
}
#ifdef WLR_HAS_XWAYLAND
if (view && view->type == ROOTS_XWAYLAND_VIEW &&
wlr_xwayland_surface_is_unmanaged(view->xwayland_surface)) {
return;
}
#endif
struct roots_seat_view *seat_view = NULL;
if (view != NULL) {
seat_view = roots_seat_view_from_view(seat, view);
if (seat_view == NULL) {
return;
}
}
seat->has_focus = false;
// Deactivate the old view if it is not focused by some other seat
if (prev_focus != NULL && !input_view_has_focus(seat->input, prev_focus)) {
view_activate(prev_focus, false);
}
if (view == NULL) {
seat->cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
wlr_seat_keyboard_clear_focus(seat->seat);
return;
}
wl_list_remove(&seat_view->link);
wl_list_insert(&seat->views, &seat_view->link);
view_damage_whole(view);
if (seat->focused_layer) {
return;
}
view_activate(view, true);
seat->has_focus = true;
struct wlr_keyboard *keyboard = wlr_seat_get_keyboard(seat->seat);
if (keyboard != NULL) {
wlr_seat_keyboard_notify_enter(seat->seat, view->wlr_surface,
keyboard->keycodes, keyboard->num_keycodes,
&keyboard->modifiers);
} else {
wlr_seat_keyboard_notify_enter(seat->seat, view->wlr_surface,
NULL, 0, NULL);
}
}
/**
* Focus semantics of layer surfaces are somewhat detached from the normal focus
* flow. For layers above the shell layer, for example, you cannot unfocus them.
* You also cannot alt-tab between layer surfaces and shell surfaces.
*/
void roots_seat_set_focus_layer(struct roots_seat *seat,
struct wlr_layer_surface *layer) {
if (!layer) {
seat->focused_layer = NULL;
return;
}
struct wlr_keyboard *keyboard = wlr_seat_get_keyboard(seat->seat);
if (!roots_seat_allow_input(seat, layer->resource)) {
return;
}
if (seat->has_focus) {
struct roots_view *prev_focus = roots_seat_get_focus(seat);
wlr_seat_keyboard_clear_focus(seat->seat);
view_activate(prev_focus, false);
}
seat->has_focus = false;
if (layer->layer >= ZWLR_LAYER_SHELL_V1_LAYER_TOP) {
seat->focused_layer = layer;
}
if (keyboard != NULL) {
wlr_seat_keyboard_notify_enter(seat->seat, layer->surface,
keyboard->keycodes, keyboard->num_keycodes,
&keyboard->modifiers);
} else {
wlr_seat_keyboard_notify_enter(seat->seat, layer->surface,
NULL, 0, NULL);
}
}
void roots_seat_set_exclusive_client(struct roots_seat *seat,
struct wl_client *client) {
if (!client) {
seat->exclusive_client = client;
// Triggers a refocus of the topmost surface layer if necessary
// TODO: Make layer surface focus per-output based on cursor position
struct roots_output *output;
wl_list_for_each(output, &seat->input->server->desktop->outputs, link) {
arrange_layers(output);
}
return;
}
if (seat->focused_layer) {
if (wl_resource_get_client(seat->focused_layer->resource) != client) {
roots_seat_set_focus_layer(seat, NULL);
}
}
if (seat->has_focus) {
struct roots_view *focus = roots_seat_get_focus(seat);
if (wl_resource_get_client(focus->wlr_surface->resource) != client) {
roots_seat_set_focus(seat, NULL);
}
}
if (seat->seat->pointer_state.focused_client) {
if (seat->seat->pointer_state.focused_client->client != client) {
wlr_seat_pointer_clear_focus(seat->seat);
}
}
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
struct wlr_touch_point *point;
wl_list_for_each(point, &seat->seat->touch_state.touch_points, link) {
if (point->client->client != client) {
wlr_seat_touch_point_clear_focus(seat->seat,
now.tv_nsec / 1000, point->touch_id);
}
}
seat->exclusive_client = client;
}
void roots_seat_cycle_focus(struct roots_seat *seat) {
if (wl_list_empty(&seat->views)) {
return;
}
struct roots_seat_view *first_seat_view = wl_container_of(
seat->views.next, first_seat_view, link);
if (!seat->has_focus) {
roots_seat_set_focus(seat, first_seat_view->view);
return;
}
if (wl_list_length(&seat->views) < 2) {
return;
}
// Focus the next view
struct roots_seat_view *next_seat_view = wl_container_of(
first_seat_view->link.next, next_seat_view, link);
roots_seat_set_focus(seat, next_seat_view->view);
// Move the first view to the end of the list
wl_list_remove(&first_seat_view->link);
wl_list_insert(seat->views.prev, &first_seat_view->link);
}
void roots_seat_begin_move(struct roots_seat *seat, struct roots_view *view) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_MOVE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
if (view->maximized) {
cursor->view_x = view->saved.x;
cursor->view_y = view->saved.y;
} else {
cursor->view_x = view->x;
cursor->view_y = view->y;
}
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
ROOTS_XCURSOR_MOVE, seat->cursor->cursor);
}
void roots_seat_begin_resize(struct roots_seat *seat, struct roots_view *view,
uint32_t edges) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_RESIZE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
if (view->maximized) {
cursor->view_x = view->saved.x;
cursor->view_y = view->saved.y;
cursor->view_width = view->saved.width;
cursor->view_height = view->saved.height;
} else {
cursor->view_x = view->x;
cursor->view_y = view->y;
struct wlr_box box;
view_get_box(view, &box);
cursor->view_width = box.width;
cursor->view_height = box.height;
}
cursor->resize_edges = edges;
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
const char *resize_name = wlr_xcursor_get_resize_name(edges);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
resize_name, seat->cursor->cursor);
}
void roots_seat_begin_rotate(struct roots_seat *seat, struct roots_view *view) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_ROTATE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
cursor->view_rotation = view->rotation;
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
ROOTS_XCURSOR_ROTATE, seat->cursor->cursor);
}
void roots_seat_end_compositor_grab(struct roots_seat *seat) {
struct roots_cursor *cursor = seat->cursor;
struct roots_view *view = roots_seat_get_focus(seat);
if (view == NULL) {
return;
}
switch (cursor->mode) {
case ROOTS_CURSOR_MOVE:
view_move(view, cursor->view_x, cursor->view_y);
break;
case ROOTS_CURSOR_RESIZE:
view_move_resize(view, cursor->view_x, cursor->view_y, cursor->view_width, cursor->view_height);
break;
case ROOTS_CURSOR_ROTATE:
view->rotation = cursor->view_rotation;
break;
case ROOTS_CURSOR_PASSTHROUGH:
break;
}
cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
}
struct roots_seat *input_last_active_seat(struct roots_input *input) {
struct roots_seat *seat = NULL, *_seat;
wl_list_for_each(_seat, &input->seats, link) {
// Keep the seat whose last event is the most recent: compare tv_sec first,
// and fall back to tv_nsec only when the seconds are equal.
if (!seat || _seat->seat->last_event.tv_sec > seat->seat->last_event.tv_sec ||
(_seat->seat->last_event.tv_sec == seat->seat->last_event.tv_sec &&
_seat->seat->last_event.tv_nsec > seat->seat->last_event.tv_nsec)) {
seat = _seat;
}
}
return seat;
}
| 1 | 11,743 | No need to damage after `roots_drag_icon_update_position`, this is already done in `roots_drag_icon_update_position` | swaywm-wlroots | c |
@@ -102,11 +102,13 @@ def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
return model
-def preprocess_example_input(input_config):
+def preprocess_example_input(input_config, device=torch.device('cpu')):
"""Prepare an example input image for ``generate_inputs_and_wrap_model``.
Args:
input_config (dict): customized config describing the example input.
+ device (<class 'torch.device'>): device type (CPU or CUDA)
+ for the one_img.
Returns:
tuple: (one_img, one_meta), tensor of the example input image and \ | 1 | from functools import partial
import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
def generate_inputs_and_wrap_model(config_path,
checkpoint_path,
input_config,
cfg_options=None):
"""Prepare sample input and wrap model for ONNX export.
The ONNX export API only accept args, and all inputs should be
torch.Tensor or corresponding types (such as tuple of tensor).
So we should call this function before exporting. This function will:
1. generate corresponding inputs which are used to execute the model.
2. Wrap the model's forward function.
For example, the MMDet models' forward function has a parameter
``return_loss: bool``. We want to set it to False, but the export API
supports neither bool arguments nor kwargs, so we have to replace the forward
method like ``model.forward = partial(model.forward, return_loss=False)``.
Args:
config_path (str): the OpenMMLab config for the model we want to
export to ONNX
checkpoint_path (str): Path to the corresponding checkpoint
input_config (dict): the exact data in this dict depends on the
framework. For MMSeg, we can just declare the input shape
and generate the dummy data accordingly. However, for MMDet,
we may need to pass a real image path; otherwise NMS may return
None because no valid bbox is produced.
Returns:
tuple: (model, tensor_data) wrapped model which can be called by
``model(*tensor_data)`` and a list of inputs which are used to
execute the model while exporting.
"""
model = build_model_from_cfg(
config_path, checkpoint_path, cfg_options=cfg_options)
one_img, one_meta = preprocess_example_input(input_config)
tensor_data = [one_img]
model.forward = partial(
model.forward, img_metas=[[one_meta]], return_loss=False)
# PyTorch 1.3 has some bugs, which we have to work around
# by replacing the existing ops
opset_version = 11
# keep the import inside the function so it does not raise an ImportError
# when this function is unused
try:
from mmcv.onnx.symbolic import register_extra_symbolics
except ModuleNotFoundError:
raise NotImplementedError('please update mmcv to version>=v1.0.4')
register_extra_symbolics(opset_version)
return model, tensor_data
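# A minimal usage sketch for the function above (the config/checkpoint paths
# and output filename below are illustrative placeholders, not files shipped
# with this repo):
# >>> input_config = {
# >>>     'input_shape': (1, 3, 224, 224),
# >>>     'input_path': 'demo/demo.jpg',
# >>>     'normalize_cfg': {
# >>>         'mean': (123.675, 116.28, 103.53),
# >>>         'std': (58.395, 57.12, 57.375)
# >>>     }
# >>> }
# >>> model, tensor_data = generate_inputs_and_wrap_model(
# >>>     'configs/retinanet/retinanet_r50_fpn_1x_coco.py',
# >>>     'checkpoints/retinanet_r50_fpn_1x_coco.pth',
# >>>     input_config)
# >>> torch.onnx.export(model, tuple(tensor_data), 'tmp.onnx',
# >>>     opset_version=11)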
def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
"""Build a model from config and load the given checkpoint.
Args:
config_path (str): the OpenMMLab config for the model we want to
export to ONNX
checkpoint_path (str): Path to the corresponding checkpoint
Returns:
torch.nn.Module: the built model
"""
from mmdet.models import build_detector
cfg = mmcv.Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# build the model
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu')
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
from mmdet.datasets import DATASETS
dataset = DATASETS.get(cfg.data.test['type'])
assert dataset is not None
model.CLASSES = dataset.CLASSES
model.cpu().eval()
return model
def preprocess_example_input(input_config):
"""Prepare an example input image for ``generate_inputs_and_wrap_model``.
Args:
input_config (dict): customized config describing the example input.
Returns:
tuple: (one_img, one_meta), tensor of the example input image and \
meta information for the example input image.
Examples:
>>> from mmdet.core.export import preprocess_example_input
>>> input_config = {
>>> 'input_shape': (1,3,224,224),
>>> 'input_path': 'demo/demo.jpg',
>>> 'normalize_cfg': {
>>> 'mean': (123.675, 116.28, 103.53),
>>> 'std': (58.395, 57.12, 57.375)
>>> }
>>> }
>>> one_img, one_meta = preprocess_example_input(input_config)
>>> print(one_img.shape)
torch.Size([1, 3, 224, 224])
>>> print(one_meta)
{'img_shape': (224, 224, 3),
'ori_shape': (224, 224, 3),
'pad_shape': (224, 224, 3),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False}
"""
input_path = input_config['input_path']
input_shape = input_config['input_shape']
one_img = mmcv.imread(input_path)
one_img = mmcv.imresize(one_img, input_shape[2:][::-1])
show_img = one_img.copy()
if 'normalize_cfg' in input_config.keys():
normalize_cfg = input_config['normalize_cfg']
mean = np.array(normalize_cfg['mean'], dtype=np.float32)
std = np.array(normalize_cfg['std'], dtype=np.float32)
to_rgb = normalize_cfg.get('to_rgb', True)
one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb)
one_img = one_img.transpose(2, 0, 1)
one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(
True)
(_, C, H, W) = input_shape
one_meta = {
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': np.ones(4, dtype=np.float32),
'flip': False,
'show_img': show_img,
'flip_direction': None
}
return one_img, one_meta
| 1 | 25,490 | Have you tested exporting to ONNX with `device=cuda`? | open-mmlab-mmdetection | py |
@@ -78,10 +78,14 @@ func newJobLogOpts(vars jobLogsVars) (*jobLogsOpts, error) {
// Validate returns an error if the values provided by flags are invalid.
func (o *jobLogsOpts) Validate() error {
if o.appName != "" {
- _, err := o.configStore.GetApplication(o.appName)
- if err != nil {
+ if _, err := o.configStore.GetApplication(o.appName); err != nil {
return err
}
+ if o.name != "" {
+ if _, err := o.configStore.GetJob(o.appName, o.name); err != nil {
+ return err
+ }
+ }
}
if o.since != 0 && o.humanStartTime != "" { | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/logging"
"github.com/aws/copilot-cli/internal/pkg/term/log"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/spf13/cobra"
)
const (
jobAppNamePrompt = "Which application does your job belong to?"
)
type jobLogsVars struct {
wkldLogsVars
includeStateMachineLogs bool // Whether to include the logs from the state machine log streams
}
type jobLogsOpts struct {
jobLogsVars
wkldLogOpts
}
func newJobLogOpts(vars jobLogsVars) (*jobLogsOpts, error) {
configStore, err := config.NewStore()
if err != nil {
return nil, fmt.Errorf("connect to environment config store: %w", err)
}
deployStore, err := deploy.NewStore(configStore)
if err != nil {
return nil, fmt.Errorf("connect to deploy store: %w", err)
}
opts := &jobLogsOpts{
jobLogsVars: vars,
wkldLogOpts: wkldLogOpts{
w: log.OutputWriter,
configStore: configStore,
deployStore: deployStore,
sel: selector.NewDeploySelect(prompt.New(), configStore, deployStore),
},
}
opts.initLogsSvc = func() error {
env, err := opts.configStore.GetEnvironment(opts.appName, opts.envName)
if err != nil {
return fmt.Errorf("get environment: %w", err)
}
sess, err := sessions.NewProvider().FromRole(env.ManagerRoleARN, env.Region)
if err != nil {
return err
}
opts.logsSvc, err = logging.NewServiceClient(&logging.NewServiceLogsConfig{
Sess: sess,
App: opts.appName,
Env: opts.envName,
Svc: opts.name,
})
if err != nil {
return err
}
return nil
}
return opts, nil
}
// Validate returns an error if the values provided by flags are invalid.
func (o *jobLogsOpts) Validate() error {
if o.appName != "" {
_, err := o.configStore.GetApplication(o.appName)
if err != nil {
return err
}
}
if o.since != 0 && o.humanStartTime != "" {
return errors.New("only one of --since or --start-time may be used")
}
if o.humanEndTime != "" && o.follow {
return errors.New("only one of --follow or --end-time may be used")
}
if o.since != 0 {
if o.since < 0 {
return fmt.Errorf("--since must be greater than 0")
}
// round up to the nearest second
o.startTime = parseSince(o.since)
}
if o.humanStartTime != "" {
startTime, err := parseRFC3339(o.humanStartTime)
if err != nil {
return fmt.Errorf(`invalid argument %s for "--start-time" flag: %w`, o.humanStartTime, err)
}
o.startTime = aws.Int64(startTime)
}
if o.humanEndTime != "" {
endTime, err := parseRFC3339(o.humanEndTime)
if err != nil {
return fmt.Errorf(`invalid argument %s for "--end-time" flag: %w`, o.humanEndTime, err)
}
o.endTime = aws.Int64(endTime)
}
if o.limit != 0 && (o.limit < cwGetLogEventsLimitMin || o.limit > cwGetLogEventsLimitMax) {
return fmt.Errorf("--limit %d is out-of-bounds, value must be between %d and %d", o.limit, cwGetLogEventsLimitMin, cwGetLogEventsLimitMax)
}
return nil
}
// Ask asks for fields that are required but not passed in.
func (o *jobLogsOpts) Ask() error {
if err := o.askApp(); err != nil {
return err
}
return nil
}
func (o *jobLogsOpts) askApp() error {
if o.appName != "" {
return nil
}
app, err := o.sel.Application(jobAppNamePrompt, svcAppNameHelpPrompt)
if err != nil {
return fmt.Errorf("select application: %w", err)
}
o.appName = app
return nil
}
// Execute outputs logs of the job.
func (o *jobLogsOpts) Execute() error {
return nil
}
// buildJobLogsCmd builds the command for displaying job logs in an application.
func buildJobLogsCmd() *cobra.Command {
vars := jobLogsVars{}
cmd := &cobra.Command{
Use: "logs",
Short: "Displays logs of a deployed job.",
Hidden: true,
Example: `
Displays logs of the job "my-job" in environment "test".
/code $ copilot job logs -n my-job -e test
Displays logs in the last hour.
/code $ copilot job logs --since 1h
Displays logs from 2006-01-02T15:04:05 to 2006-01-02T15:05:05.
/code $ copilot job logs --start-time 2006-01-02T15:04:05+00:00 --end-time 2006-01-02T15:05:05+00:00
Displays logs from specific task IDs.
/code $ copilot job logs --tasks 709c7eae05f947f6861b150372ddc443,1de57fd63c6a4920ac416d02add891b9
Displays logs in real time.
/code $ copilot job logs --follow
Displays container logs and state machine execution logs from the last execution.
/code $ copilot job logs --include-state-machine`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newJobLogOpts(vars)
if err != nil {
return err
}
if err := opts.Validate(); err != nil {
return err
}
if err := opts.Ask(); err != nil {
return err
}
return opts.Execute()
}),
}
cmd.Flags().StringVarP(&vars.name, nameFlag, nameFlagShort, "", svcFlagDescription)
cmd.Flags().StringVarP(&vars.envName, envFlag, envFlagShort, "", envFlagDescription)
cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription)
cmd.Flags().StringVar(&vars.humanStartTime, startTimeFlag, "", startTimeFlagDescription)
cmd.Flags().StringVar(&vars.humanEndTime, endTimeFlag, "", endTimeFlagDescription)
cmd.Flags().BoolVar(&vars.shouldOutputJSON, jsonFlag, false, jsonFlagDescription)
cmd.Flags().BoolVar(&vars.follow, followFlag, false, followFlagDescription)
cmd.Flags().DurationVar(&vars.since, sinceFlag, 0, sinceFlagDescription)
cmd.Flags().IntVar(&vars.limit, limitFlag, 0, limitFlagDescription)
cmd.Flags().StringSliceVar(&vars.taskIDs, tasksFlag, nil, tasksLogsFlagDescription)
cmd.Flags().BoolVar(&vars.includeStateMachineLogs, includeStateMachineLogsFlag, false, includeStateMachineLogsFlagDescription)
return cmd
}
| 1 | 19,101 | Do we also need to validate `envName` flag then? `appName` and `envName` are used in `initLogsSvc` which are called by `svc logs` from within `Execute()` | aws-copilot-cli | go |
@@ -147,6 +147,19 @@ class Auth extends Controller
*/
public function restore_onSubmit()
{
+ // Force Trusted Host verification on password reset link generation
+ // regardless of config to protect against host header poisoning
+ $trustedHosts = Config::get('app.trustedHosts', false);
+ if ($trustedHosts === false) {
+ $url = Config::get('app.url', null);
+ if (!empty($url)) {
+ // Explicitly only allow the APP_URL host and subdomains
+ Request::setTrustedHosts(['^(.+\.)?' . preg_quote(parse_url($url, PHP_URL_HOST)) . '$']);
+ // Trigger the host validation logic
+ Request::getHost();
+ }
+ }
+
$rules = [
'login' => 'required|between:2,255'
]; | 1 | <?php namespace Backend\Controllers;
use Mail;
use Flash;
use Backend;
use Request;
use Validator;
use BackendAuth;
use Backend\Models\AccessLog;
use Backend\Classes\Controller;
use System\Classes\UpdateManager;
use ApplicationException;
use ValidationException;
use Exception;
use Config;
/**
* Authentication controller
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*
*/
class Auth extends Controller
{
/**
* @var array Public controller actions
*/
protected $publicActions = ['index', 'signin', 'signout', 'restore', 'reset'];
/**
* Constructor.
*/
public function __construct()
{
parent::__construct();
$this->layout = 'auth';
}
/**
* Default route, redirects to signin.
*/
public function index()
{
return Backend::redirect('backend/auth/signin');
}
/**
* Displays the log in page.
*/
public function signin()
{
$this->bodyClass = 'signin';
// Prevent caching of this page so a stale copy cannot submit an invalid security token
$this->setResponseHeader('Cache-Control', 'no-cache, no-store, must-revalidate');
try {
if (post('postback')) {
return $this->signin_onSubmit();
}
$this->bodyClass .= ' preload';
} catch (Exception $ex) {
Flash::error($ex->getMessage());
}
}
public function signin_onSubmit()
{
$rules = [
'login' => 'required|between:2,255',
'password' => 'required|between:4,255'
];
$validation = Validator::make(post(), $rules);
if ($validation->fails()) {
throw new ValidationException($validation);
}
if (is_null($remember = Config::get('cms.backendForceRemember', true))) {
$remember = (bool) post('remember');
}
// Authenticate user
$user = BackendAuth::authenticate([
'login' => post('login'),
'password' => post('password')
], $remember);
if (is_null($runMigrationsOnLogin = Config::get('cms.runMigrationsOnLogin', null))) {
$runMigrationsOnLogin = Config::get('app.debug', false);
}
if ($runMigrationsOnLogin) {
try {
// Load version updates
UpdateManager::instance()->update();
} catch (Exception $ex) {
Flash::error($ex->getMessage());
}
}
// Log the sign in event
AccessLog::add($user);
// Redirect to the intended page after successful sign in
return Backend::redirectIntended('backend');
}
/**
* Logs out a backend user.
*/
public function signout()
{
if (BackendAuth::isImpersonator()) {
BackendAuth::stopImpersonate();
} else {
BackendAuth::logout();
}
// Add HTTP Header 'Clear Site Data' to purge all sensitive data upon signout
if (Request::secure()) {
$this->setResponseHeader('Clear-Site-Data', 'cache, cookies, storage, executionContexts');
}
return Backend::redirect('backend');
}
/**
* Request a password reset verification code.
*/
public function restore()
{
try {
if (post('postback')) {
return $this->restore_onSubmit();
}
} catch (Exception $ex) {
Flash::error($ex->getMessage());
}
}
/**
* Submits the restore form.
*/
public function restore_onSubmit()
{
$rules = [
'login' => 'required|between:2,255'
];
$validation = Validator::make(post(), $rules);
if ($validation->fails()) {
throw new ValidationException($validation);
}
$user = BackendAuth::findUserByLogin(post('login'));
if (!$user) {
throw new ValidationException([
'login' => trans('backend::lang.account.restore_error', ['login' => post('login')])
]);
}
Flash::success(trans('backend::lang.account.restore_success'));
$code = $user->getResetPasswordCode();
$link = Backend::url('backend/auth/reset/' . $user->id . '/' . $code);
$data = [
'name' => $user->full_name,
'link' => $link,
];
Mail::send('backend::mail.restore', $data, function ($message) use ($user) {
$message->to($user->email, $user->full_name)->subject(trans('backend::lang.account.password_reset'));
});
return Backend::redirect('backend/auth/signin');
}
/**
* Reset backend user password using verification code.
*/
public function reset($userId = null, $code = null)
{
try {
if (post('postback')) {
return $this->reset_onSubmit();
}
if (!$userId || !$code) {
throw new ApplicationException(trans('backend::lang.account.reset_error'));
}
} catch (Exception $ex) {
Flash::error($ex->getMessage());
}
$this->vars['code'] = $code;
$this->vars['id'] = $userId;
}
/**
* Submits the reset form.
*/
public function reset_onSubmit()
{
if (!post('id') || !post('code')) {
throw new ApplicationException(trans('backend::lang.account.reset_error'));
}
$rules = [
'password' => 'required|between:4,255'
];
$validation = Validator::make(post(), $rules);
if ($validation->fails()) {
throw new ValidationException($validation);
}
$code = post('code');
$user = BackendAuth::findUserById(post('id'));
if (!$user || !$user->checkResetPasswordCode($code)) {
throw new ApplicationException(trans('backend::lang.account.reset_error'));
}
if (!$user->attemptResetPassword($code, post('password'))) {
throw new ApplicationException(trans('backend::lang.account.reset_fail'));
}
$user->clearResetPassword();
Flash::success(trans('backend::lang.account.reset_success'));
return Backend::redirect('backend/auth/signin');
}
}
| 1 | 19,278 | @LukeTowers I think I would prefer that we don't force it, on the basis that: a) some people would be opting to configure their web server to protect against this kind of attack and would disable this feature in October CMS to get a small performance increase. b) it might be a bit misleading to say that `app.trustedHosts` set to `false` disables the feature entirely, just to have it be overridden here regardless. | octobercms-october | php |
@@ -259,7 +259,7 @@ func (c *NetworkPolicyController) triggerParentGroupSync(grp *antreatypes.Group)
// triggerCNPUpdates triggers processing of ClusterNetworkPolicies associated with the input ClusterGroup.
func (c *NetworkPolicyController) triggerCNPUpdates(cg *crdv1alpha3.ClusterGroup) error {
// If a ClusterGroup is added/updated, it might have a reference in ClusterNetworkPolicy.
- cnps, err := c.cnpInformer.Informer().GetIndexer().ByIndex(ClusterGroupIndex, cg.Name)
+ cnps, err := c.acnpInformer.Informer().GetIndexer().ByIndex(ClusterGroupIndex, cg.Name)
if err != nil {
klog.Errorf("Error retrieving ClusterNetworkPolicies corresponding to ClusterGroup %s", cg.Name)
return err | 1 | // Copyright 2021 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package networkpolicy
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"antrea.io/antrea/pkg/apis/controlplane"
crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1"
crdv1alpha3 "antrea.io/antrea/pkg/apis/crd/v1alpha3"
"antrea.io/antrea/pkg/controller/networkpolicy/store"
antreatypes "antrea.io/antrea/pkg/controller/types"
)
// addClusterGroup is responsible for processing the ADD event of a ClusterGroup resource.
func (c *NetworkPolicyController) addClusterGroup(curObj interface{}) {
cg := curObj.(*crdv1alpha3.ClusterGroup)
key := internalGroupKeyFunc(cg)
klog.V(2).Infof("Processing ADD event for ClusterGroup %s", cg.Name)
newGroup := c.processClusterGroup(cg)
klog.V(2).Infof("Creating new internal Group %s", newGroup.UID)
c.internalGroupStore.Create(newGroup)
c.enqueueInternalGroup(key)
}
// updateClusterGroup is responsible for processing the UPDATE event of a ClusterGroup resource.
func (c *NetworkPolicyController) updateClusterGroup(oldObj, curObj interface{}) {
cg := curObj.(*crdv1alpha3.ClusterGroup)
og := oldObj.(*crdv1alpha3.ClusterGroup)
key := internalGroupKeyFunc(cg)
klog.V(2).Infof("Processing UPDATE event for ClusterGroup %s", cg.Name)
newGroup := c.processClusterGroup(cg)
oldGroup := c.processClusterGroup(og)
selectorUpdated := func() bool {
return getNormalizedNameForSelector(newGroup.Selector) != getNormalizedNameForSelector(oldGroup.Selector)
}
svcRefUpdated := func() bool {
oldSvc, newSvc := oldGroup.ServiceReference, newGroup.ServiceReference
if oldSvc != nil && newSvc != nil && oldSvc.Name == newSvc.Name && oldSvc.Namespace == newSvc.Namespace {
return false
} else if oldSvc == nil && newSvc == nil {
return false
}
return true
}
ipBlocksUpdated := func() bool {
oldIPBs, newIPBs := sets.String{}, sets.String{}
for _, ipb := range oldGroup.IPBlocks {
oldIPBs.Insert(ipNetToCIDRStr(ipb.CIDR))
}
for _, ipb := range newGroup.IPBlocks {
newIPBs.Insert(ipNetToCIDRStr(ipb.CIDR))
}
return !oldIPBs.Equal(newIPBs)
}
childGroupsUpdated := func() bool {
oldChildGroups, newChildGroups := sets.String{}, sets.String{}
for _, c := range oldGroup.ChildGroups {
oldChildGroups.Insert(c)
}
for _, c := range newGroup.ChildGroups {
newChildGroups.Insert(c)
}
return !oldChildGroups.Equal(newChildGroups)
}
if !ipBlocksUpdated() && !svcRefUpdated() && !selectorUpdated() && !childGroupsUpdated() {
// No change in the contents of the ClusterGroup. No need to enqueue for further sync.
return
}
c.internalGroupStore.Update(newGroup)
c.enqueueInternalGroup(key)
}
// deleteClusterGroup is responsible for processing the DELETE event of a ClusterGroup resource.
func (c *NetworkPolicyController) deleteClusterGroup(oldObj interface{}) {
og, ok := oldObj.(*crdv1alpha3.ClusterGroup)
klog.V(2).Infof("Processing DELETE event for ClusterGroup %s", og.Name)
if !ok {
tombstone, ok := oldObj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Error decoding object when deleting ClusterGroup, invalid type: %v", oldObj)
return
}
og, ok = tombstone.Obj.(*crdv1alpha3.ClusterGroup)
if !ok {
klog.Errorf("Error decoding object tombstone when deleting ClusterGroup, invalid type: %v", tombstone.Obj)
return
}
}
key := internalGroupKeyFunc(og)
klog.V(2).Infof("Deleting internal Group %s", key)
err := c.internalGroupStore.Delete(key)
if err != nil {
klog.Errorf("Unable to delete internal Group %s from store: %v", key, err)
}
c.enqueueInternalGroup(key)
}
func (c *NetworkPolicyController) processClusterGroup(cg *crdv1alpha3.ClusterGroup) *antreatypes.Group {
internalGroup := antreatypes.Group{
Name: cg.Name,
UID: cg.UID,
}
if len(cg.Spec.ChildGroups) > 0 {
for _, childCGName := range cg.Spec.ChildGroups {
internalGroup.ChildGroups = append(internalGroup.ChildGroups, string(childCGName))
}
return &internalGroup
}
if len(cg.Spec.IPBlocks) > 0 {
for i := range cg.Spec.IPBlocks {
ipb, _ := toAntreaIPBlockForCRD(&cg.Spec.IPBlocks[i])
internalGroup.IPBlocks = append(internalGroup.IPBlocks, *ipb)
}
return &internalGroup
}
svcSelector := cg.Spec.ServiceReference
if svcSelector != nil {
// ServiceReference will be converted to groupSelector once the internalGroup is synced.
internalGroup.ServiceReference = &controlplane.ServiceReference{
Namespace: svcSelector.Namespace,
Name: svcSelector.Name,
}
} else {
groupSelector := toGroupSelector("", cg.Spec.PodSelector, cg.Spec.NamespaceSelector, cg.Spec.ExternalEntitySelector)
internalGroup.Selector = groupSelector
}
return &internalGroup
}
// filterInternalGroupsForService computes a list of internal Group keys which references the Service.
func (c *NetworkPolicyController) filterInternalGroupsForService(obj metav1.Object) sets.String {
matchingKeySet := sets.String{}
indexKey, _ := cache.MetaNamespaceKeyFunc(obj)
matchedSvcGroups, _ := c.internalGroupStore.GetByIndex(store.ServiceIndex, indexKey)
for i := range matchedSvcGroups {
key, _ := store.GroupKeyFunc(matchedSvcGroups[i])
matchingKeySet.Insert(key)
}
return matchingKeySet
}
func (c *NetworkPolicyController) enqueueInternalGroup(key string) {
klog.V(4).Infof("Adding new key %s to internal Group queue", key)
c.internalGroupQueue.Add(key)
}
func (c *NetworkPolicyController) internalGroupWorker() {
for c.processNextInternalGroupWorkItem() {
}
}
// Processes an item in the "internalGroup" work queue, by calling
// syncInternalGroup after casting the item to a string (Group key).
// If syncInternalGroup returns an error, this function handles it by re-queueing
// the item so that it can be processed again later. If syncInternalGroup is
// successful, the ClusterGroup is removed from the queue until we get notified
// of a new change. This function returns false if and only if the work queue
// was shut down (no more items will be processed).
func (c *NetworkPolicyController) processNextInternalGroupWorkItem() bool {
key, quit := c.internalGroupQueue.Get()
if quit {
return false
}
defer c.internalGroupQueue.Done(key)
err := c.syncInternalGroup(key.(string))
if err != nil {
// Put the item back in the workqueue to handle any transient errors.
c.internalGroupQueue.AddRateLimited(key)
klog.Errorf("Failed to sync internal Group %s: %v", key, err)
return true
}
// If no error occurs we Forget this item so it does not get queued again until
// another change happens.
c.internalGroupQueue.Forget(key)
return true
}
func (c *NetworkPolicyController) syncInternalGroup(key string) error {
// Retrieve the internal Group corresponding to this key.
grpObj, found, _ := c.internalGroupStore.Get(key)
if !found {
klog.V(2).Infof("Internal group %s not found.", key)
c.groupingInterface.DeleteGroup(clusterGroupType, key)
return nil
}
grp := grpObj.(*antreatypes.Group)
// Retrieve the ClusterGroup corresponding to this key.
cg, err := c.cgLister.Get(grp.Name)
if err != nil {
klog.Infof("Didn't find the ClusterGroup %s, skip processing of internal group", grp.Name)
return nil
}
selectorUpdated := c.processServiceReference(grp)
if grp.Selector != nil {
c.groupingInterface.AddGroup(clusterGroupType, grp.Name, grp.Selector)
} else {
c.groupingInterface.DeleteGroup(clusterGroupType, grp.Name)
}
if selectorUpdated {
// Update the internal Group object in the store with the new selector.
updatedGrp := &antreatypes.Group{
UID: grp.UID,
Name: grp.Name,
Selector: grp.Selector,
ServiceReference: grp.ServiceReference,
ChildGroups: grp.ChildGroups,
}
klog.V(2).Infof("Updating existing internal Group %s", key)
c.internalGroupStore.Update(updatedGrp)
}
// Update the ClusterGroup status to Realized as Antrea has recognized the Group and
// processed its group members.
err = c.updateGroupStatus(cg, v1.ConditionTrue)
if err != nil {
klog.Errorf("Failed to update ClusterGroup %s GroupMembersComputed condition to %s: %v", cg.Name, v1.ConditionTrue, err)
return err
}
c.triggerParentGroupSync(grp)
return c.triggerCNPUpdates(cg)
}
func (c *NetworkPolicyController) triggerParentGroupSync(grp *antreatypes.Group) {
// TODO: if the max supported group nesting level increases, a Group having children
// will no longer be a valid indication that it cannot have parents.
if len(grp.ChildGroups) == 0 {
parentGroupObjs, err := c.internalGroupStore.GetByIndex(store.ChildGroupIndex, grp.Name)
if err != nil {
klog.Errorf("Error retrieving parents of ClusterGroup %s: %v", grp.Name, err)
}
for _, p := range parentGroupObjs {
parentGrp := p.(*antreatypes.Group)
c.enqueueInternalGroup(parentGrp.Name)
}
}
}
// triggerCNPUpdates triggers processing of ClusterNetworkPolicies associated with the input ClusterGroup.
func (c *NetworkPolicyController) triggerCNPUpdates(cg *crdv1alpha3.ClusterGroup) error {
// If a ClusterGroup is added/updated, it might have a reference in ClusterNetworkPolicy.
cnps, err := c.cnpInformer.Informer().GetIndexer().ByIndex(ClusterGroupIndex, cg.Name)
if err != nil {
klog.Errorf("Error retrieving ClusterNetworkPolicies corresponding to ClusterGroup %s", cg.Name)
return err
}
for _, obj := range cnps {
cnp := obj.(*crdv1alpha1.ClusterNetworkPolicy)
// Re-process ClusterNetworkPolicies which may be affected due to updates to CG.
curInternalNP := c.processClusterNetworkPolicy(cnp)
klog.V(2).Infof("Updating existing internal NetworkPolicy %s for %s", curInternalNP.Name, curInternalNP.SourceRef.ToString())
key := internalNetworkPolicyKeyFunc(cnp)
// Lock access to internal NetworkPolicy store such that concurrent access
// to an internal NetworkPolicy is not allowed. This will avoid the
// case in which an Update to an internal NetworkPolicy object may
// cause the SpanMeta member to be overridden with stale SpanMeta members
// from an older internal NetworkPolicy.
c.internalNetworkPolicyMutex.Lock()
oldInternalNPObj, _, _ := c.internalNetworkPolicyStore.Get(key)
oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy)
// Must preserve old internal NetworkPolicy Span.
curInternalNP.SpanMeta = oldInternalNP.SpanMeta
c.internalNetworkPolicyStore.Update(curInternalNP)
// Unlock the internal NetworkPolicy store.
c.internalNetworkPolicyMutex.Unlock()
// Enqueue addressGroup keys to update their group members.
// TODO: optimize this to avoid enqueueing address groups when not updated.
for _, atg := range curInternalNP.AppliedToGroups {
c.enqueueAppliedToGroup(atg)
}
for _, rule := range curInternalNP.Rules {
for _, addrGroupName := range rule.From.AddressGroups {
c.enqueueAddressGroup(addrGroupName)
}
for _, addrGroupName := range rule.To.AddressGroups {
c.enqueueAddressGroup(addrGroupName)
}
}
c.enqueueInternalNetworkPolicy(key)
c.deleteDereferencedAddressGroups(oldInternalNP)
for _, atg := range oldInternalNP.AppliedToGroups {
c.deleteDereferencedAppliedToGroup(atg)
}
}
return nil
}
// updateGroupStatus updates the Status subresource for a ClusterGroup.
func (c *NetworkPolicyController) updateGroupStatus(cg *crdv1alpha3.ClusterGroup, cStatus v1.ConditionStatus) error {
condStatus := crdv1alpha3.GroupCondition{
Status: cStatus,
Type: crdv1alpha3.GroupMembersComputed,
}
if groupMembersComputedConditionEqual(cg.Status.Conditions, condStatus) {
// There is no change in conditions.
return nil
}
condStatus.LastTransitionTime = metav1.Now()
status := crdv1alpha3.GroupStatus{
Conditions: []crdv1alpha3.GroupCondition{condStatus},
}
klog.V(4).Infof("Updating ClusterGroup %s status to %#v", cg.Name, condStatus)
toUpdate := cg.DeepCopy()
toUpdate.Status = status
_, err := c.crdClient.CrdV1alpha3().ClusterGroups().UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{})
return err
}
// groupMembersComputedConditionEqual checks whether the condition status for GroupMembersComputed condition
// is same. Returns true if equal, otherwise returns false. It disregards the lastTransitionTime field.
func groupMembersComputedConditionEqual(conds []crdv1alpha3.GroupCondition, condition crdv1alpha3.GroupCondition) bool {
for _, c := range conds {
if c.Type == crdv1alpha3.GroupMembersComputed {
if c.Status == condition.Status {
return true
}
}
}
return false
}
// processServiceReference knows how to process the serviceReference in the group, and set the group
// selector based on the Service referenced. It returns true if the group's selector needs to be
// updated after serviceReference processing, and false otherwise.
func (c *NetworkPolicyController) processServiceReference(group *antreatypes.Group) bool {
svcRef := group.ServiceReference
if svcRef == nil {
return false
}
originalSelectorName := getNormalizedNameForSelector(group.Selector)
svc, err := c.serviceLister.Services(svcRef.Namespace).Get(svcRef.Name)
if err != nil {
klog.V(2).Infof("Error getting Service object %s/%s: %v, setting empty selector for Group %s", svcRef.Namespace, svcRef.Name, err, group.Name)
group.Selector = nil
return originalSelectorName != getNormalizedNameForSelector(nil)
}
newSelector := c.serviceToGroupSelector(svc)
group.Selector = newSelector
return originalSelectorName != getNormalizedNameForSelector(newSelector)
}
// serviceToGroupSelector knows how to generate GroupSelector for a Service.
func (c *NetworkPolicyController) serviceToGroupSelector(service *v1.Service) *antreatypes.GroupSelector {
if len(service.Spec.Selector) == 0 {
klog.Infof("Service %s/%s is without selectors and not supported by serviceReference in ClusterGroup", service.Namespace, service.Name)
return nil
}
svcPodSelector := metav1.LabelSelector{
MatchLabels: service.Spec.Selector,
}
// Convert Service.spec.selector to GroupSelector by setting the Namespace to the Service's Namespace
// and podSelector to Service's selector.
groupSelector := toGroupSelector(service.Namespace, &svcPodSelector, nil, nil)
return groupSelector
}
// GetAssociatedGroups retrieves the internal Groups associated with the entity being
// queried (Pod or ExternalEntity identified by name and namespace).
func (c *NetworkPolicyController) GetAssociatedGroups(name, namespace string) ([]antreatypes.Group, error) {
// Try Pod first, then ExternalEntity.
groups, exists := c.groupingInterface.GetGroupsForPod(namespace, name)
if !exists {
groups, exists = c.groupingInterface.GetGroupsForExternalEntity(namespace, name)
if !exists {
return nil, nil
}
}
clusterGroups, exists := groups[clusterGroupType]
if !exists {
return nil, nil
}
var groupObjs []antreatypes.Group
for _, g := range clusterGroups {
groupObjs = append(groupObjs, c.getAssociatedGroupsByName(g)...)
}
// Remove duplicates in the groupObj slice.
groupKeys, j := make(map[string]bool), 0
for _, g := range groupObjs {
if _, exists := groupKeys[g.Name]; !exists {
groupKeys[g.Name] = true
groupObjs[j] = g
j++
}
}
return groupObjs[:j], nil
}
// getAssociatedGroupsByName retrieves the internal Group and all its parent Group objects
// (if any) by Group name.
func (c *NetworkPolicyController) getAssociatedGroupsByName(grpName string) []antreatypes.Group {
var groups []antreatypes.Group
groupObj, found, _ := c.internalGroupStore.Get(grpName)
if !found {
return groups
}
grp := groupObj.(*antreatypes.Group)
groups = append(groups, *grp)
parentGroupObjs, err := c.internalGroupStore.GetByIndex(store.ChildGroupIndex, grp.Name)
if err != nil {
klog.Errorf("Error retrieving parents of ClusterGroup %s: %v", grp.Name, err)
}
for _, p := range parentGroupObjs {
parentGrp := p.(*antreatypes.Group)
groups = append(groups, *parentGrp)
}
return groups
}
// GetGroupMembers returns the current members of a ClusterGroup.
func (c *NetworkPolicyController) GetGroupMembers(cgName string) (controlplane.GroupMemberSet, error) {
groupObj, found, _ := c.internalGroupStore.Get(cgName)
if found {
group := groupObj.(*antreatypes.Group)
return c.getClusterGroupMemberSet(group), nil
}
return controlplane.GroupMemberSet{}, fmt.Errorf("no internal Group with name %s is found", cgName)
}
| 1 | 42,202 | i like that you are trying to correct the naming, but its making the PR longer :( i guess in future we can do such changes in separate PRs | antrea-io-antrea | go |
@@ -257,12 +257,12 @@ describe('text.accessibleTextVirtual', function() {
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2label')[0];
- // Chrome 72: This is This is a label of
- // Firefox 62: This is ARIA Label
- // Safari 12.0: This is This is a label of
+ // Chrome 86: This is This is a label of
+ // Firefox 82: This is ARIA Label everything
+ // Safari 14.0: This is This is a label of everything
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
- 'This is This is a label of'
+ 'This is This is a label of everything'
);
});
| 1 | describe('text.accessibleTextVirtual', function() {
'use strict';
var fixture = document.getElementById('fixture');
var shadowSupport = axe.testUtils.shadowSupport;
afterEach(function() {
fixture.innerHTML = '';
axe._tree = null;
});
it('is called through accessibleText with a DOM node', function() {
var accessibleText = axe.commons.text.accessibleText;
fixture.innerHTML = '<label><input type="button"></label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('input');
assert.equal(accessibleText(target), '');
});
it('should match the first example from the ARIA spec', function() {
fixture.innerHTML =
'<ul role="menubar">' +
' <!-- Rule 2A: "File" label via aria-labelledby -->' +
' <li role="menuitem" aria-haspopup="true" aria-labelledby="fileLabel" id="rule2a">' +
' <span id="fileLabel">File</span>' +
' <ul role="menu">' +
' <!-- Rule 2C: "New" label via Namefrom:contents -->' +
' <li role="menuitem" id="rule2c">New</li>' +
' <li role="menuitem">Open…</li>' +
' …' +
' </ul>' +
' </li>' +
'</ul>';
axe.testUtils.flatTreeSetup(fixture);
var rule2a = axe.utils.querySelectorAll(axe._tree, '#rule2a')[0];
var rule2c = axe.utils.querySelectorAll(axe._tree, '#rule2c')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(rule2a), 'File');
assert.equal(axe.commons.text.accessibleTextVirtual(rule2c), 'New');
});
it('should match the second example from the ARIA spec', function() {
fixture.innerHTML =
'<fieldset>' +
' <legend>Meeting alarms</legend>' +
' <!-- Rule 2A: "Beep" label given by native HTML label element -->' +
' <input type="checkbox" id="beep"> <label for="beep">Beep</label> <br>' +
' <input type="checkbox" id="mtgTitle"> <label for="mtgTitle">Display the meeting title</label> <br>' +
' <!-- Rule 2B -->' +
' <input type="checkbox" id="flash">' +
' <label for="flash">' +
' Flash the screen' +
' <!-- Rule 2A: label of text input given by aria-label, "Number of times to flash screen" -->' +
' <input type="text" value="3" size="2" id="numTimes" title="Number of times to flash screen">' +
' times' +
' </label>' +
'</fieldset>';
axe.testUtils.flatTreeSetup(fixture);
var rule2a = axe.utils.querySelectorAll(axe._tree, '#beep')[0];
var rule2b = axe.utils.querySelectorAll(axe._tree, '#flash')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(rule2a), 'Beep');
// Chrome 72: "Flash the screen 3 times"
// Firefox 62: "Flash the screen 3 times"
// Safari 12.0: "Flash the screen 3 times"
assert.equal(
axe.commons.text.accessibleTextVirtual(rule2b),
'Flash the screen 3 times'
);
});
it('should use aria-labelledby if present', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'aria-labelledby="t1label" aria-label="ARIA Label" id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t1')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is a label'
);
});
it('should use recursive aria-labelledby properly', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'aria-labelledby="t1 t1label" aria-label="ARIA Label" id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t1')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'ARIA Label This is a label'
);
});
it('should include hidden text referred to with aria-labelledby', function() {
fixture.innerHTML =
'<div id="t1label" style="display:none">This is a ' +
'<span style="visibility:hidden">hidden </span>' +
'<span aria-hidden="true">secret</span></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t1" aria-labelledby="t1label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t1')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is a hidden secret'
);
});
it('should allow setting the initial includeHidden value', function() {
fixture.innerHTML =
'<label id="lbl1" style="display:none;">hidden label</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#lbl1')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target, {
includeHidden: false
}),
''
);
assert.equal(
axe.commons.text.accessibleTextVirtual(target, {
includeHidden: true
}),
'hidden label'
);
});
it('should use aria-label if present with no labelledby', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'aria-label="ARIA Label" id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t1')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'ARIA Label');
});
it('should use alt on imgs with no ARIA', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'id="t1"> of <i>everything</i></div>' +
'<img alt="Alt text goes here" id="target">' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Alt text goes here'
);
});
it('should use alt on image inputs with no ARIA', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'id="t1"> of <i>everything</i></div>' +
'<input type="image" alt="Alt text goes here" id="target">' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Alt text goes here'
);
});
it('should not use alt on text inputs with no ARIA', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'id="t1"> of <i>everything</i></div>' +
'<input type="text" alt="Alt text goes here" id="target">' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
it('should use HTML label if no ARIA information', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t1')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'HTML Label');
});
it('should handle last ditch title attribute', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'aria-labelledby="t1label" aria-label="ARIA Label" id="t1"> of <i title="italics"></i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2label')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is This is a label of italics'
);
});
it('should handle totally empty elements', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'aria-labelledby="t1label" aria-label="ARIA Label" id="t1"> of <i></i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2label')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is This is a label of'
);
});
it('should handle author name-from roles properly', function() {
fixture.innerHTML =
'<div id="t2label" role="heading">This is ' +
' <input type="text" value="the value" ' +
' aria-labelledby="t1label" aria-label="ARIA Label" id="t1">' +
' of <i role="alert">everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2label')[0];
// Chrome 72: This is This is a label of
// Firefox 62: This is ARIA Label
// Safari 12.0: This is This is a label of
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is This is a label of'
);
});
it('should only show each node once when label is before input', function() {
fixture.innerHTML =
'<div id="target"><label for="tb1">My form input</label>' +
'<input type="text" id="tb1"></div>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'My form input'
);
});
it('should only show each node once when label follows input', function() {
fixture.innerHTML =
'<div id="target">' +
'<input type="text" id="tb1"></div>' +
'<label for="tb1">My form input</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'My form input'
);
});
it('should handle nested inputs in normal context', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'aria-labelledby="t1label" aria-label="ARIA Label" id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2label')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is This is a label of everything'
);
});
it('should handle nested inputs properly in labelledby context', function() {
// Chrome 72: This is This is a label of everything
// Firefox 62: This is ARIA Label the value of everything
// Safari 12.0: This is This is a label of everything
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'aria-labelledby="t1label" aria-label="ARIA Label" id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is ARIA Label of everything'
);
});
it('should ignore hidden inputs', function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="hidden" value="the value" ' +
'Label" id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is of everything'
);
});
it('should handle inputs with no type as if they were text inputs', function() {
fixture.innerHTML =
'<div id="t2label">This is <input value="the value" ' +
'aria-labelledby="t1label" id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2')[0];
// Chrome 70: "This is This is a label of everything"
// Firefox 62: "This is the value of everything"
// Safari 12.0: "This is This is a label of everything"
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is the value of everything'
);
});
it('should handle nested selects properly in labelledby context', function() {
fixture.innerHTML =
'<div id="t2label">This is <select multiple ' +
'aria-labelledby="t1label" id="t1">' +
'<option selected>first</option><option>second</option><option selected>third</option>' +
'</select> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2')[0];
// Chrome 70: "This is This is a label of everything"
// Firefox 62: "This is of everything"
// Safari 12.0: "This is first third label of"
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is of everything'
);
});
it('should handle nested textareas properly in labelledby context', function() {
fixture.innerHTML =
'<div id="t2label">This is <textarea ' +
'aria-labelledby="t1label" id="t1">the value</textarea> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2')[0];
// Chrome 70: "This is This is a label of everything"
// Firefox 62: "This is ARIA Label the value of everything"
// Safari 12.0: "This is This is a label of everything"
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This is the value of everything'
);
});
it('should handle ARIA labels properly in labelledby context', function() {
fixture.innerHTML =
'<div id="t2label">This <span aria-label="not a span">span</span>' +
' is <input type="text" value="the value" ' +
'aria-labelledby="t1label" id="t1"> of <i>everything</i></div>' +
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t2')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'This not a span is the value of everything'
);
});
it('should come up empty if input is labeled only by select options', function() {
fixture.innerHTML =
'<label for="target">' +
'<select id="select">' +
' <option selected="selected">Chosen</option>' +
' <option>Not Selected</option>' +
'</select>' +
'</label>' +
'<input id="target" type="text" />';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
// Chrome 70: ""
// Firefox 62: "Chosen"
// Safari 12.0: "Chosen"
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
it("should be empty if input is labeled by labeled select (ref'd string labels have spotty support)", function() {
fixture.innerHTML =
'<label for="select">My Select</label>' +
'<label for="target">' +
'<select id="select">' +
' <option selected="selected">Chosen</option>' +
' <option>Not Selected</option>' +
'</select>' +
'</label>' +
'<input id="target" type="text" />';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
it('should be empty for an empty label wrapping a select', function() {
fixture.innerHTML =
'<label>' +
'<span class="label"></span>' +
'<select id="target">' +
'<option value="1" selected="selected">Please choose a region</option>' +
'<option value="2">Coastal</option>' +
'<option value="3">Forest</option>' +
'<option value="4">Grasslands</option>' +
'<option value="5">Mountains</option>' +
'</select>' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
it('should not return select options if input is aria-labelled by a select', function() {
fixture.innerHTML =
'<label>' +
'<select id="select">' +
' <option selected="selected">Chosen</option>' +
' <option>Not Selected</option>' +
'</select>' +
'</label>' +
'<input aria-labelledby="select" type="text" id="target" />';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#target')[0];
// Chrome 70: ""
// Firefox 62: ""
// Safari 12.0: "Chosen"
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
it('should properly fall back to title', function() {
fixture.innerHTML = '<a href="#" role="presentation" title="Hello"></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should give text even for role=presentation on anchors', function() {
fixture.innerHTML = '<a href="#" role="presentation">Hello</a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should give text even for role=presentation on buttons', function() {
fixture.innerHTML = '<button role="presentation">Hello</button>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'button')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should give text even for role=presentation on summary', function() {
fixture.innerHTML = '<summary role="presentation">Hello</summary>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'summary')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should properly fall back to title', function() {
fixture.innerHTML = '<a href="#" role="none" title="Hello"></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should give text even for role=none on anchors', function() {
fixture.innerHTML = '<a href="#" role="none">Hello</a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should give text even for role=none on buttons', function() {
fixture.innerHTML = '<button role="none">Hello</button>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'button')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should give text even for role=none on summary', function() {
fixture.innerHTML = '<summary role="none">Hello</summary>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'summary')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
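// The next two tests cover whitespace handling: phrasing (inline) children join
// with no separator, while non-phrasing (block-level) children introduce a space.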
it('should not add extra spaces around phrasing elements', function() {
fixture.innerHTML = '<a href="#">Hello<span>World</span></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'HelloWorld');
});
it('should add spaces around non-phrasing elements', function() {
fixture.innerHTML = '<a href="#">Hello<div>World</div></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello World');
});
it('should not look at scripts', function() {
fixture.innerHTML =
'<a href="#"><script> var ajiasdf = true; </script></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
it('should use <label> for input buttons', function() {
fixture.innerHTML = '<label><input type="button"></label>';
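// The wrapping <label> contains no text of its own, so the computed name is empty.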
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
it('should not stop when attributes contain whitespace', function() {
fixture.innerHTML =
'<button aria-label=" " aria-labelledby=" ">Hello World</button>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'button')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello World');
});
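// aria-labelledby IDREFs resolve only within the same tree; they cannot reach
// across a shadow boundary, so the element's aria-label is used instead.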
(shadowSupport.v1 ? it : xit)(
'should only find aria-labelledby elements in the same context',
function() {
fixture.innerHTML =
'<div id="t2label">This is <input type="text" value="the value" ' +
'aria-labelledby="t1label" aria-label="ARIA Label" id="t1"> of <i>everything</i></div>' +
'<div id="shadow"></div>';
var shadow = document
.getElementById('shadow')
.attachShadow({ mode: 'open' });
shadow.innerHTML =
'<div id="t1label">This is a <b>label</b></div>' +
'<label for="t1">HTML Label</label>' +
'<input type="text" id="t2" aria-labelledby="t2label">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, '#t1')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'ARIA Label'
);
}
);
(shadowSupport.v1 ? it : xit)(
'should find attributes within a shadow tree',
function() {
fixture.innerHTML = '<div id="shadow"></div>';
var shadow = document
.getElementById('shadow')
.attachShadow({ mode: 'open' });
shadow.innerHTML = '<input type="text" id="t1" title="I will be king">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'I will be king'
);
}
);
(shadowSupport.v1 ? it : xit)(
'should find attributes within a slot on the shadow tree',
function() {
fixture.innerHTML =
'<div id="shadow"><input type="text" id="t1" title="you will be queen"></div>';
var shadow = document
.getElementById('shadow')
.attachShadow({ mode: 'open' });
shadow.innerHTML = '<slot></slot>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'you will be queen'
);
}
);
(shadowSupport.v1 ? it : xit)(
'should find fallback content for shadow DOM',
function() {
fixture.innerHTML = '<div id="shadow"></div>';
var shadow = document
.getElementById('shadow')
.attachShadow({ mode: 'open' });
shadow.innerHTML =
'<input type="text" id="t1">' +
'<label for="t1"><slot>Fallback content heroes</slot></label>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Fallback content heroes'
);
}
);
describe('figure', function() {
it('should check aria-labelledby', function() {
fixture.innerHTML =
'<div id="t1">Hello</div>' +
'<figure aria-labelledby="t1">Not part of a11yName <figcaption>Fail</figcaption></figure>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'figure')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should check aria-label', function() {
fixture.innerHTML =
'<figure aria-label="Hello">Not part of a11yName <figcaption>Fail</figcaption></figure>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'figure')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should check the figures figcaption', function() {
fixture.innerHTML =
'<figure>Not part of a11yName <figcaption>Hello</figcaption></figure>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'figure')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should check title on figure', function() {
fixture.innerHTML =
'<figure title="Hello">Not part of a11yName <figcaption></figcaption></figure>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'figure')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should fall back to innerText of figure', function() {
fixture.innerHTML = '<figure>Hello<figcaption></figcaption></figure>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'figure')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
(shadowSupport.v1 ? it : xit)(
'should check within the composed (shadow) tree',
function() {
var node = document.createElement('div');
node.innerHTML = 'Hello';
var shadowRoot = node.attachShadow({ mode: 'open' });
shadowRoot.innerHTML =
'<figure>Not part of a11yName <figcaption><slot></slot></figcaption></figure>';
fixture.appendChild(node);
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'figure')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
}
);
});
describe('img', function() {
it('should work with aria-labelledby attribute', function() {
fixture.innerHTML =
'<div id="t1">Hello</div><div id="t2">World</div>' +
'<img aria-labelledby="t1 t2">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'img')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should work with aria-label attribute', function() {
fixture.innerHTML = '<img aria-label="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'img')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should work with alt attribute', function() {
fixture.innerHTML = '<img alt="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'img')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should work with title attribute', function() {
fixture.innerHTML = '<img title="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'img')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
});
describe('input buttons', function() {
it('should find value for input type=button', function() {
fixture.innerHTML = '<input type="button" value="Hello">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should find value for input type=reset', function() {
fixture.innerHTML = '<input type="reset" value="Hello">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should find value for input type=submit', function() {
fixture.innerHTML = '<input type="submit" value="Hello">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should provide a default value for input type="submit"', function() {
fixture.innerHTML = '<input type="submit">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
// IE inserts this for us, thanks!
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
target.actualNode.value || 'Submit'
);
});
it('should provide a default value for input type="reset"', function() {
fixture.innerHTML = '<input type="reset">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
var defaultText = axe.commons.text.accessibleTextVirtual(target);
assert.isString(defaultText);
// IE inserts this for us, thanks!
assert.equal(defaultText, target.actualNode.value || 'Reset');
});
it('should find title for input type=button', function() {
fixture.innerHTML = '<input type="button" title="Hello">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Hello');
});
it('should find title for input type=reset', function() {
fixture.innerHTML = '<input type="reset" title="Hello">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
// IE does not use title, but will use the default value instead
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
target.actualNode.value || 'Hello'
);
});
it('should find title for input type=submit', function() {
fixture.innerHTML = '<input type="submit" title="Hello">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
// Again, default value takes precedence over title
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
target.actualNode.value || 'Hello'
);
});
});
describe('tables', function() {
it('should work with aria-labelledby', function() {
fixture.innerHTML =
'<div id="t1">Hello</div><div id="t2">World</div>' +
'<table aria-labelledby="t1 t2"></table>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'table')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should work with aria-label', function() {
fixture.innerHTML = '<table aria-label="Hello World"></table>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'table')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should work with the caption element', function() {
fixture.innerHTML =
'<table><caption>Hello World</caption><tr><td>Stuff</td></tr></table>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'table')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should work with the title attribute', function() {
fixture.innerHTML = '<table title="Hello World"></table>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'table')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should work with the summary attribute', function() {
fixture.innerHTML = '<table summary="Hello World"></table>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'table')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should prefer summary attribute over title attribute', function() {
// Chrome 70: "Hello world"
// Firefox 62: "Hello world"
// Safari 12.0: "FAIL"
fixture.innerHTML = '<table summary="Hello World" title="FAIL"></table>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'table')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
});
describe('text inputs', function() {
var types = ['text', 'password', 'search', 'tel', 'email', 'url', null];
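// null exercises an <input> with no type attribute, which defaults to type="text".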
it('should find aria-labelledby', function() {
types.forEach(function(type) {
var t = type ? ' type="' + type + '"' : '';
fixture.innerHTML =
'<div id="t1">Hello</div><div id="t2">World</div>' +
'<input' +
t +
' aria-labelledby="t1 t2">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World',
type
);
});
});
it('should find aria-label', function() {
types.forEach(function(type) {
var t = type ? ' type="' + type + '"' : '';
fixture.innerHTML = '<input' + t + ' aria-label="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World',
type
);
});
});
it('should find an implicit label', function() {
types.forEach(function(type) {
var t = type ? ' type="' + type + '"' : '';
fixture.innerHTML =
'<label for="t1">Hello World' + '<input' + t + '></label>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World',
type
);
});
});
it('should find an explicit label', function() {
types.forEach(function(type) {
var t = type ? ' type="' + type + '"' : '';
fixture.innerHTML =
'<label for="t1">Hello World</label>' + '<input' + t + ' id="t1">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World',
type
);
});
});
it('should find implicit labels even when the input id does not match any label', function() {
types.forEach(function(type) {
var t = type ? ' type="' + type + '"' : '';
fixture.innerHTML =
'<label for="t1">Hello World' + '<input' + t + ' id="foo"></label>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World',
type
);
});
});
it('should find a placeholder attribute', function() {
types.forEach(function(type) {
var t = type ? ' type="' + type + '"' : '';
fixture.innerHTML = '<input' + t + ' placeholder="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World',
type
);
});
});
it('should find a title attribute', function() {
types.forEach(function(type) {
var t = type ? ' type="' + type + '"' : '';
fixture.innerHTML = '<input' + t + ' title="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World',
type
);
});
});
it('should otherwise be empty string', function() {
types.forEach(function(type) {
var t = type ? ' type="' + type + '"' : '';
fixture.innerHTML = '<input' + t + '>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
});
});
describe('textarea', function() {
it('should find aria-labelledby', function() {
fixture.innerHTML =
'<div id="t1">Hello</div><div id="t2">World</div>' +
'<textarea aria-labelledby="t1 t2"></textarea>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'textarea')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find aria-label', function() {
fixture.innerHTML = '<textarea aria-label="Hello World"></textarea>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'textarea')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find an implicit label', function() {
fixture.innerHTML =
'<label for="t1">Hello World' + '<textarea></textarea></label>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'textarea')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find an explicit label', function() {
fixture.innerHTML =
'<label for="t1">Hello World</label>' + '<textarea id="t1"></textarea>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'textarea')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find a placeholder attribute', function() {
fixture.innerHTML = '<textarea placeholder="Hello World"></textarea>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'textarea')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find a title attribute', function() {
fixture.innerHTML = '<textarea title="Hello World"></textarea>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'textarea')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should otherwise be empty string', function() {
fixture.innerHTML = '<textarea></textarea>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'textarea')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
});
describe('image inputs', function() {
it('should find aria-labelledby', function() {
fixture.innerHTML =
'<div id="t1">Hello</div><div id="t2">World</div>' +
'<input type="image" aria-labelledby="t1 t2">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find aria-label', function() {
fixture.innerHTML = '<input type="image" aria-label="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find an alt attribute', function() {
fixture.innerHTML = '<input type="image" alt="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find a title attribute', function() {
fixture.innerHTML = '<input type="image" title="Hello World">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should otherwise be "Submit" string', function() {
fixture.innerHTML = '<input type="image">';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'input')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), 'Submit');
});
});
describe('a', function() {
it('should find aria-labelledby', function() {
fixture.innerHTML =
'<div id="t1">Hello</div><div id="t2">World</div>' +
'<a aria-labelledby="t1 t2"></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find aria-label', function() {
fixture.innerHTML = '<a aria-label="Hello World"></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should check subtree', function() {
fixture.innerHTML = '<a><span>Hello<span> World</span></span></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find a title attribute', function() {
fixture.innerHTML = '<a title="Hello World"></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should otherwise be empty string', function() {
fixture.innerHTML = '<a></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
it('should use text from a table with a single cell and role=presentation', function() {
fixture.innerHTML =
'<a href="example.html">' +
'<table role="presentation">' +
'<tr>' +
'<td>' +
'Descriptive Link Text' +
'</td>' +
'</tr>' +
'</table>' +
'</a>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'a')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Descriptive Link Text'
);
});
});
describe('button', function() {
it('should find aria-labelledby', function() {
fixture.innerHTML =
'<div id="t1">Hello</div><div id="t2">World</div>' +
'<button aria-labelledby="t1 t2"></button>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'button')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find aria-label', function() {
fixture.innerHTML = '<button aria-label="Hello World"></button>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'button')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should check subtree', function() {
fixture.innerHTML =
'<button><span>Hello<span> World</span></span></button>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'button')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should find a title attribute', function() {
fixture.innerHTML = '<button title="Hello World"></button>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'button')[0];
assert.equal(
axe.commons.text.accessibleTextVirtual(target),
'Hello World'
);
});
it('should otherwise be empty string', function() {
fixture.innerHTML = '<button></button>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, 'button')[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
});
describe('text level semantics', function() {
var tags = [
'em',
'strong',
'small',
's',
'cite',
'q',
'dfn',
'abbr',
'time',
'code',
'var',
'samp',
'kbd',
'sub',
'sup',
'i',
'b',
'u',
'mark',
'ruby',
'rt',
'rp',
'bdi',
'bdo',
'br',
'wbr'
];
it('should find aria-labelledby', function() {
tags.forEach(function(tag) {
fixture.innerHTML = '<div id="t1">Hello</div><div id="t2">World</div>';
var elm = document.createElement(tag);
elm.setAttribute('aria-labelledby', 't1 t2');
elm.style.display = 'inline'; // Firefox hides some of these elements because reasons...
fixture.appendChild(elm);
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.getNodeFromTree(elm);
var result = axe.commons.text.accessibleTextVirtual(target);
assert.equal(result, 'Hello World', tag);
});
});
it('should find aria-label', function() {
tags.forEach(function(tag) {
var elm = document.createElement(tag);
elm.setAttribute('aria-label', 'Hello World');
elm.style.display = 'inline'; // Firefox hack, see above
fixture.appendChild(elm);
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.getNodeFromTree(elm);
var result = axe.commons.text.accessibleTextVirtual(target);
assert.equal(result, 'Hello World', tag);
});
});
it('should find a title attribute', function() {
tags.forEach(function(tag) {
var elm = document.createElement(tag);
elm.setAttribute('title', 'Hello World');
elm.style.display = 'inline'; // Firefox hack, see above
fixture.appendChild(elm);
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.getNodeFromTree(elm);
var result = axe.commons.text.accessibleTextVirtual(target);
assert.equal(result, 'Hello World', tag);
});
});
it('should otherwise be empty string', function() {
tags.forEach(function(tag) {
fixture.innerHTML = '<' + tag + '></' + tag + '>';
axe.testUtils.flatTreeSetup(fixture);
var target = axe.utils.querySelectorAll(axe._tree, tag)[0];
assert.equal(axe.commons.text.accessibleTextVirtual(target), '');
});
});
});
describe('text.accessibleText acceptance tests', function() {
'use strict';
// Tests borrowed from the AccName 1.1 testing docs
// https://www.w3.org/wiki/AccName_1.1_Testable_Statements#Name_test_case_539
var ariaValuetext = xit; // Not accessibility supported
var pseudoText = xit; // Not accessibility supported
var fixture = document.getElementById('fixture');
var accessibleText = axe.commons.text.accessibleText;
var _unsupported;
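// Temporarily clear the unsupported list so accessible names can be computed
// from field values, as the AccName spec expects; restored in after().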
before(function() {
_unsupported = axe.commons.text.unsupported.accessibleNameFromFieldValue;
axe.commons.text.unsupported.accessibleNameFromFieldValue = [];
});
after(function() {
axe.commons.text.unsupported.accessibleNameFromFieldValue = _unsupported;
});
afterEach(function() {
fixture.innerHTML = '';
axe._tree = null;
});
it('passes test 1', function() {
fixture.innerHTML = '<input type="button" aria-label="Rich" id="test">';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Rich');
});
it('passes test 2', function() {
fixture.innerHTML =
'<div id="ID1">Rich\'s button</div>' +
'<input type="button" aria-labelledby="ID1" id="test">';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), "Rich's button");
});
it('passes test 3', function() {
fixture.innerHTML =
'<div id="ID1">Rich\'s button</div>' +
'<input type="button" aria-label="bar" aria-labelledby="ID1" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), "Rich's button");
});
it('passes test 4', function() {
fixture.innerHTML = '<input type="reset" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Reset');
});
it('passes test 5', function() {
fixture.innerHTML = '<input type="button" id="test" value="foo"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 6', function() {
fixture.innerHTML =
'<input src="baz.html" type="image" id="test" alt="foo"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 7', function() {
fixture.innerHTML =
'<label for="test">States:</label>' + '<input type="text" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'States:');
});
it('passes test 8', function() {
fixture.innerHTML =
'<label for="test">' +
'foo' +
'<input type="text" value="David"/>' +
'</label>' +
'<input type="text" id="test" value="baz"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo David');
});
it('passes test 9', function() {
fixture.innerHTML =
'<label for="test">' +
'crazy' +
' <select name="member" size="1" role="menu" tabindex="0">' +
' <option role="menuitem" value="beard" selected="true">clown</option>' +
' <option role="menuitem" value="scuba">rich</option>' +
' </select>' +
'</label> ' +
'<input type="text" id="test" value="baz"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
ariaValuetext('passes test 10', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuetext="Monday" aria-valuemin="1" aria-valuemax="7" aria-valuenow="4">' +
' </div>' +
'</label>' +
'<input type="text" id="test" value="baz"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy Monday');
});
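// Without aria-valuetext, the spinbutton contributes its aria-valuenow ("4") to the name.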
it('passes test 11', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuemin="1" aria-valuemax="7" aria-valuenow="4">' +
' </div>' +
'</label>' +
'<input type="text" id="test" value="baz"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy 4');
});
it('passes test 12', function() {
fixture.innerHTML =
'<input type="text" id="test" title="crazy" value="baz"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
pseudoText('passes test 13', function() {
fixture.innerHTML =
'<style>' +
' label:before { content:"fancy "; }' +
'</style>' +
'<label for="test">fruit</label>' +
'<input type="text" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 14', function() {
fixture.innerHTML =
'<style type="text/css">' +
' [data-after]:after { content: attr(data-after); }' +
'</style>' +
'<label for="test" data-after="test content"></label>' +
'<input type="text" id="test">';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'test content');
});
it('passes test 15', function() {
fixture.innerHTML = '<img id="test" src="foo.jpg" aria-label="1"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), '1');
});
it('passes test 16', function() {
fixture.innerHTML =
'<img id="test" src="foo.jpg" aria-label="1" alt="a" title="t"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), '1');
});
// To the best of my knowledge, this test is incorrect
// Chrome and Firefox seem to return "peanuts", so does axe-core.
xit('passes test 17', function() {
fixture.innerHTML =
'<input type="text" value="peanuts" id="test">' +
'<img aria-labelledby="test" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), '');
});
it('passes test 18', function() {
fixture.innerHTML =
'<img id="test" aria-labelledby="test" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), '');
});
// To the best of my knowledge, this test is incorrect
// Chrome and Firefox seem to return "peanuts", so does axe-core.
xit('passes test 19', function() {
fixture.innerHTML =
'<input type="text" value="peanuts" id="test">' +
'<img aria-labelledby="test" aria-label="1" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), '');
});
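// When an element appears in its own aria-labelledby list, its aria-label
// supplies that part of the name rather than recursing again.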
it('passes test 20', function() {
fixture.innerHTML =
'<img id="test" aria-labelledby="test" aria-label="1" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), '1');
});
it('passes test 21', function() {
fixture.innerHTML =
'<input type="text" value="peanuts" id="ID1">' +
'<input type="text" value="popcorn" id="ID2">' +
'<input type="text" value="apple jacks" id="ID3">' +
'<img aria-labelledby="ID1 ID2 ID3" id="test" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'peanuts popcorn apple jacks');
});
it('passes test 22', function() {
fixture.innerHTML =
'<input type="text" value="peanuts" id="ID1">' +
'<img id="test" aria-label="l" aria-labelledby="test ID1" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'l peanuts');
});
it('passes test 23', function() {
fixture.innerHTML =
'<input type="text" value="peanuts" id="ID1">' +
'<input type="text" value="popcorn" id="ID2">' +
'<img id="test" aria-label="l" aria-labelledby="test ID1 ID2" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'l peanuts popcorn');
});
it('passes test 24', function() {
fixture.innerHTML =
'<input type="text" value="peanuts" id="ID1">' +
'<input type="text" value="popcorn" id="ID2">' +
'<input type="text" value="apple jacks" id="ID3">' +
'<img id="test" aria-label="l" aria-labelledby="test ID1 ID2 ID3" alt="a" title="t" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'l peanuts popcorn apple jacks');
});
it('passes test 25', function() {
fixture.innerHTML =
'<input type="text" value="peanuts" id="ID1">' +
'<input type="text" value="popcorn" id="ID2">' +
'<input type="text" value="apple jacks" id="ID3">' +
'<img id="test" aria-label="" aria-labelledby="test ID1 ID2 ID3" alt="" title="t" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 't peanuts popcorn apple jacks');
});
it('passes test 26', function() {
fixture.innerHTML =
'<div id="test" aria-labelledby="ID1">foo</div>' +
'<span id="ID1">bar</span>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'bar');
});
it('passes test 27', function() {
fixture.innerHTML = '<div id="test" aria-label="Tag">foo</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Tag');
});
it('passes test 28', function() {
fixture.innerHTML =
'<div id="test" aria-labelledby="ID1" aria-label="Tag">foo</div>' +
'<span id="ID1">bar</span>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'bar');
});
it('passes test 29', function() {
fixture.innerHTML =
'<div id="test" aria-labelledby="ID0 ID1" aria-label="Tag">foo</div>' +
'<span id="ID0">bar</span>' +
'<span id="ID1">baz</span>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'bar baz');
});
// Should only pass in strict mode: a plain div has no role that allows
// name-from-contents, so { strict: true } yields an empty name
it('passes test 30', function() {
fixture.innerHTML = '<div id="test">Div with text</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target, { strict: true }), '');
});
it('passes test 31', function() {
fixture.innerHTML = '<div id="test" role="button">foo</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 32', function() {
fixture.innerHTML =
'<div id="test" role="button" title="Tag" style="outline:medium solid black; width:2em; height:1em;">' +
'</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Tag');
});
it('passes test 33', function() {
fixture.innerHTML =
'<div id="ID1">foo</div>' +
'<a id="test" href="test.html" aria-labelledby="ID1">bar</a>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 34', function() {
fixture.innerHTML =
'<a id="test" href="test.html" aria-label="Tag">ABC</a>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Tag');
});
it('passes test 35', function() {
fixture.innerHTML =
'<a href="test.html" id="test" aria-labelledby="ID1" aria-label="Tag">foo</a>' +
'<p id="ID1">bar</p>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'bar');
});
it('passes test 36', function() {
fixture.innerHTML =
'<a href="test.html" id="test" aria-labelledby="test ID1" aria-label="Tag"></a>' +
'<p id="ID1">foo</p>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Tag foo');
});
it('passes test 37', function() {
fixture.innerHTML = '<a href="test.html" id="test">ABC</a>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'ABC');
});
it('passes test 38', function() {
fixture.innerHTML = '<a href="test.html" id="test" title="Tag"></a>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Tag');
});
it('passes test 39', function() {
fixture.innerHTML =
'<input id="test" type="text" aria-labelledby="ID1 ID2 ID3">' +
'<p id="ID1">foo</p>' +
'<p id="ID2">bar</p>' +
'<p id="ID3">baz</p>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar baz');
});
it('passes test 40', function() {
fixture.innerHTML =
'<input id="test" type="text" aria-label="bar" aria-labelledby="ID1 test">' +
'<div id="ID1">foo</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar');
});
it('passes test 41', function() {
fixture.innerHTML =
'<input id="test" type="text"/>' + '<label for="test">foo</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 42', function() {
fixture.innerHTML =
'<input type="password" id="test">' + '<label for="test">foo</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 43', function() {
fixture.innerHTML =
'<input type="checkbox" id="test">' +
'<label for="test">foo</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 44', function() {
fixture.innerHTML =
'<input type="radio" id="test">' + '<label for="test">foo</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 45', function() {
fixture.innerHTML =
'<input type="file" id="test">' + '<label for="test">foo</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 46', function() {
fixture.innerHTML =
'<input type="image" id="test">' + '<label for="test">foo</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 47', function() {
fixture.innerHTML =
'<input type="checkbox" id="test">' +
'<label for="test">foo<input type="text" value="bar">baz</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar baz');
});
it('passes test 48', function() {
fixture.innerHTML =
'<input type="text" id="test">' +
'<label for="test">foo<input type="text" value="bar">baz</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar baz');
});
it('passes test 49', function() {
fixture.innerHTML =
'<input type="password" id="test">' +
'<label for="test">foo<input type="text" value="bar">baz</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar baz');
});
it('passes test 50', function() {
fixture.innerHTML =
'<input type="radio" id="test">' +
'<label for="test">foo<input type="text" value="bar">baz</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar baz');
});
it('passes test 51', function() {
fixture.innerHTML =
'<input type="file" id="test">' +
'<label for="test">foo <input type="text" value="bar"> baz</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar baz');
});
pseudoText('passes test 52', function() {
fixture.innerHTML =
'<style type="text/css">' +
' label:before { content: "foo"; }' +
' label:after { content: "baz"; }' +
'</style>' +
'<form>' +
' <label for="test" title="bar"><input id="test" type="text" name="test" title="buz"></label> ' +
'</form>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar baz');
});
pseudoText('passes test 53', function() {
fixture.innerHTML =
'<style type="text/css">' +
' label:before { content: "foo"; }' +
' label:after { content: "baz"; }' +
'</style>' +
'<form>' +
' <label for="test" title="bar"><input id="test" type="password" name="test" title="buz"></label> ' +
'</form>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo bar baz');
});
pseudoText('passes test 54', function() {
fixture.innerHTML =
'<style type="text/css">' +
' label:before { content: "foo"; }' +
' label:after { content: "baz"; }' +
'</style>' +
'<form>' +
' <label for="test"><input id="test" type="checkbox" name="test" title=" bar "></label>' +
'</form>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo baz');
});
pseudoText('passes test 55', function() {
fixture.innerHTML =
'<style type="text/css">' +
' label:before { content: "foo"; }' +
' label:after { content: "baz"; }' +
'</style>' +
'<form>' +
' <label for="test"><input id="test" type="radio" name="test" title=" bar "></label> ' +
'</form>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo baz');
});
pseudoText('passes test 56', function() {
fixture.innerHTML =
'<style type="text/css">' +
' label:before { content: "foo"; }' +
' label:after { content: "baz"; }' +
'</style>' +
'<form>' +
' <label for="test"><input id="test" type="file" name="test" title="bar"></label>' +
'</form>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo baz');
});
pseudoText('passes test 57', function() {
fixture.innerHTML =
'<style type="text/css">' +
' label:before { content: "foo"; }' +
' label:after { content: "baz"; }' +
'</style>' +
'<form>' +
' <label for="test"><input id="test" type="image" src="foo.jpg" name="test" title="bar"></label>' +
'</form>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo baz');
});
it('passes test 58', function() {
fixture.innerHTML =
'<label for="test">States:</label>' +
'<input type="password" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'States:');
});
it('passes test 59', function() {
fixture.innerHTML =
'<label for="test">States:</label>' +
'<input type="checkbox" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'States:');
});
it('passes test 60', function() {
fixture.innerHTML =
'<label for="test">States:</label>' + '<input type="radio" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'States:');
});
it('passes test 61', function() {
fixture.innerHTML =
'<label for="test">File:</label>' + '<input type="file" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'File:');
});
it('passes test 62', function() {
fixture.innerHTML =
'<label for="test">States:</label>' +
'<input type="image" id="test" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'States:');
});
it('passes test 63', function() {
fixture.innerHTML =
'<label for="test">' +
' foo' +
' <input type="text" value="David"/>' +
'</label>' +
'<input type="password" id="test" value="baz"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo David');
});
it('passes test 64', function() {
fixture.innerHTML =
'<label for="test">' +
' foo' +
' <input type="text" value="David"/>' +
'</label>' +
'<input type="checkbox" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo David');
});
it('passes test 65', function() {
fixture.innerHTML =
'<label for="test">' +
' foo' +
' <input type="text" value="David"/>' +
'</label>' +
'<input type="radio" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo David');
});
it('passes test 66', function() {
fixture.innerHTML =
'<label for="test">' +
' foo' +
' <input type="text" value="David"/>' +
'</label>' +
'<input type="file" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo David');
});
it('passes test 67', function() {
fixture.innerHTML =
'<label for="test">' +
' foo' +
' <input type="text" value="David"/>' +
'</label>' +
'<input type="image" id="test" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo David');
});
it('passes test 68', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <select name="member" size="1" role="menu" tabindex="0">' +
' <option role="menuitem" value="beard" selected="true">clown</option>' +
' <option role="menuitem" value="scuba">rich</option>' +
' </select>' +
'</label> ' +
'<input type="password" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
it('passes test 69', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <select name="member" size="1" role="menu" tabindex="0">' +
' <option role="menuitem" value="beard" selected="true">clown</option>' +
' <option role="menuitem" value="scuba">rich</option>' +
' </select>' +
'</label> ' +
'<input type="checkbox" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
it('passes test 70', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <select name="member" size="1" role="menu" tabindex="0">' +
' <option role="menuitem" value="beard" selected="true">clown</option>' +
' <option role="menuitem" value="scuba">rich</option>' +
' </select>' +
'</label> ' +
'<input type="radio" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
it('passes test 71', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <select name="member" size="1" role="menu" tabindex="0">' +
' <option role="menuitem" value="beard" selected="true">clown</option>' +
' <option role="menuitem" value="scuba">rich</option>' +
' </select>' +
'</label> ' +
'<input type="file" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
it('passes test 72', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <select name="member" size="1" role="menu" tabindex="0">' +
' <option role="menuitem" value="beard" selected="true">clown</option>' +
' <option role="menuitem" value="scuba">rich</option>' +
' </select>' +
'</label> ' +
'<input type="image" id="test" src="foo.jpg"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
ariaValuetext('passes test 73', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuetext="Monday" aria-valuemin="1" aria-valuemax="7" aria-valuenow="4">' +
' </div>' +
'</label>' +
'<input type="password" value="baz" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy Monday');
});
ariaValuetext('passes test 74', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuetext="Monday"' +
' aria-valuemin="1" aria-valuemax="7" aria-valuenow="4"></div>' +
'</label>' +
'<input type="checkbox" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy Monday');
});
ariaValuetext('passes test 75', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuetext="Monday"' +
' aria-valuemin="1" aria-valuemax="7" aria-valuenow="4"></div>' +
'</label>' +
'<input type="radio" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy Monday');
});
ariaValuetext('passes test 76', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuetext="Monday"' +
' aria-valuemin="1" aria-valuemax="7" aria-valuenow="4"></div>' +
'</label>' +
'<input type="file" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy Monday');
});
ariaValuetext('passes test 77', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuetext="Monday" aria-valuemin="1"' +
' aria-valuemax="7" aria-valuenow="4"></div>' +
'</label>' +
'<input type="image" src="foo.jpg" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy Monday');
});
it('passes test 78', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuemin="1" aria-valuemax="7" aria-valuenow="4">' +
' </div>' +
'</label>' +
'<input type="password" id="test" value="baz"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy 4');
});
it('passes test 79', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuemin="1" aria-valuemax="7" aria-valuenow="4">' +
' </div>' +
'</label>' +
'<input type="checkbox" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy 4');
});
it('passes test 80', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuemin="1" aria-valuemax="7" aria-valuenow="4">' +
' </div>' +
'</label>' +
'<input type="radio" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy 4');
});
it('passes test 81', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuemin="1" aria-valuemax="7" aria-valuenow="4">' +
' </div>' +
'</label>' +
'<input type="file" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy 4');
});
it('passes test 82', function() {
fixture.innerHTML =
'<label for="test">' +
' crazy' +
' <div role="spinbutton" aria-valuemin="1" aria-valuemax="7" aria-valuenow="4">' +
' </div>' +
'</label>' +
'<input type="image" src="foo.jpg" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy 4');
});
it('passes test 83', function() {
fixture.innerHTML =
'<input type="password" id="test" title="crazy" value="baz"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
it('passes test 84', function() {
fixture.innerHTML = '<input type="checkbox" id="test" title="crazy"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
it('passes test 85', function() {
fixture.innerHTML = '<input type="radio" id="test" title="crazy"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
it('passes test 86', function() {
fixture.innerHTML = '<input type="file" id="test" title="crazy"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
it('passes test 87', function() {
fixture.innerHTML =
'<input type="image" src="foo.jpg" id="test" title="crazy"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'crazy');
});
pseudoText('passes test 88', function() {
fixture.innerHTML =
'<style>' +
' label:before { content:"fancy "; }' +
'</style>' +
'<label for="test">fruit</label>' +
'<input type="password" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 89', function() {
fixture.innerHTML =
'<style>' +
' label:before { content:"fancy "; }' +
'</style>' +
'<label for="test">fruit</label>' +
'<input type="checkbox" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 90', function() {
fixture.innerHTML =
'<style>' +
' label:before { content:"fancy "; }' +
'</style>' +
'<label for="test">fruit</label>' +
'<input type="radio" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 91', function() {
fixture.innerHTML =
'<style>' +
' label:before { content:"fancy "; }' +
'</style>' +
'<label for="test">fruit</label>' +
'<input type="file" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 92', function() {
fixture.innerHTML =
'<style>' +
' label:before { content:"fancy "; }' +
'</style>' +
'<label for="test">fruit</label>' +
'<input type="image" src="foo.jpg" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 93', function() {
fixture.innerHTML =
'<style>' +
' label:after { content:" fruit"; }' +
'</style>' +
'<label for="test">fancy</label>' +
'<input type="password" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 94', function() {
fixture.innerHTML =
'<style>' +
' label:after { content:" fruit"; }' +
'</style>' +
'<label for="test">fancy</label>' +
'<input type="checkbox" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 95', function() {
fixture.innerHTML =
'<style>' +
' label:after { content:" fruit"; }' +
'</style>' +
'<label for="test">fancy</label>' +
'<input type="radio" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 96', function() {
fixture.innerHTML =
'<style>' +
' label:after { content:" fruit"; }' +
'</style>' +
'<label for="test">fancy</label>' +
'<input type="file" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
pseudoText('passes test 97', function() {
fixture.innerHTML =
'<style>' +
' label:after { content:" fruit"; }' +
'</style>' +
'<label for="test">fancy</label>' +
'<input type="image" src="foo.jpg" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'fancy fruit');
});
it('passes test 98', function() {
fixture.innerHTML =
'<input type="checkbox" id="test" />' +
'<label for="test">Flash the screen ' +
' <div role="combobox">' +
' <div role="textbox"></div>' +
' <ul role="listbox" style="list-style-type: none;">' +
' <li role="option" aria-selected="true">1</li>' +
' <li role="option">2</li>' +
' <li role="option">3</li>' +
' </ul>' +
' </div>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
// Chrome 72: "Flash the screen 1 times"
// Safari 12.0: "Flash the screen 1 times"
// Firefox 62: "Flash the screen 1 times"
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 99', function() {
fixture.innerHTML =
'<input type="checkbox" id="test" />' +
'<label for="test">Flash the screen ' +
' <span role="menu">' +
' <span role="menuitem" aria-selected="true">1</span>' +
' <span role="menuitem" hidden>2</span>' +
' <span role="menuitem" hidden>3</span>' +
' </span>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen times.');
});
it('passes test 100', function() {
fixture.innerHTML =
'<input type="checkbox" id="test" />' +
'<label for="test">Flash the screen ' +
' <select size="1">' +
' <option selected="selected">1</option>' +
' <option>2</option>' +
' <option>3</option>' +
' </select>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 101', function() {
fixture.innerHTML =
'<input type="checkbox" id="test" />' +
'<label for="test">foo <input role="slider" type="range" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz ' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 102', function() {
fixture.innerHTML =
'<input type="checkbox" id="test" />' +
'<label for="test">foo <input role="spinbutton" type="number" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 103', function() {
fixture.innerHTML = '<input type="checkbox" id="test" title="foo" />';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 104', function() {
fixture.innerHTML =
'<input type="file" id="test" />' +
'<label for="test">Flash the screen ' +
' <div role="combobox">' +
' <div role="textbox"></div>' +
' <ul role="listbox" style="list-style-type: none;">' +
' <li role="option" aria-selected="true">1 </li>' +
' <li role="option">2 </li>' +
' <li role="option">3 </li>' +
' </ul>' +
' </div>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 105', function() {
fixture.innerHTML =
'<input type="file" id="test" />' +
'<label for="test">Flash the screen ' +
' <span role="menu">' +
' <span role="menuitem" aria-selected="true">1</span>' +
' <span role="menuitem" hidden>2</span>' +
' <span role="menuitem" hidden>3</span>' +
' </span>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen times.');
});
it('passes test 106', function() {
fixture.innerHTML =
'<input type="file" id="test" />' +
'<label for="test">Flash the screen ' +
' <select size="1">' +
' <option selected="selected">1</option>' +
' <option>2</option>' +
' <option>3</option>' +
' </select>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 107', function() {
fixture.innerHTML =
'<input type="file" id="test" />' +
'<label for="test">foo <input role="slider" type="range" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 108', function() {
fixture.innerHTML =
'<input type="file" id="test" />' +
'<label for="test">foo <input role="spinbutton" type="number" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 109', function() {
fixture.innerHTML = '<input type="file" id="test" title="foo" />';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 110', function() {
fixture.innerHTML =
'<input type="image" src="test.png" id="test" title="foo" />';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 111', function() {
fixture.innerHTML =
'<input type="password" id="test" />' +
'<label for="test">Flash the screen ' +
' <div role="combobox">' +
' <div role="textbox"></div>' +
' <ul role="listbox" style="list-style-type: none;">' +
' <li role="option" aria-selected="true">1</li>' +
' <li role="option">2</li>' +
' <li role="option">3</li>' +
' </ul>' +
' </div>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 112', function() {
fixture.innerHTML =
'<input type="password" id="test" />' +
'<label for="test">Flash the screen ' +
' <span role="menu">' +
' <span role="menuitem" aria-selected="true">1</span>' +
' <span role="menuitem" hidden>2</span>' +
' <span role="menuitem" hidden>3</span>' +
' </span>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen times.');
});
it('passes test 113', function() {
fixture.innerHTML =
'<input type="password" id="test" />' +
'<label for="test">Flash the screen ' +
' <select size="1">' +
' <option selected="selected">1</option>' +
' <option>2</option>' +
' <option>3</option>' +
' </select>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 114', function() {
fixture.innerHTML =
'<input type="password" id="test" />' +
'<label for="test">foo <input role="slider" type="range" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 115', function() {
fixture.innerHTML =
'<input type="password" id="test" />' +
'<label for="test">foo <input role="spinbutton" type="number" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 116', function() {
fixture.innerHTML = '<input type="password" id="test" title="foo" />';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 117', function() {
fixture.innerHTML =
'<input type="radio" id="test" />' +
'<label for="test">Flash the screen ' +
' <div role="combobox">' +
' <div role="textbox"></div>' +
' <ul role="listbox" style="list-style-type: none;">' +
' <li role="option" aria-selected="true">1</li>' +
' <li role="option">2</li>' +
' <li role="option">3</li>' +
' </ul>' +
' </div>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 118', function() {
fixture.innerHTML =
'<input type="radio" id="test" />' +
'<label for="test">Flash the screen ' +
' <span role="menu">' +
' <span role="menuitem" aria-selected="true">1</span>' +
' <span role="menuitem" hidden>2</span>' +
' <span role="menuitem" hidden>3</span>' +
' </span>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen times.');
});
it('passes test 119', function() {
fixture.innerHTML =
'<input type="radio" id="test" />' +
'<label for="test">Flash the screen ' +
' <select size="1">' +
' <option selected="selected">1</option>' +
' <option>2</option>' +
' <option>3</option>' +
' </select>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
// Chrome 72: "Flash the screen 1 times"
// Firefox 62: "Flash the screen 1 times"
// Safari 12.0: "Flash the screen 1 times"
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 120', function() {
fixture.innerHTML =
'<input type="radio" id="test" />' +
'<label for="test">foo <input role="slider" type="range" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
// Chrome 70: Foo 5 baz
// Firefox 62: Foo 5 baz
// Safari 12.0: Foo 5 baz
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 121', function() {
fixture.innerHTML =
'<input type="radio" id="test" />' +
'<label for="test">foo <input role="spinbutton" type="number" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 122', function() {
fixture.innerHTML = '<input type="radio" id="test" title="foo" />';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
it('passes test 123', function() {
fixture.innerHTML =
'<input type="text" id="test" />' +
'<label for="test">Flash the screen ' +
' <div role="combobox">' +
' <div role="textbox"></div>' +
' <ul role="listbox" style="list-style-type: none;">' +
' <li role="option" aria-selected="true">1</li>' +
' <li role="option">2</li>' +
' <li role="option">3</li>' +
' </ul>' +
' </div>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
// Chrome 72: "Flash the screen 1 times."
// Firefox 62: "Flash the screen 1 times."
// Safari 12.0: "Flash the screen 1 times."
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 124', function() {
fixture.innerHTML =
'<input type="text" id="test" />' +
'<label for="test">Flash the screen ' +
' <span role="menu">' +
' <span role="menuitem" aria-selected="true">1</span>' +
' <span role="menuitem" hidden>2</span>' +
' <span role="menuitem" hidden>3</span>' +
' </span>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen times.');
});
it('passes test 125', function() {
fixture.innerHTML =
'<input type="text" id="test" />' +
'<label for="test">Flash the screen ' +
' <select size="1">' +
' <option selected="selected">1</option>' +
' <option>2</option>' +
' <option>3</option>' +
' </select>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
// Chrome 72: Flash the screen 1 times
// Firefox 62: Flash the screen 1 times
// Safari 12.0: Flash the screen 1 times
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 126', function() {
fixture.innerHTML =
'<input type="text" id="test" />' +
'<label for="test">foo <input role="slider" type="range" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 127', function() {
fixture.innerHTML =
'<input type="text" id="test" />' +
'<label for="test">foo <input role="spinbutton" type="number" value="5" min="1" max="10" aria-valuenow="5" aria-valuemin="1" aria-valuemax="10"> baz' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo 5 baz');
});
it('passes test 128', function() {
fixture.innerHTML = '<input type="text" id="test" title="foo" />';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'foo');
});
  // Skip tests 129 - 138 as those are name description cases
it('passes test 139', function() {
fixture.innerHTML =
'<style>' +
' .hidden { display: none; }' +
'</style>' +
'<div id="test" role="link" tabindex="0">' +
' <span aria-hidden="true"><i> Hello, </i></span>' +
' <span>My</span> name is' +
' <div><img src="file.jpg" title="Bryan" alt="" role="presentation" /></div>' +
' <span role="presentation" aria-label="Eli">' +
' <span aria-label="Garaventa">Zambino</span>' +
' </span>' +
' <span>the weird.</span>' +
' (QED)' +
' <span class="hidden"><i><b>and don\'t you forget it.</b></i></span>' +
' <table>' +
' <tr>' +
' <td>Where</td>' +
' <td style="visibility:hidden;"><div>in</div></td>' +
' <td><div style="display:none;">the world</div></td>' +
' <td>are my marbles?</td>' +
' </tr>' +
' </table>' +
'</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
// Chrome 72: "My name is Eli the weird. (QED) Where are my marbles?"
// Safari 12.0: "My name is Eli the weird. (QED) Where are my marbles?"
// Firefox 62: "Hello, My name is Eli the weird. (QED)"
assert.equal(
accessibleText(target),
'My name is Eli the weird. (QED) Where are my marbles?'
);
});
it('passes test 140', function() {
fixture.innerHTML =
'<style>' +
' .hidden { display: none; }' +
'</style>' +
'<input id="test" type="text" aria-labelledby="lblId" />' +
'<div id="lblId" >' +
' <span aria-hidden="true"><i> Hello, </i></span>' +
' <span>My</span> name is' +
' <div><img src="file.jpg" title="Bryan" alt="" role="presentation" /></div>' +
' <span role="presentation" aria-label="Eli">' +
' <span aria-label="Garaventa">Zambino</span>' +
' </span>' +
' <span>the weird.</span>' +
' (QED)' +
' <span class="hidden"><i><b>and don\'t you forget it.</b></i></span>' +
' <table>' +
' <tr>' +
' <td>Where</td>' +
' <td style="visibility:hidden;"><div>in</div></td>' +
' <td><div style="display:none;">the world</div></td>' +
' <td>are my marbles?</td>' +
' </tr>' +
' </table>' +
'</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(
accessibleText(target),
'My name is Eli the weird. (QED) Where are my marbles?'
);
});
  // Disabled: axe has a buggy implicitName computation;
  // it shouldn't be a big deal.
xit('passes test 141', function() {
fixture.innerHTML =
'<style>' +
' .hidden { display: none; }' +
'</style>' +
'<input type="text" id="test" />' +
'<label for="test" id="label">' +
' <span aria-hidden="true"><i> Hello, </i></span>' +
' <span>My</span> name is' +
' <div><img src="file.jpg" title="Bryan" alt="" role="presentation" /></div>' +
' <span role="presentation" aria-label="Eli">' +
' <span aria-label="Garaventa">Zambino</span>' +
' </span>' +
' <span>the weird.</span>' +
' (QED)' +
' <span class="hidden"><i><b>and don\'t you forget it.</b></i></span>' +
' <table>' +
' <tr>' +
' <td>Where</td>' +
' <td style="visibility:hidden;"><div>in</div></td>' +
' <td><div style="display:none;">the world</div></td>' +
' <td>are my marbles?</td>' +
' </tr>' +
' </table>' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(
accessibleText(target),
'My name is Eli the weird. (QED) Where are my marbles?'
);
});
it('passes test 143', function() {
fixture.innerHTML =
' <style>' +
' .hidden { display: none; }' +
' </style>' +
' <div>' +
' <input id="test" type="text" aria-labelledby="lbl1 lbl2" aria-describedby="descId" />' +
' <span>' +
' <span aria-hidden="true" id="lbl1">Important</span>' +
' <span class="hidden">' +
' <span aria-hidden="true" id="lbl2">stuff</span>' +
' </span>' +
' </span>' +
' </div>' +
' <div class="hidden">' +
' <div id="descId">' +
' <span aria-hidden="true"><i> Hello, </i></span>' +
' <span>My</span> name is' +
' <div><img src="file.jpg" title="Bryan" alt="" role="presentation" /></div>' +
' <span role="presentation" aria-label="Eli">' +
' <span aria-label="Garaventa">Zambino</span>' +
' </span>' +
' <span>the weird.</span>' +
' (QED)' +
' <span class="hidden"><i><b>and don\'t you forget it.</b></i></span>' +
' <table>' +
' <tr>' +
' <td>Where</td>' +
' <td style="visibility:hidden;"><div>in</div></td>' +
' <td><div style="display:none;">the world</div></td>' +
' <td>are my marbles?</td>' +
' </tr>' +
' </table>' +
' </div>' +
' </div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Important stuff');
});
it('passes test 144', function() {
fixture.innerHTML =
'<input id="test" role="combobox" type="text" title="Choose your language" value="English">';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Choose your language');
});
it('passes test 145', function() {
fixture.innerHTML =
'<div id="test" role="combobox" tabindex="0" title="Choose your language.">' +
' <span> English </span>' +
'</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Choose your language.');
});
it('passes test 147', function() {
fixture.innerHTML =
'<input type="checkbox" id="test" />' +
'<label for="test">Flash the screen ' +
' <ul role="listbox" style="list-style-type: none;">' +
' <li role="option" aria-selected="true">1</li>' +
' <li role="option">2</li>' +
' <li role="option">3</li>' +
' </ul>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
pseudoText('passes test 148', function() {
fixture.innerHTML =
'<input type="checkbox" id="test" />' +
'<label for="test">Flash the screen ' +
' <div role="textbox" contenteditable>1</div>' +
' times.' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 149', function() {
fixture.innerHTML =
'<label for="test">a test</label>' +
'<label>This <input type="checkbox" id="test" /> is</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'a test This is');
});
it('passes test 150', function() {
fixture.innerHTML =
'<label>This <input type="checkbox" id="test" /> is</label>' +
'<label for="test">a test</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'This is a test');
});
it('passes test 151', function() {
fixture.innerHTML =
'<input type="file" id="test" />' +
'<label for="test">W<i>h<b>a</b></i>t<br>is<div>your<div>name<b>?</b></div></div></label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'What is your name?');
});
pseudoText('passes test 152', function() {
fixture.innerHTML =
'<style>' +
' label:before { content: "This"; display: block; }' +
' label:after { content: "."; }' +
'</style>' +
'<label for="test">is a test</label>' +
'<input type="text" id="test"/>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'This is a test.');
});
it('passes test 153', function() {
fixture.innerHTML =
'<style>' +
' .hidden { display: none; }' +
'</style>' +
'<input type="file" id="test" />' +
'<label for="test">' +
' <span class="hidden">1</span><span>2</span>' +
' <span style="visibility: hidden;">3</span><span>4</span>' +
' <span hidden>5</span><span>6</span>' +
' <span aria-hidden="true">7</span><span>8</span>' +
' <span aria-hidden="false" class="hidden">9</span><span>10</span>' +
'</label>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), '2 4 6 8 10');
});
it('passes test 154', function() {
fixture.innerHTML =
'<input type="file" id="test" />' +
'<label for="test">Flash <span aria-owns="id1">the screen</span> times.</label>' +
'<div>' +
' <div id="id1" role="combobox" aria-owns="id2">' +
' <div role="textbox"></div>' +
' </div>' +
'</div>' +
'<div>' +
' <ul id="id2" role="listbox" style="list-style-type: none;">' +
' <li role="option" >1 </li>' +
' <li role="option" aria-selected="true">2 </li>' +
' <li role="option">3 </li>' +
' </ul>' +
'</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 2 times.');
});
it('passes test 155', function() {
fixture.innerHTML =
'<input type="file" id="test" />' +
'<label for="test">Flash <span aria-owns="id1">the screen</span> times.</label>' +
'<div id="id1">' +
' <div role="combobox">' +
' <div role="textbox"></div>' +
' <ul role="listbox" style="list-style-type: none;">' +
' <li role="option" aria-selected="true">1 </li>' +
' <li role="option">2 </li>' +
' <li role="option">3 </li>' +
' </ul>' +
' </div>' +
'</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Flash the screen 1 times.');
});
it('passes test 156', function() {
fixture.innerHTML =
'<style>' +
' .hidden { display: none; }' +
'</style>' +
'<div id="test" role="link" tabindex="0">' +
' <span aria-hidden="true"><i> Hello, </i></span>' +
' <span>My</span> name is' +
' <div><img src="file.jpg" title="Bryan" alt="" role="presentation" /></div>' +
' <span role="presentation" aria-label="Eli"><span aria-label="Garaventa">Zambino</span></span>' +
' <span>the weird.</span>' +
' (QED)' +
' <span class="hidden"><i><b>and don\'t you forget it.</b></i></span>' +
'</div>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'My name is Eli the weird. (QED)');
});
it('passes test 158', function() {
fixture.innerHTML =
'<a id="test" href="#" aria-label="California"' +
' title="San Francisco">United States</a>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'California');
});
it('passes test 159', function() {
fixture.innerHTML =
'<h2 id="test">' +
'Country of origin:' +
'<input role="combobox" type="text" title="Choose your country." value="United States">' +
'</h2>';
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#test');
assert.equal(accessibleText(target), 'Country of origin: United States');
});
/**
// In case anyone even wants it, here's the script used to generate these test cases
function getTestCase(content, index = 0) {
const regex = new RegExp('if given\n([^]*)\nthen the accessible name of the element with id of "(.*)" is "(.*)"')
const out = content.match(regex)
if (!out || out.length !== 4) {
return;
}
const [, html, id, expected] = out;
const strings = html.split(/\n/g).map(
line => `'${line.substr(2)}'`
).join(' +\n ') + ';'
return `
it('passes test ${index + 1}', function () {
fixture.innerHTML = ${strings}
axe.testUtils.flatTreeSetup(fixture);
var target = fixture.querySelector('#${id}');
assert.equal(accessibleText(target), '${expected}');
});`
}*/
});
});
| 1 | 16,045 | Are we marking what the browser's accessibility tree says or what screen readers with those browsers say? | dequelabs-axe-core | js |
@@ -156,6 +156,10 @@ func (s *stream) Close() error {
return nil
}
+func (s *stream) FullClose() error {
+ return s.Close()
+}
+
type record struct {
b []byte
c int | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package streamtest
import (
"context"
"errors"
"io"
"sync"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/swarm"
)
var (
ErrRecordsNotFound = errors.New("records not found")
ErrStreamNotSupported = errors.New("stream not supported")
)
type Recorder struct {
records map[string][]*Record
recordsMu sync.Mutex
protocols []p2p.ProtocolSpec
middlewares []p2p.HandlerMiddleware
}
func WithProtocols(protocols ...p2p.ProtocolSpec) Option {
return optionFunc(func(r *Recorder) {
r.protocols = append(r.protocols, protocols...)
})
}
func WithMiddlewares(middlewares ...p2p.HandlerMiddleware) Option {
return optionFunc(func(r *Recorder) {
r.middlewares = append(r.middlewares, middlewares...)
})
}
func New(opts ...Option) *Recorder {
r := &Recorder{
records: make(map[string][]*Record),
}
for _, o := range opts {
o.apply(r)
}
return r
}
func (r *Recorder) NewStream(_ context.Context, addr swarm.Address, protocolName, protocolVersion, streamName string) (p2p.Stream, error) {
recordIn := newRecord()
recordOut := newRecord()
streamOut := newStream(recordIn, recordOut)
streamIn := newStream(recordOut, recordIn)
var handler p2p.HandlerFunc
for _, p := range r.protocols {
if p.Name == protocolName && p.Version == protocolVersion {
for _, s := range p.StreamSpecs {
if s.Name == streamName {
handler = s.Handler
}
}
}
}
if handler == nil {
return nil, ErrStreamNotSupported
}
for i := len(r.middlewares) - 1; i >= 0; i-- {
handler = r.middlewares[i](handler)
}
record := &Record{in: recordIn, out: recordOut}
go func() {
err := handler(p2p.Peer{Address: addr}, streamIn)
if err != nil && err != io.EOF {
record.setErr(err)
}
}()
id := addr.String() + p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
r.recordsMu.Lock()
defer r.recordsMu.Unlock()
r.records[id] = append(r.records[id], record)
return streamOut, nil
}
func (r *Recorder) Records(addr swarm.Address, protocolName, protocolVersion, streamName string) ([]*Record, error) {
	id := addr.String() + p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
r.recordsMu.Lock()
defer r.recordsMu.Unlock()
records, ok := r.records[id]
if !ok {
return nil, ErrRecordsNotFound
}
return records, nil
}
type Record struct {
in *record
out *record
err error
errMu sync.Mutex
}
func (r *Record) In() []byte {
return r.in.bytes()
}
func (r *Record) Out() []byte {
return r.out.bytes()
}
func (r *Record) Err() error {
r.errMu.Lock()
defer r.errMu.Unlock()
return r.err
}
func (r *Record) setErr(err error) {
r.errMu.Lock()
defer r.errMu.Unlock()
r.err = err
}
type stream struct {
in io.WriteCloser
out io.ReadCloser
}
func newStream(in io.WriteCloser, out io.ReadCloser) *stream {
return &stream{in: in, out: out}
}
func (s *stream) Read(p []byte) (int, error) {
return s.out.Read(p)
}
func (s *stream) Write(p []byte) (int, error) {
return s.in.Write(p)
}
func (s *stream) Close() error {
if err := s.in.Close(); err != nil {
return err
}
if err := s.out.Close(); err != nil {
return err
}
return nil
}
type record struct {
b []byte
c int
closed bool
cond *sync.Cond
}
func newRecord() *record {
return &record{
cond: sync.NewCond(new(sync.Mutex)),
}
}
func (r *record) Read(p []byte) (n int, err error) {
r.cond.L.Lock()
defer r.cond.L.Unlock()
for r.c == len(r.b) && !r.closed {
r.cond.Wait()
}
end := r.c + len(p)
if end > len(r.b) {
end = len(r.b)
}
n = copy(p, r.b[r.c:end])
r.c += n
if r.closed {
err = io.EOF
}
return n, err
}
func (r *record) Write(p []byte) (int, error) {
r.cond.L.Lock()
defer r.cond.L.Unlock()
defer r.cond.Signal()
r.b = append(r.b, p...)
return len(p), nil
}
func (r *record) Close() error {
r.cond.L.Lock()
defer r.cond.L.Unlock()
defer r.cond.Broadcast()
r.closed = true
return nil
}
func (r *record) bytes() []byte {
r.cond.L.Lock()
defer r.cond.L.Unlock()
return r.b
}
type Option interface {
apply(*Recorder)
}
type optionFunc func(*Recorder)
func (f optionFunc) apply(r *Recorder) { f(r) }
| 1 | 8,843 | This method should behave as intended, to block until it reads an EOF from the other side. | ethersphere-bee | go |
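A minimal sketch of the blocking semantics the review comment above asks for, assuming only the Read/Close surface this mock already provides; the fullClose name and the 32-byte drain buffer are illustrative, not part of the recorded patch.

// fullClose closes the local side, then keeps reading until the peer's own
// close arrives as io.EOF, so both directions are known to be finished.
func fullClose(s io.ReadWriteCloser) error {
	if err := s.Close(); err != nil {
		return err
	}
	buf := make([]byte, 32)
	for {
		if _, err := s.Read(buf); err != nil {
			if err == io.EOF {
				return nil // peer finished writing; the stream is fully closed
			}
			return err
		}
	}
}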
@@ -1924,7 +1924,7 @@ func GitFsck() {
repo := bean.(*Repository)
repoPath := repo.RepoPath()
if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil {
- desc := fmt.Sprintf("Fail to health check repository '%s': %v", repoPath, err)
+ desc := fmt.Sprintf("Health check failed on repository '%s': %v", repoPath, err)
log.Warn(desc)
if err = CreateRepositoryNotice(desc); err != nil {
log.Error(3, "CreateRepositoryNotice: %v", err) | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"bytes"
"fmt"
"image"
_ "image/jpeg"
"image/png"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"time"
"github.com/Unknwon/cae/zip"
"github.com/Unknwon/com"
"github.com/go-xorm/xorm"
"github.com/mcuadros/go-version"
"github.com/nfnt/resize"
log "gopkg.in/clog.v1"
"gopkg.in/ini.v1"
git "github.com/gogs/git-module"
api "github.com/gogs/go-gogs-client"
"github.com/gogs/gogs/models/errors"
"github.com/gogs/gogs/pkg/avatar"
"github.com/gogs/gogs/pkg/bindata"
"github.com/gogs/gogs/pkg/markup"
"github.com/gogs/gogs/pkg/process"
"github.com/gogs/gogs/pkg/setting"
"github.com/gogs/gogs/pkg/sync"
)
// REPO_AVATAR_URL_PREFIX is used to identify a URL is to access repository avatar.
const REPO_AVATAR_URL_PREFIX = "repo-avatars"
var repoWorkingPool = sync.NewExclusivePool()
var (
Gitignores, Licenses, Readmes, LabelTemplates []string
// Maximum items per page in forks, watchers and stars of a repo
ItemsPerPage = 40
)
func LoadRepoConfig() {
// Load .gitignore and license files and readme templates.
types := []string{"gitignore", "license", "readme", "label"}
typeFiles := make([][]string, 4)
for i, t := range types {
files, err := bindata.AssetDir("conf/" + t)
if err != nil {
log.Fatal(4, "Fail to get %s files: %v", t, err)
}
customPath := path.Join(setting.CustomPath, "conf", t)
if com.IsDir(customPath) {
customFiles, err := com.StatDir(customPath)
if err != nil {
log.Fatal(4, "Fail to get custom %s files: %v", t, err)
}
for _, f := range customFiles {
if !com.IsSliceContainsStr(files, f) {
files = append(files, f)
}
}
}
typeFiles[i] = files
}
Gitignores = typeFiles[0]
Licenses = typeFiles[1]
Readmes = typeFiles[2]
LabelTemplates = typeFiles[3]
sort.Strings(Gitignores)
sort.Strings(Licenses)
sort.Strings(Readmes)
sort.Strings(LabelTemplates)
// Filter out invalid names and promote preferred licenses.
sortedLicenses := make([]string, 0, len(Licenses))
for _, name := range setting.Repository.PreferredLicenses {
if com.IsSliceContainsStr(Licenses, name) {
sortedLicenses = append(sortedLicenses, name)
}
}
for _, name := range Licenses {
if !com.IsSliceContainsStr(setting.Repository.PreferredLicenses, name) {
sortedLicenses = append(sortedLicenses, name)
}
}
Licenses = sortedLicenses
}
func NewRepoContext() {
zip.Verbose = false
// Check Git installation.
if _, err := exec.LookPath("git"); err != nil {
log.Fatal(4, "Fail to test 'git' command: %v (forgotten install?)", err)
}
// Check Git version.
var err error
setting.Git.Version, err = git.BinVersion()
if err != nil {
log.Fatal(4, "Fail to get Git version: %v", err)
}
log.Info("Git Version: %s", setting.Git.Version)
if version.Compare("1.7.1", setting.Git.Version, ">") {
log.Fatal(4, "Gogs requires Git version greater or equal to 1.7.1")
}
git.HookDir = "custom_hooks"
git.HookSampleDir = "hooks"
git.DefaultCommitsPageSize = setting.UI.User.CommitsPagingNum
// Git requires setting user.name and user.email in order to commit changes.
for configKey, defaultValue := range map[string]string{"user.name": "Gogs", "user.email": "[email protected]"} {
if stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", configKey); err != nil || strings.TrimSpace(stdout) == "" {
// ExitError indicates this config is not set
if _, ok := err.(*exec.ExitError); ok || strings.TrimSpace(stdout) == "" {
if _, stderr, gerr := process.Exec("NewRepoContext(set "+configKey+")", "git", "config", "--global", configKey, defaultValue); gerr != nil {
log.Fatal(4, "Fail to set git %s(%s): %s", configKey, gerr, stderr)
}
log.Info("Git config %s set to %s", configKey, defaultValue)
} else {
log.Fatal(4, "Fail to get git %s(%s): %s", configKey, err, stderr)
}
}
}
	// Set some Git configurations.
if _, stderr, err := process.Exec("NewRepoContext(git config --global core.quotepath false)",
"git", "config", "--global", "core.quotepath", "false"); err != nil {
log.Fatal(4, "Fail to execute 'git config --global core.quotepath false': %s", stderr)
}
RemoveAllWithNotice("Clean up repository temporary data", filepath.Join(setting.AppDataPath, "tmp"))
}
// Repository contains information of a repository.
type Repository struct {
ID int64
OwnerID int64 `xorm:"UNIQUE(s)"`
Owner *User `xorm:"-" json:"-"`
LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
Name string `xorm:"INDEX NOT NULL"`
Description string `xorm:"VARCHAR(512)"`
Website string
DefaultBranch string
Size int64 `xorm:"NOT NULL DEFAULT 0"`
UseCustomAvatar bool
// Counters
NumWatches int
NumStars int
NumForks int
NumIssues int
NumClosedIssues int
NumOpenIssues int `xorm:"-" json:"-"`
NumPulls int
NumClosedPulls int
NumOpenPulls int `xorm:"-" json:"-"`
NumMilestones int `xorm:"NOT NULL DEFAULT 0"`
NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
NumOpenMilestones int `xorm:"-" json:"-"`
NumTags int `xorm:"-" json:"-"`
IsPrivate bool
IsBare bool
IsMirror bool
*Mirror `xorm:"-" json:"-"`
// Advanced settings
EnableWiki bool `xorm:"NOT NULL DEFAULT true"`
AllowPublicWiki bool
EnableExternalWiki bool
ExternalWikiURL string
EnableIssues bool `xorm:"NOT NULL DEFAULT true"`
AllowPublicIssues bool
EnableExternalTracker bool
ExternalTrackerURL string
ExternalTrackerFormat string
ExternalTrackerStyle string
ExternalMetas map[string]string `xorm:"-" json:"-"`
EnablePulls bool `xorm:"NOT NULL DEFAULT true"`
PullsIgnoreWhitespace bool `xorm:"NOT NULL DEFAULT false"`
PullsAllowRebase bool `xorm:"NOT NULL DEFAULT false"`
IsFork bool `xorm:"NOT NULL DEFAULT false"`
ForkID int64
BaseRepo *Repository `xorm:"-" json:"-"`
Created time.Time `xorm:"-" json:"-"`
CreatedUnix int64
Updated time.Time `xorm:"-" json:"-"`
UpdatedUnix int64
}
func (repo *Repository) BeforeInsert() {
repo.CreatedUnix = time.Now().Unix()
repo.UpdatedUnix = repo.CreatedUnix
}
func (repo *Repository) BeforeUpdate() {
repo.UpdatedUnix = time.Now().Unix()
}
func (repo *Repository) AfterSet(colName string, _ xorm.Cell) {
switch colName {
case "default_branch":
// FIXME: use models migration to solve all at once.
if len(repo.DefaultBranch) == 0 {
repo.DefaultBranch = "master"
}
case "num_closed_issues":
repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
case "num_closed_pulls":
repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls
case "num_closed_milestones":
repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones
case "external_tracker_style":
if len(repo.ExternalTrackerStyle) == 0 {
repo.ExternalTrackerStyle = markup.ISSUE_NAME_STYLE_NUMERIC
}
case "created_unix":
repo.Created = time.Unix(repo.CreatedUnix, 0).Local()
case "updated_unix":
repo.Updated = time.Unix(repo.UpdatedUnix, 0)
}
}
func (repo *Repository) loadAttributes(e Engine) (err error) {
if repo.Owner == nil {
repo.Owner, err = getUserByID(e, repo.OwnerID)
if err != nil {
return fmt.Errorf("getUserByID [%d]: %v", repo.OwnerID, err)
}
}
if repo.IsFork && repo.BaseRepo == nil {
repo.BaseRepo, err = getRepositoryByID(e, repo.ForkID)
if err != nil {
if errors.IsRepoNotExist(err) {
repo.IsFork = false
repo.ForkID = 0
} else {
return fmt.Errorf("getRepositoryByID [%d]: %v", repo.ForkID, err)
}
}
}
return nil
}
func (repo *Repository) LoadAttributes() error {
return repo.loadAttributes(x)
}
// IsPartialPublic returns true if the repository is public or allows public access to the wiki or issues.
func (repo *Repository) IsPartialPublic() bool {
return !repo.IsPrivate || repo.AllowPublicWiki || repo.AllowPublicIssues
}
func (repo *Repository) CanGuestViewWiki() bool {
return repo.EnableWiki && !repo.EnableExternalWiki && repo.AllowPublicWiki
}
func (repo *Repository) CanGuestViewIssues() bool {
return repo.EnableIssues && !repo.EnableExternalTracker && repo.AllowPublicIssues
}
// MustOwner always returns a valid *User object to avoid conceptually impossible error handling.
// It creates a fake object that contains error details when an error occurs.
func (repo *Repository) MustOwner() *User {
return repo.mustOwner(x)
}
func (repo *Repository) FullName() string {
return repo.MustOwner().Name + "/" + repo.Name
}
func (repo *Repository) HTMLURL() string {
return setting.AppURL + repo.FullName()
}
// CustomAvatarPath returns repository custom avatar file path.
func (repo *Repository) CustomAvatarPath() string {
return filepath.Join(setting.RepositoryAvatarUploadPath, com.ToStr(repo.ID))
}
// RelAvatarLink returns relative avatar link to the site domain,
// which includes app sub-url as prefix.
// Since Gravatar support is not needed here, just check for the image path.
func (repo *Repository) RelAvatarLink() string {
defaultImgUrl := ""
if !com.IsExist(repo.CustomAvatarPath()) {
return defaultImgUrl
}
return fmt.Sprintf("%s/%s/%d", setting.AppSubURL, REPO_AVATAR_URL_PREFIX, repo.ID)
}
// AvatarLink returns repository avatar absolute link.
func (repo *Repository) AvatarLink() string {
link := repo.RelAvatarLink()
if link[0] == '/' && link[1] != '/' {
return setting.AppURL + strings.TrimPrefix(link, setting.AppSubURL)[1:]
}
return link
}
// UploadAvatar saves a custom avatar for the repository.
// FIXME: split uploads into different subdirs in case we have a massive number
// of repositories (see the sketch after this function).
func (repo *Repository) UploadAvatar(data []byte) error {
img, _, err := image.Decode(bytes.NewReader(data))
if err != nil {
return fmt.Errorf("decode image: %v", err)
}
os.MkdirAll(setting.RepositoryAvatarUploadPath, os.ModePerm)
fw, err := os.Create(repo.CustomAvatarPath())
if err != nil {
return fmt.Errorf("create custom avatar directory: %v", err)
}
defer fw.Close()
m := resize.Resize(avatar.AVATAR_SIZE, avatar.AVATAR_SIZE, img, resize.NearestNeighbor)
if err = png.Encode(fw, m); err != nil {
return fmt.Errorf("encode image: %v", err)
}
return nil
}
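
// A hedged sketch of the sharding the FIXME above suggests, not part of Gogs:
// derive a fixed subdirectory from the repository ID so the upload directory
// never collects millions of files in one place. The helper name is hypothetical.
func shardedAvatarPath(repoID int64) string {
	// Use the low byte of the ID as a 256-way shard key, e.g. ".../a3/163".
	shard := fmt.Sprintf("%02x", byte(repoID))
	return filepath.Join(setting.RepositoryAvatarUploadPath, shard, com.ToStr(repoID))
}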
// DeleteAvatar deletes the repository custom avatar.
func (repo *Repository) DeleteAvatar() error {
log.Trace("DeleteAvatar [%d]: %s", repo.ID, repo.CustomAvatarPath())
if err := os.Remove(repo.CustomAvatarPath()); err != nil {
return err
}
repo.UseCustomAvatar = false
return UpdateRepository(repo, false)
}
// This method assumes the following fields have been assigned valid values:
// Required - BaseRepo (if fork)
// Arguments that are allowed to be nil: permission
func (repo *Repository) APIFormat(permission *api.Permission, user ...*User) *api.Repository {
cloneLink := repo.CloneLink()
apiRepo := &api.Repository{
ID: repo.ID,
Owner: repo.Owner.APIFormat(),
Name: repo.Name,
FullName: repo.FullName(),
Description: repo.Description,
Private: repo.IsPrivate,
Fork: repo.IsFork,
Empty: repo.IsBare,
Mirror: repo.IsMirror,
Size: repo.Size,
HTMLURL: repo.HTMLURL(),
SSHURL: cloneLink.SSH,
CloneURL: cloneLink.HTTPS,
Website: repo.Website,
Stars: repo.NumStars,
Forks: repo.NumForks,
Watchers: repo.NumWatches,
OpenIssues: repo.NumOpenIssues,
DefaultBranch: repo.DefaultBranch,
Created: repo.Created,
Updated: repo.Updated,
Permissions: permission,
// Reserved for go-gogs-client change
// AvatarUrl: repo.AvatarLink(),
}
if repo.IsFork {
p := &api.Permission{Pull: true}
if len(user) != 0 {
p.Admin = user[0].IsAdminOfRepo(repo)
p.Push = user[0].IsWriterOfRepo(repo)
}
apiRepo.Parent = repo.BaseRepo.APIFormat(p)
}
return apiRepo
}
func (repo *Repository) getOwner(e Engine) (err error) {
if repo.Owner != nil {
return nil
}
repo.Owner, err = getUserByID(e, repo.OwnerID)
return err
}
func (repo *Repository) GetOwner() error {
return repo.getOwner(x)
}
func (repo *Repository) mustOwner(e Engine) *User {
if err := repo.getOwner(e); err != nil {
return &User{
Name: "error",
FullName: err.Error(),
}
}
return repo.Owner
}
func (repo *Repository) UpdateSize() error {
countObject, err := git.GetRepoSize(repo.RepoPath())
if err != nil {
return fmt.Errorf("GetRepoSize: %v", err)
}
repo.Size = countObject.Size + countObject.SizePack
if _, err = x.Id(repo.ID).Cols("size").Update(repo); err != nil {
return fmt.Errorf("update size: %v", err)
}
return nil
}
// ComposeMetas composes a map of metas for rendering external issue tracker URL.
func (repo *Repository) ComposeMetas() map[string]string {
if !repo.EnableExternalTracker {
return nil
} else if repo.ExternalMetas == nil {
repo.ExternalMetas = map[string]string{
"format": repo.ExternalTrackerFormat,
"user": repo.MustOwner().Name,
"repo": repo.Name,
}
switch repo.ExternalTrackerStyle {
case markup.ISSUE_NAME_STYLE_ALPHANUMERIC:
repo.ExternalMetas["style"] = markup.ISSUE_NAME_STYLE_ALPHANUMERIC
default:
repo.ExternalMetas["style"] = markup.ISSUE_NAME_STYLE_NUMERIC
}
}
return repo.ExternalMetas
}
// DeleteWiki removes the actual and local copy of repository wiki.
func (repo *Repository) DeleteWiki() {
wikiPaths := []string{repo.WikiPath(), repo.LocalWikiPath()}
for _, wikiPath := range wikiPaths {
RemoveAllWithNotice("Delete repository wiki", wikiPath)
}
}
// getUsersWithAccessMode returns users that have at least the given access mode to the repository.
func (repo *Repository) getUsersWithAccessMode(e Engine, mode AccessMode) (_ []*User, err error) {
if err = repo.getOwner(e); err != nil {
return nil, err
}
accesses := make([]*Access, 0, 10)
if err = e.Where("repo_id = ? AND mode >= ?", repo.ID, mode).Find(&accesses); err != nil {
return nil, err
}
	// Leave a seat for the owner itself to append later. If the owner is an
	// organization, wasting one slot is cheaper than re-allocating the slice once.
users := make([]*User, 0, len(accesses)+1)
if len(accesses) > 0 {
userIDs := make([]int64, len(accesses))
for i := 0; i < len(accesses); i++ {
userIDs[i] = accesses[i].UserID
}
if err = e.In("id", userIDs).Find(&users); err != nil {
return nil, err
}
}
if !repo.Owner.IsOrganization() {
users = append(users, repo.Owner)
}
return users, nil
}
// getAssignees returns a list of users who can be assigned to issues in this repository.
func (repo *Repository) getAssignees(e Engine) (_ []*User, err error) {
	return repo.getUsersWithAccessMode(e, ACCESS_MODE_READ)
}
// GetAssignees returns all users that have read access and can be assigned to issues
// of the repository.
func (repo *Repository) GetAssignees() (_ []*User, err error) {
return repo.getAssignees(x)
}
// GetAssigneeByID returns the user that has write access to the repository by the given ID.
func (repo *Repository) GetAssigneeByID(userID int64) (*User, error) {
return GetAssigneeByID(repo, userID)
}
// GetWriters returns all users that have write access to the repository.
func (repo *Repository) GetWriters() (_ []*User, err error) {
	return repo.getUsersWithAccessMode(x, ACCESS_MODE_WRITE)
}
// GetMilestoneByID returns the milestone belongs to repository by given ID.
func (repo *Repository) GetMilestoneByID(milestoneID int64) (*Milestone, error) {
return GetMilestoneByRepoID(repo.ID, milestoneID)
}
// IssueStats returns number of open and closed repository issues by given filter mode.
func (repo *Repository) IssueStats(userID int64, filterMode FilterMode, isPull bool) (int64, int64) {
return GetRepoIssueStats(repo.ID, userID, filterMode, isPull)
}
func (repo *Repository) GetMirror() (err error) {
repo.Mirror, err = GetMirrorByRepoID(repo.ID)
return err
}
func (repo *Repository) repoPath(e Engine) string {
return RepoPath(repo.mustOwner(e).Name, repo.Name)
}
func (repo *Repository) RepoPath() string {
return repo.repoPath(x)
}
func (repo *Repository) GitConfigPath() string {
return filepath.Join(repo.RepoPath(), "config")
}
func (repo *Repository) RelLink() string {
return "/" + repo.FullName()
}
func (repo *Repository) Link() string {
return setting.AppSubURL + "/" + repo.FullName()
}
func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string {
return fmt.Sprintf("%s/%s/compare/%s...%s", repo.MustOwner().Name, repo.Name, oldCommitID, newCommitID)
}
func (repo *Repository) HasAccess(userID int64) bool {
has, _ := HasAccess(userID, repo, ACCESS_MODE_READ)
return has
}
func (repo *Repository) IsOwnedBy(userID int64) bool {
return repo.OwnerID == userID
}
// CanBeForked returns true if repository meets the requirements of being forked.
func (repo *Repository) CanBeForked() bool {
return !repo.IsBare
}
// CanEnablePulls returns true if repository meets the requirements of accepting pulls.
func (repo *Repository) CanEnablePulls() bool {
return !repo.IsMirror && !repo.IsBare
}
// AllowsPulls returns true if the repository meets the requirements for accepting pulls and has them enabled.
func (repo *Repository) AllowsPulls() bool {
return repo.CanEnablePulls() && repo.EnablePulls
}
func (repo *Repository) IsBranchRequirePullRequest(name string) bool {
return IsBranchOfRepoRequirePullRequest(repo.ID, name)
}
// CanEnableEditor returns true if repository meets the requirements of web editor.
func (repo *Repository) CanEnableEditor() bool {
return !repo.IsMirror
}
// FIXME: should have a mutex to prevent producing the same index for two issues
// that are created closely enough in time (see the sketch after this function).
func (repo *Repository) NextIssueIndex() int64 {
return int64(repo.NumIssues+repo.NumPulls) + 1
}
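
// A hedged sketch for the FIXME above, not part of Gogs: reuse the exclusive
// pool pattern already used in this file (repoWorkingPool) to serialize the
// read-then-insert window per repository, so two near-simultaneous issues can
// never be handed the same index. The pool variable and the callback shape are
// assumptions for illustration.
var issueIndexPool = sync.NewExclusivePool()

func (repo *Repository) withNextIssueIndex(insert func(index int64) error) error {
	issueIndexPool.CheckIn(com.ToStr(repo.ID))
	defer issueIndexPool.CheckOut(com.ToStr(repo.ID))
	return insert(int64(repo.NumIssues+repo.NumPulls) + 1)
}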
func (repo *Repository) LocalCopyPath() string {
return path.Join(setting.AppDataPath, "tmp/local-repo", com.ToStr(repo.ID))
}
// UpdateLocalCopyBranch fetches the latest changes of the given branch from repoPath to localPath.
// It creates a new clone if the local copy does not exist, but does not check out
// a specific branch if the local copy belongs to a wiki.
// For an existing local copy, it checks out the target branch by default, so it is safe to
// assume subsequent operations are against the target branch when the caller has confidence
// that there is no race condition.
func UpdateLocalCopyBranch(repoPath, localPath, branch string, isWiki bool) (err error) {
if !com.IsExist(localPath) {
// Checkout to a specific branch fails when wiki is an empty repository.
if isWiki {
branch = ""
}
if err = git.Clone(repoPath, localPath, git.CloneRepoOptions{
Timeout: time.Duration(setting.Git.Timeout.Clone) * time.Second,
Branch: branch,
}); err != nil {
return fmt.Errorf("git clone %s: %v", branch, err)
}
} else {
if err = git.Fetch(localPath, git.FetchRemoteOptions{
Prune: true,
}); err != nil {
return fmt.Errorf("git fetch: %v", err)
}
if err = git.Checkout(localPath, git.CheckoutOptions{
Branch: branch,
}); err != nil {
return fmt.Errorf("git checkout %s: %v", branch, err)
}
// Reset to align with remote in case of force push.
if err = git.ResetHEAD(localPath, true, "origin/"+branch); err != nil {
return fmt.Errorf("git reset --hard origin/%s: %v", branch, err)
}
}
return nil
}
// UpdateLocalCopyBranch makes sure local copy of repository in given branch is up-to-date.
func (repo *Repository) UpdateLocalCopyBranch(branch string) error {
return UpdateLocalCopyBranch(repo.RepoPath(), repo.LocalCopyPath(), branch, false)
}
// PatchPath returns corresponding patch file path of repository by given issue ID.
func (repo *Repository) PatchPath(index int64) (string, error) {
if err := repo.GetOwner(); err != nil {
return "", err
}
return filepath.Join(RepoPath(repo.Owner.Name, repo.Name), "pulls", com.ToStr(index)+".patch"), nil
}
// SavePatch saves patch data to corresponding location by given issue ID.
func (repo *Repository) SavePatch(index int64, patch []byte) error {
patchPath, err := repo.PatchPath(index)
if err != nil {
return fmt.Errorf("PatchPath: %v", err)
}
os.MkdirAll(filepath.Dir(patchPath), os.ModePerm)
if err = ioutil.WriteFile(patchPath, patch, 0644); err != nil {
return fmt.Errorf("WriteFile: %v", err)
}
return nil
}
func isRepositoryExist(e Engine, u *User, repoName string) (bool, error) {
has, err := e.Get(&Repository{
OwnerID: u.ID,
LowerName: strings.ToLower(repoName),
})
return has && com.IsDir(RepoPath(u.Name, repoName)), err
}
// IsRepositoryExist returns true if the repository with the given name under the user already exists.
func IsRepositoryExist(u *User, repoName string) (bool, error) {
return isRepositoryExist(x, u, repoName)
}
// CloneLink represents different types of clone URLs of repository.
type CloneLink struct {
SSH string
HTTPS string
Git string
}
// ComposeHTTPSCloneURL returns HTTPS clone URL based on given owner and repository name.
func ComposeHTTPSCloneURL(owner, repo string) string {
return fmt.Sprintf("%s%s/%s.git", setting.AppURL, owner, repo)
}
func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
repoName := repo.Name
if isWiki {
repoName += ".wiki"
}
repo.Owner = repo.MustOwner()
cl := new(CloneLink)
if setting.SSH.Port != 22 {
cl.SSH = fmt.Sprintf("ssh://%s@%s:%d/%s/%s.git", setting.RunUser, setting.SSH.Domain, setting.SSH.Port, repo.Owner.Name, repoName)
} else {
cl.SSH = fmt.Sprintf("%s@%s:%s/%s.git", setting.RunUser, setting.SSH.Domain, repo.Owner.Name, repoName)
}
cl.HTTPS = ComposeHTTPSCloneURL(repo.Owner.Name, repoName)
return cl
}
// CloneLink returns clone URLs of repository.
func (repo *Repository) CloneLink() (cl *CloneLink) {
return repo.cloneLink(false)
}
type MigrateRepoOptions struct {
Name string
Description string
IsPrivate bool
IsMirror bool
RemoteAddr string
}
/*
GitHub, GitLab, Gogs: *.wiki.git
BitBucket: *.git/wiki
*/
var commonWikiURLSuffixes = []string{".wiki.git", ".git/wiki"}
// wikiRemoteURL returns an accessible repository URL for the wiki if it exists.
// Otherwise, it returns an empty string.
func wikiRemoteURL(remote string) string {
remote = strings.TrimSuffix(remote, ".git")
for _, suffix := range commonWikiURLSuffixes {
wikiURL := remote + suffix
if git.IsRepoURLAccessible(git.NetworkOptions{
URL: wikiURL,
}) {
return wikiURL
}
}
return ""
}
// MigrateRepository migrates an existing repository from another project hosting service.
func MigrateRepository(doer, owner *User, opts MigrateRepoOptions) (*Repository, error) {
repo, err := CreateRepository(doer, owner, CreateRepoOptions{
Name: opts.Name,
Description: opts.Description,
IsPrivate: opts.IsPrivate,
IsMirror: opts.IsMirror,
})
if err != nil {
return nil, err
}
repoPath := RepoPath(owner.Name, opts.Name)
wikiPath := WikiPath(owner.Name, opts.Name)
if owner.IsOrganization() {
t, err := owner.GetOwnerTeam()
if err != nil {
return nil, err
}
repo.NumWatches = t.NumMembers
} else {
repo.NumWatches = 1
}
migrateTimeout := time.Duration(setting.Git.Timeout.Migrate) * time.Second
RemoveAllWithNotice("Repository path erase before creation", repoPath)
if err = git.Clone(opts.RemoteAddr, repoPath, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: migrateTimeout,
}); err != nil {
return repo, fmt.Errorf("Clone: %v", err)
}
wikiRemotePath := wikiRemoteURL(opts.RemoteAddr)
if len(wikiRemotePath) > 0 {
RemoveAllWithNotice("Repository wiki path erase before creation", wikiPath)
if err = git.Clone(wikiRemotePath, wikiPath, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: migrateTimeout,
}); err != nil {
log.Trace("Fail to clone wiki: %v", err)
RemoveAllWithNotice("Delete repository wiki for initialization failure", wikiPath)
}
}
// Check if repository is empty.
_, stderr, err := com.ExecCmdDir(repoPath, "git", "log", "-1")
if err != nil {
if strings.Contains(stderr, "fatal: bad default revision 'HEAD'") {
repo.IsBare = true
} else {
return repo, fmt.Errorf("check bare: %v - %s", err, stderr)
}
}
if !repo.IsBare {
// Try to get HEAD branch and set it as default branch.
gitRepo, err := git.OpenRepository(repoPath)
if err != nil {
return repo, fmt.Errorf("OpenRepository: %v", err)
}
headBranch, err := gitRepo.GetHEADBranch()
if err != nil {
return repo, fmt.Errorf("GetHEADBranch: %v", err)
}
if headBranch != nil {
repo.DefaultBranch = headBranch.Name
}
if err = repo.UpdateSize(); err != nil {
log.Error(2, "UpdateSize [repo_id: %d]: %v", repo.ID, err)
}
}
if opts.IsMirror {
if _, err = x.InsertOne(&Mirror{
RepoID: repo.ID,
Interval: setting.Mirror.DefaultInterval,
EnablePrune: true,
NextSync: time.Now().Add(time.Duration(setting.Mirror.DefaultInterval) * time.Hour),
}); err != nil {
return repo, fmt.Errorf("InsertOne: %v", err)
}
repo.IsMirror = true
return repo, UpdateRepository(repo, false)
}
return CleanUpMigrateInfo(repo)
}
// cleanUpMigrateGitConfig removes mirror info which prevents "push --all".
// This also removes possible user credentials.
func cleanUpMigrateGitConfig(configPath string) error {
cfg, err := ini.Load(configPath)
if err != nil {
return fmt.Errorf("open config file: %v", err)
}
cfg.DeleteSection("remote \"origin\"")
if err = cfg.SaveToIndent(configPath, "\t"); err != nil {
return fmt.Errorf("save config file: %v", err)
}
return nil
}
var hooksTpls = map[string]string{
"pre-receive": "#!/usr/bin/env %s\n\"%s\" hook --config='%s' pre-receive\n",
"update": "#!/usr/bin/env %s\n\"%s\" hook --config='%s' update $1 $2 $3\n",
"post-receive": "#!/usr/bin/env %s\n\"%s\" hook --config='%s' post-receive\n",
}
func createDelegateHooks(repoPath string) (err error) {
for _, name := range git.HookNames {
hookPath := filepath.Join(repoPath, "hooks", name)
if err = ioutil.WriteFile(hookPath,
[]byte(fmt.Sprintf(hooksTpls[name], setting.ScriptType, setting.AppPath, setting.CustomConf)),
os.ModePerm); err != nil {
return fmt.Errorf("create delegate hook '%s': %v", hookPath, err)
}
}
return nil
}
// CleanUpMigrateInfo finishes migrating the repository and/or wiki with things that don't need to be done for mirrors.
func CleanUpMigrateInfo(repo *Repository) (*Repository, error) {
repoPath := repo.RepoPath()
if err := createDelegateHooks(repoPath); err != nil {
return repo, fmt.Errorf("createDelegateHooks: %v", err)
}
if repo.HasWiki() {
if err := createDelegateHooks(repo.WikiPath()); err != nil {
return repo, fmt.Errorf("createDelegateHooks.(wiki): %v", err)
}
}
if err := cleanUpMigrateGitConfig(repo.GitConfigPath()); err != nil {
return repo, fmt.Errorf("cleanUpMigrateGitConfig: %v", err)
}
if repo.HasWiki() {
if err := cleanUpMigrateGitConfig(path.Join(repo.WikiPath(), "config")); err != nil {
return repo, fmt.Errorf("cleanUpMigrateGitConfig.(wiki): %v", err)
}
}
return repo, UpdateRepository(repo, false)
}
// initRepoCommit temporarily changes the working directory to make the initial commit.
func initRepoCommit(tmpPath string, sig *git.Signature) (err error) {
var stderr string
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git add): %s", tmpPath),
"git", "add", "--all"); err != nil {
return fmt.Errorf("git add: %s", stderr)
}
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git commit): %s", tmpPath),
"git", "commit", fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email),
"-m", "Initial commit"); err != nil {
return fmt.Errorf("git commit: %s", stderr)
}
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git push): %s", tmpPath),
"git", "push", "origin", "master"); err != nil {
return fmt.Errorf("git push: %s", stderr)
}
return nil
}
type CreateRepoOptions struct {
Name string
Description string
Gitignores string
License string
Readme string
IsPrivate bool
IsMirror bool
AutoInit bool
}
func getRepoInitFile(tp, name string) ([]byte, error) {
relPath := path.Join("conf", tp, strings.TrimLeft(path.Clean("/"+name), "/"))
// Use custom file when available.
customPath := path.Join(setting.CustomPath, relPath)
if com.IsFile(customPath) {
return ioutil.ReadFile(customPath)
}
return bindata.Asset(relPath)
}
func prepareRepoCommit(repo *Repository, tmpDir, repoPath string, opts CreateRepoOptions) error {
// Clone to a temporary path and make the initial commit.
_, stderr, err := process.Exec(
fmt.Sprintf("initRepository(git clone): %s", repoPath), "git", "clone", repoPath, tmpDir)
if err != nil {
return fmt.Errorf("git clone: %v - %s", err, stderr)
}
// README
data, err := getRepoInitFile("readme", opts.Readme)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", opts.Readme, err)
}
cloneLink := repo.CloneLink()
match := map[string]string{
"Name": repo.Name,
"Description": repo.Description,
"CloneURL.SSH": cloneLink.SSH,
"CloneURL.HTTPS": cloneLink.HTTPS,
}
if err = ioutil.WriteFile(filepath.Join(tmpDir, "README.md"),
[]byte(com.Expand(string(data), match)), 0644); err != nil {
return fmt.Errorf("write README.md: %v", err)
}
// .gitignore
if len(opts.Gitignores) > 0 {
var buf bytes.Buffer
names := strings.Split(opts.Gitignores, ",")
for _, name := range names {
data, err = getRepoInitFile("gitignore", name)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", name, err)
}
buf.WriteString("# ---> " + name + "\n")
buf.Write(data)
buf.WriteString("\n")
}
if buf.Len() > 0 {
if err = ioutil.WriteFile(filepath.Join(tmpDir, ".gitignore"), buf.Bytes(), 0644); err != nil {
return fmt.Errorf("write .gitignore: %v", err)
}
}
}
// LICENSE
if len(opts.License) > 0 {
data, err = getRepoInitFile("license", opts.License)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", opts.License, err)
}
if err = ioutil.WriteFile(filepath.Join(tmpDir, "LICENSE"), data, 0644); err != nil {
return fmt.Errorf("write LICENSE: %v", err)
}
}
return nil
}
// initRepository performs the initial commit with the chosen setup files on behalf of the doer.
func initRepository(e Engine, repoPath string, doer *User, repo *Repository, opts CreateRepoOptions) (err error) {
// Somehow the directory could already exist.
if com.IsExist(repoPath) {
return fmt.Errorf("initRepository: path already exists: %s", repoPath)
}
// Init bare new repository.
if err = git.InitRepository(repoPath, true); err != nil {
return fmt.Errorf("InitRepository: %v", err)
} else if err = createDelegateHooks(repoPath); err != nil {
return fmt.Errorf("createDelegateHooks: %v", err)
}
tmpDir := filepath.Join(os.TempDir(), "gogs-"+repo.Name+"-"+com.ToStr(time.Now().Nanosecond()))
// Initialize repository according to user's choice.
if opts.AutoInit {
os.MkdirAll(tmpDir, os.ModePerm)
defer RemoveAllWithNotice("Delete repository for auto-initialization", tmpDir)
if err = prepareRepoCommit(repo, tmpDir, repoPath, opts); err != nil {
return fmt.Errorf("prepareRepoCommit: %v", err)
}
// Apply changes and commit.
if err = initRepoCommit(tmpDir, doer.NewGitSig()); err != nil {
return fmt.Errorf("initRepoCommit: %v", err)
}
}
// Re-fetch the repository from the database before updating it (otherwise it
// would override changes that were made earlier with raw SQL)
if repo, err = getRepositoryByID(e, repo.ID); err != nil {
return fmt.Errorf("getRepositoryByID: %v", err)
}
if !opts.AutoInit {
repo.IsBare = true
}
repo.DefaultBranch = "master"
if err = updateRepository(e, repo, false); err != nil {
return fmt.Errorf("updateRepository: %v", err)
}
return nil
}
var (
reservedRepoNames = []string{".", ".."}
reservedRepoPatterns = []string{"*.git", "*.wiki"}
)
// IsUsableRepoName returns an error if the given name is a reserved name or matches a reserved pattern.
func IsUsableRepoName(name string) error {
return isUsableName(reservedRepoNames, reservedRepoPatterns, name)
}
func createRepository(e *xorm.Session, doer, owner *User, repo *Repository) (err error) {
if err = IsUsableRepoName(repo.Name); err != nil {
return err
}
has, err := isRepositoryExist(e, owner, repo.Name)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{owner.Name, repo.Name}
}
if _, err = e.Insert(repo); err != nil {
return err
}
owner.NumRepos++
// Remember visibility preference.
owner.LastRepoVisibility = repo.IsPrivate
if err = updateUser(e, owner); err != nil {
return fmt.Errorf("updateUser: %v", err)
}
// Give access to all members in owner team.
if owner.IsOrganization() {
t, err := owner.getOwnerTeam(e)
if err != nil {
return fmt.Errorf("getOwnerTeam: %v", err)
} else if err = t.addRepository(e, repo); err != nil {
return fmt.Errorf("addRepository: %v", err)
}
} else {
// For organizations this is called automatically in the addRepository method.
if err = repo.recalculateAccesses(e); err != nil {
return fmt.Errorf("recalculateAccesses: %v", err)
}
}
if err = watchRepo(e, owner.ID, repo.ID, true); err != nil {
return fmt.Errorf("watchRepo: %v", err)
} else if err = newRepoAction(e, doer, owner, repo); err != nil {
return fmt.Errorf("newRepoAction: %v", err)
}
return repo.loadAttributes(e)
}
// CreateRepository creates a repository for given user or organization.
func CreateRepository(doer, owner *User, opts CreateRepoOptions) (_ *Repository, err error) {
if !owner.CanCreateRepo() {
return nil, errors.ReachLimitOfRepo{owner.RepoCreationNum()}
}
repo := &Repository{
OwnerID: owner.ID,
Owner: owner,
Name: opts.Name,
LowerName: strings.ToLower(opts.Name),
Description: opts.Description,
IsPrivate: opts.IsPrivate,
EnableWiki: true,
EnableIssues: true,
EnablePulls: true,
}
sess := x.NewSession()
defer sess.Close()
if err = sess.Begin(); err != nil {
return nil, err
}
if err = createRepository(sess, doer, owner, repo); err != nil {
return nil, err
}
// No need to initialize a mirror repository here.
if !opts.IsMirror {
repoPath := RepoPath(owner.Name, repo.Name)
if err = initRepository(sess, repoPath, doer, repo, opts); err != nil {
RemoveAllWithNotice("Delete repository for initialization failure", repoPath)
return nil, fmt.Errorf("initRepository: %v", err)
}
_, stderr, err := process.ExecDir(-1,
repoPath, fmt.Sprintf("CreateRepository 'git update-server-info': %s", repoPath),
"git", "update-server-info")
if err != nil {
return nil, fmt.Errorf("CreateRepository 'git update-server-info': %s", stderr)
}
}
return repo, sess.Commit()
}
func countRepositories(userID int64, private bool) int64 {
sess := x.Where("id > 0")
if userID > 0 {
sess.And("owner_id = ?", userID)
}
if !private {
sess.And("is_private=?", false)
}
count, err := sess.Count(new(Repository))
if err != nil {
log.Error(4, "countRepositories: %v", err)
}
return count
}
// CountRepositories returns the total number of repositories.
// The private argument only takes effect when it is false;
// set it to true to count all repositories.
func CountRepositories(private bool) int64 {
return countRepositories(-1, private)
}
// CountUserRepositories returns the number of repositories the user owns.
// The private argument only takes effect when it is false;
// set it to true to count all repositories.
func CountUserRepositories(userID int64, private bool) int64 {
return countRepositories(userID, private)
}
func Repositories(page, pageSize int) (_ []*Repository, err error) {
repos := make([]*Repository, 0, pageSize)
return repos, x.Limit(pageSize, (page-1)*pageSize).Asc("id").Find(&repos)
}
// RepositoriesWithUsers returns the repositories in the given page with their owners loaded.
func RepositoriesWithUsers(page, pageSize int) (_ []*Repository, err error) {
repos, err := Repositories(page, pageSize)
if err != nil {
return nil, fmt.Errorf("Repositories: %v", err)
}
for i := range repos {
if err = repos[i].GetOwner(); err != nil {
return nil, err
}
}
return repos, nil
}
// FilterRepositoryWithIssues selects, from the given set, repositories that use the internal issue tracker
// and have the external tracker disabled.
// It returns nil if the result set is empty.
func FilterRepositoryWithIssues(repoIDs []int64) ([]int64, error) {
if len(repoIDs) == 0 {
return nil, nil
}
repos := make([]*Repository, 0, len(repoIDs))
if err := x.Where("enable_issues=?", true).
And("enable_external_tracker=?", false).
In("id", repoIDs).
Cols("id").
Find(&repos); err != nil {
return nil, fmt.Errorf("filter valid repositories %v: %v", repoIDs, err)
}
if len(repos) == 0 {
return nil, nil
}
repoIDs = make([]int64, len(repos))
for i := range repos {
repoIDs[i] = repos[i].ID
}
return repoIDs, nil
}
// RepoPath returns repository path by given user and repository name.
func RepoPath(userName, repoName string) string {
return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git")
}
// TransferOwnership transfers all corresponding settings from the old user to the new one.
func TransferOwnership(doer *User, newOwnerName string, repo *Repository) error {
newOwner, err := GetUserByName(newOwnerName)
if err != nil {
return fmt.Errorf("get new owner '%s': %v", newOwnerName, err)
}
// Check if new owner has repository with same name.
has, err := IsRepositoryExist(newOwner, repo.Name)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{newOwnerName, repo.Name}
}
sess := x.NewSession()
defer sess.Close()
if err = sess.Begin(); err != nil {
return fmt.Errorf("sess.Begin: %v", err)
}
owner := repo.Owner
// Note: we have to set the value here to make sure the access recalculation is
// based on the new owner.
repo.OwnerID = newOwner.ID
repo.Owner = newOwner
// Update repository.
if _, err := sess.ID(repo.ID).Update(repo); err != nil {
return fmt.Errorf("update owner: %v", err)
}
// Remove redundant collaborators.
collaborators, err := repo.getCollaborators(sess)
if err != nil {
return fmt.Errorf("getCollaborators: %v", err)
}
// Dummy object, reused across iterations with only UserID changing.
collaboration := &Collaboration{RepoID: repo.ID}
for _, c := range collaborators {
collaboration.UserID = c.ID
if c.ID == newOwner.ID || newOwner.IsOrgMember(c.ID) {
if _, err = sess.Delete(collaboration); err != nil {
return fmt.Errorf("remove collaborator '%d': %v", c.ID, err)
}
}
}
// Remove old team-repository relations.
if owner.IsOrganization() {
if err = owner.getTeams(sess); err != nil {
return fmt.Errorf("getTeams: %v", err)
}
for _, t := range owner.Teams {
if !t.hasRepository(sess, repo.ID) {
continue
}
t.NumRepos--
if _, err := sess.ID(t.ID).AllCols().Update(t); err != nil {
return fmt.Errorf("decrease team repository count '%d': %v", t.ID, err)
}
}
if err = owner.removeOrgRepo(sess, repo.ID); err != nil {
return fmt.Errorf("removeOrgRepo: %v", err)
}
}
if newOwner.IsOrganization() {
t, err := newOwner.getOwnerTeam(sess)
if err != nil {
return fmt.Errorf("getOwnerTeam: %v", err)
} else if err = t.addRepository(sess, repo); err != nil {
return fmt.Errorf("add to owner team: %v", err)
}
} else {
// For organizations this is called automatically in the addRepository method.
if err = repo.recalculateAccesses(sess); err != nil {
return fmt.Errorf("recalculateAccesses: %v", err)
}
}
// Update repository count.
if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos+1 WHERE id=?", newOwner.ID); err != nil {
return fmt.Errorf("increase new owner repository count: %v", err)
} else if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", owner.ID); err != nil {
return fmt.Errorf("decrease old owner repository count: %v", err)
}
if err = watchRepo(sess, newOwner.ID, repo.ID, true); err != nil {
return fmt.Errorf("watchRepo: %v", err)
} else if err = transferRepoAction(sess, doer, owner, repo); err != nil {
return fmt.Errorf("transferRepoAction: %v", err)
}
// Rename remote repository to new path and delete local copy.
os.MkdirAll(UserPath(newOwner.Name), os.ModePerm)
if err = os.Rename(RepoPath(owner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil {
return fmt.Errorf("rename repository directory: %v", err)
}
RemoveAllWithNotice("Delete repository local copy", repo.LocalCopyPath())
// Rename remote wiki repository to new path and delete local copy.
wikiPath := WikiPath(owner.Name, repo.Name)
if com.IsExist(wikiPath) {
RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
if err = os.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil {
return fmt.Errorf("rename repository wiki: %v", err)
}
}
return sess.Commit()
}
// ChangeRepositoryName changes all corresponding settings from the old repository name to the new one.
func ChangeRepositoryName(u *User, oldRepoName, newRepoName string) (err error) {
oldRepoName = strings.ToLower(oldRepoName)
newRepoName = strings.ToLower(newRepoName)
if err = IsUsableRepoName(newRepoName); err != nil {
return err
}
has, err := IsRepositoryExist(u, newRepoName)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{u.Name, newRepoName}
}
repo, err := GetRepositoryByName(u.ID, oldRepoName)
if err != nil {
return fmt.Errorf("GetRepositoryByName: %v", err)
}
// Change repository directory name
if err = os.Rename(repo.RepoPath(), RepoPath(u.Name, newRepoName)); err != nil {
return fmt.Errorf("rename repository directory: %v", err)
}
wikiPath := repo.WikiPath()
if com.IsExist(wikiPath) {
if err = os.Rename(wikiPath, WikiPath(u.Name, newRepoName)); err != nil {
return fmt.Errorf("rename repository wiki: %v", err)
}
RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
}
RemoveAllWithNotice("Delete repository local copy", repo.LocalCopyPath())
return nil
}
func getRepositoriesByForkID(e Engine, forkID int64) ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
return repos, e.Where("fork_id=?", forkID).Find(&repos)
}
// GetRepositoriesByForkID returns all repositories with given fork ID.
func GetRepositoriesByForkID(forkID int64) ([]*Repository, error) {
return getRepositoriesByForkID(x, forkID)
}
func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err error) {
repo.LowerName = strings.ToLower(repo.Name)
if len(repo.Description) > 512 {
repo.Description = repo.Description[:512]
}
if len(repo.Website) > 255 {
repo.Website = repo.Website[:255]
}
if _, err = e.ID(repo.ID).AllCols().Update(repo); err != nil {
return fmt.Errorf("update: %v", err)
}
if visibilityChanged {
if err = repo.getOwner(e); err != nil {
return fmt.Errorf("getOwner: %v", err)
}
if repo.Owner.IsOrganization() {
// Organization repositories need to recalculate the access table when visibility is changed
if err = repo.recalculateTeamAccesses(e, 0); err != nil {
return fmt.Errorf("recalculateTeamAccesses: %v", err)
}
}
// Create/Remove git-daemon-export-ok for git-daemon
daemonExportFile := path.Join(repo.RepoPath(), "git-daemon-export-ok")
if repo.IsPrivate && com.IsExist(daemonExportFile) {
if err = os.Remove(daemonExportFile); err != nil {
log.Error(4, "Failed to remove %s: %v", daemonExportFile, err)
}
} else if !repo.IsPrivate && !com.IsExist(daemonExportFile) {
if f, err := os.Create(daemonExportFile); err != nil {
log.Error(4, "Failed to create %s: %v", daemonExportFile, err)
} else {
f.Close()
}
}
forkRepos, err := getRepositoriesByForkID(e, repo.ID)
if err != nil {
return fmt.Errorf("getRepositoriesByForkID: %v", err)
}
for i := range forkRepos {
forkRepos[i].IsPrivate = repo.IsPrivate
if err = updateRepository(e, forkRepos[i], true); err != nil {
return fmt.Errorf("updateRepository[%d]: %v", forkRepos[i].ID, err)
}
}
// Change visibility of generated actions
if _, err = e.Where("repo_id = ?", repo.ID).Cols("is_private").Update(&Action{IsPrivate: repo.IsPrivate}); err != nil {
return fmt.Errorf("change action visibility of repository: %v", err)
}
}
return nil
}
func UpdateRepository(repo *Repository, visibilityChanged bool) (err error) {
sess := x.NewSession()
defer sess.Close()
if err = sess.Begin(); err != nil {
return err
}
if err = updateRepository(sess, repo, visibilityChanged); err != nil {
return fmt.Errorf("updateRepository: %v", err)
}
return sess.Commit()
}
// DeleteRepository deletes a repository for a user or organization.
func DeleteRepository(uid, repoID int64) error {
repo := &Repository{ID: repoID, OwnerID: uid}
has, err := x.Get(repo)
if err != nil {
return err
} else if !has {
return errors.RepoNotExist{repoID, uid, ""}
}
// In case the owner is an organization.
org, err := GetUserByID(uid)
if err != nil {
return err
}
if org.IsOrganization() {
if err = org.GetTeams(); err != nil {
return err
}
}
sess := x.NewSession()
defer sess.Close()
if err = sess.Begin(); err != nil {
return err
}
if org.IsOrganization() {
for _, t := range org.Teams {
if !t.hasRepository(sess, repoID) {
continue
} else if err = t.removeRepository(sess, repo, false); err != nil {
return err
}
}
}
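// Delete all database records that reference this repository in a single batch.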
if err = deleteBeans(sess,
&Repository{ID: repoID},
&Access{RepoID: repo.ID},
&Action{RepoID: repo.ID},
&Watch{RepoID: repoID},
&Star{RepoID: repoID},
&Mirror{RepoID: repoID},
&IssueUser{RepoID: repoID},
&Milestone{RepoID: repoID},
&Release{RepoID: repoID},
&Collaboration{RepoID: repoID},
&PullRequest{BaseRepoID: repoID},
&ProtectBranch{RepoID: repoID},
&ProtectBranchWhitelist{RepoID: repoID},
&Webhook{RepoID: repoID},
&HookTask{RepoID: repoID},
); err != nil {
return fmt.Errorf("deleteBeans: %v", err)
}
// Delete comments and attachments.
issues := make([]*Issue, 0, 25)
attachmentPaths := make([]string, 0, len(issues))
if err = sess.Where("repo_id=?", repoID).Find(&issues); err != nil {
return err
}
for i := range issues {
if _, err = sess.Delete(&Comment{IssueID: issues[i].ID}); err != nil {
return err
}
attachments := make([]*Attachment, 0, 5)
if err = sess.Where("issue_id=?", issues[i].ID).Find(&attachments); err != nil {
return err
}
for j := range attachments {
attachmentPaths = append(attachmentPaths, attachments[j].LocalPath())
}
if _, err = sess.Delete(&Attachment{IssueID: issues[i].ID}); err != nil {
return err
}
}
if _, err = sess.Delete(&Issue{RepoID: repoID}); err != nil {
return err
}
if repo.IsFork {
if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil {
return fmt.Errorf("decrease fork count: %v", err)
}
}
if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", uid); err != nil {
return err
}
if err = sess.Commit(); err != nil {
return fmt.Errorf("Commit: %v", err)
}
// Remove repository files.
repoPath := repo.RepoPath()
RemoveAllWithNotice("Delete repository files", repoPath)
repo.DeleteWiki()
// Remove attachment files.
for i := range attachmentPaths {
RemoveAllWithNotice("Delete attachment", attachmentPaths[i])
}
if repo.NumForks > 0 {
if _, err = x.Exec("UPDATE `repository` SET fork_id=0,is_fork=? WHERE fork_id=?", false, repo.ID); err != nil {
log.Error(4, "reset 'fork_id' and 'is_fork': %v", err)
}
}
return nil
}
// GetRepositoryByRef returns a Repository specified by a GFM reference.
// See https://help.github.com/articles/writing-on-github#references for more information on the syntax.
func GetRepositoryByRef(ref string) (*Repository, error) {
n := strings.IndexByte(ref, byte('/'))
if n < 2 {
return nil, errors.InvalidRepoReference{ref}
}
userName, repoName := ref[:n], ref[n+1:]
user, err := GetUserByName(userName)
if err != nil {
return nil, err
}
return GetRepositoryByName(user.ID, repoName)
}
// GetRepositoryByName returns the repository by given name under user if exists.
func GetRepositoryByName(ownerID int64, name string) (*Repository, error) {
repo := &Repository{
OwnerID: ownerID,
LowerName: strings.ToLower(name),
}
has, err := x.Get(repo)
if err != nil {
return nil, err
} else if !has {
return nil, errors.RepoNotExist{0, ownerID, name}
}
return repo, repo.LoadAttributes()
}
func getRepositoryByID(e Engine, id int64) (*Repository, error) {
repo := new(Repository)
has, err := e.ID(id).Get(repo)
if err != nil {
return nil, err
} else if !has {
return nil, errors.RepoNotExist{id, 0, ""}
}
return repo, repo.loadAttributes(e)
}
// GetRepositoryByID returns the repository by given id if exists.
func GetRepositoryByID(id int64) (*Repository, error) {
return getRepositoryByID(x, id)
}
type UserRepoOptions struct {
UserID int64
Private bool
Page int
PageSize int
}
// GetUserRepositories returns a list of repositories of given user.
func GetUserRepositories(opts *UserRepoOptions) ([]*Repository, error) {
sess := x.Where("owner_id=?", opts.UserID).Desc("updated_unix")
if !opts.Private {
sess.And("is_private=?", false)
}
if opts.Page <= 0 {
opts.Page = 1
}
sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
repos := make([]*Repository, 0, opts.PageSize)
return repos, sess.Find(&repos)
}
// GetUserMirrorRepositories returns a list of mirror repositories of the given user.
func GetUserMirrorRepositories(userID int64) ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
return repos, x.Where("owner_id = ?", userID).And("is_mirror = ?", true).Find(&repos)
}
// GetRecentUpdatedRepositories returns the list of repositories that are recently updated.
func GetRecentUpdatedRepositories(page, pageSize int) (repos []*Repository, err error) {
return repos, x.Limit(pageSize, (page-1)*pageSize).
Where("is_private=?", false).Desc("updated_unix").Find(&repos)
}
// GetUserAndCollaborativeRepositories returns list of repositories the user owns and collaborates.
func GetUserAndCollaborativeRepositories(userID int64) ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
if err := x.Alias("repo").
Join("INNER", "collaboration", "collaboration.repo_id = repo.id").
Where("collaboration.user_id = ?", userID).
Find(&repos); err != nil {
return nil, fmt.Errorf("select collaborative repositories: %v", err)
}
ownRepos := make([]*Repository, 0, 10)
if err := x.Where("owner_id = ?", userID).Find(&ownRepos); err != nil {
return nil, fmt.Errorf("select own repositories: %v", err)
}
return append(repos, ownRepos...), nil
}
func getRepositoryCount(e Engine, u *User) (int64, error) {
return e.Count(&Repository{OwnerID: u.ID})
}
// GetRepositoryCount returns the total number of repositories of user.
func GetRepositoryCount(u *User) (int64, error) {
return getRepositoryCount(x, u)
}
type SearchRepoOptions struct {
Keyword string
OwnerID int64
UserID int64 // When set results will contain all public/private repositories user has access to
OrderBy string
Private bool // Include private repositories in results
Page int
PageSize int // Can be smaller than or equal to setting.ExplorePagingNum
}
// SearchRepositoryByName takes a keyword and part of a repository name to search for;
// it returns results in the given range and the total number of results.
func SearchRepositoryByName(opts *SearchRepoOptions) (repos []*Repository, count int64, err error) {
if opts.Page <= 0 {
opts.Page = 1
}
repos = make([]*Repository, 0, opts.PageSize)
sess := x.Alias("repo")
// Attempt to find repositories that opts.UserID has access to;
// this does not include other people's private repositories even if opts.UserID is an admin.
if !opts.Private && opts.UserID > 0 {
sess.Join("LEFT", "access", "access.repo_id = repo.id").
Where("repo.owner_id = ? OR access.user_id = ? OR repo.is_private = ? OR (repo.is_private = ? AND (repo.allow_public_wiki = ? OR repo.allow_public_issues = ?))", opts.UserID, opts.UserID, false, true, true, true)
} else {
// Only return public repositories if opts.Private is not set
if !opts.Private {
sess.And("repo.is_private = ? OR (repo.is_private = ? AND (repo.allow_public_wiki = ? OR repo.allow_public_issues = ?))", false, true, true, true)
}
}
if len(opts.Keyword) > 0 {
sess.And("repo.lower_name LIKE ? OR repo.description LIKE ?", "%"+strings.ToLower(opts.Keyword)+"%", "%"+strings.ToLower(opts.Keyword)+"%")
}
if opts.OwnerID > 0 {
sess.And("repo.owner_id = ?", opts.OwnerID)
}
// We need all fields (repo.*) in the final list, but the ID alone (repo.id) is enough for counting.
count, err = sess.Clone().Distinct("repo.id").Count(new(Repository))
if err != nil {
return nil, 0, fmt.Errorf("Count: %v", err)
}
if len(opts.OrderBy) > 0 {
sess.OrderBy("repo." + opts.OrderBy)
}
return repos, count, sess.Distinct("repo.*").Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).Find(&repos)
}
func DeleteOldRepositoryArchives() {
if taskStatusTable.IsRunning(_CLEAN_OLD_ARCHIVES) {
return
}
taskStatusTable.Start(_CLEAN_OLD_ARCHIVES)
defer taskStatusTable.Stop(_CLEAN_OLD_ARCHIVES)
log.Trace("Doing: DeleteOldRepositoryArchives")
formats := []string{"zip", "targz"}
oldestTime := time.Now().Add(-setting.Cron.RepoArchiveCleanup.OlderThan)
if err := x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
basePath := filepath.Join(repo.RepoPath(), "archives")
for _, format := range formats {
dirPath := filepath.Join(basePath, format)
if !com.IsDir(dirPath) {
continue
}
dir, err := os.Open(dirPath)
if err != nil {
log.Error(3, "Fail to open directory '%s': %v", dirPath, err)
continue
}
fis, err := dir.Readdir(0)
dir.Close()
if err != nil {
log.Error(3, "Fail to read directory '%s': %v", dirPath, err)
continue
}
for _, fi := range fis {
if fi.IsDir() || fi.ModTime().After(oldestTime) {
continue
}
archivePath := filepath.Join(dirPath, fi.Name())
if err = os.Remove(archivePath); err != nil {
desc := fmt.Sprintf("Fail to health delete archive '%s': %v", archivePath, err)
log.Warn(desc)
if err = CreateRepositoryNotice(desc); err != nil {
log.Error(3, "CreateRepositoryNotice: %v", err)
}
}
}
}
return nil
}); err != nil {
log.Error(2, "DeleteOldRepositoryArchives: %v", err)
}
}
// DeleteRepositoryArchives deletes all repositories' archives.
func DeleteRepositoryArchives() error {
if taskStatusTable.IsRunning(_CLEAN_OLD_ARCHIVES) {
return nil
}
taskStatusTable.Start(_CLEAN_OLD_ARCHIVES)
defer taskStatusTable.Stop(_CLEAN_OLD_ARCHIVES)
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
return os.RemoveAll(filepath.Join(repo.RepoPath(), "archives"))
})
}
func gatherMissingRepoRecords() ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
if err := x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
if !com.IsDir(repo.RepoPath()) {
repos = append(repos, repo)
}
return nil
}); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("gatherMissingRepoRecords: %v", err)); err2 != nil {
return nil, fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
return repos, nil
}
// DeleteMissingRepositories deletes all repository records that have lost their Git files.
func DeleteMissingRepositories() error {
repos, err := gatherMissingRepoRecords()
if err != nil {
return fmt.Errorf("gatherMissingRepoRecords: %v", err)
}
if len(repos) == 0 {
return nil
}
for _, repo := range repos {
log.Trace("Deleting %d/%d...", repo.OwnerID, repo.ID)
if err := DeleteRepository(repo.OwnerID, repo.ID); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("DeleteRepository [%d]: %v", repo.ID, err)); err2 != nil {
return fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
}
return nil
}
// ReinitMissingRepositories reinitializes all repository records that have lost their Git files.
func ReinitMissingRepositories() error {
repos, err := gatherMissingRepoRecords()
if err != nil {
return fmt.Errorf("gatherMissingRepoRecords: %v", err)
}
if len(repos) == 0 {
return nil
}
for _, repo := range repos {
log.Trace("Initializing %d/%d...", repo.OwnerID, repo.ID)
if err := git.InitRepository(repo.RepoPath(), true); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("InitRepository [%d]: %v", repo.ID, err)); err2 != nil {
return fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
}
return nil
}
// SyncRepositoryHooks rewrites all repositories' pre-receive, update and post-receive hooks
// to make sure the binary and custom conf path are up-to-date.
func SyncRepositoryHooks() error {
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
if err := createDelegateHooks(repo.RepoPath()); err != nil {
return err
}
if repo.HasWiki() {
return createDelegateHooks(repo.WikiPath())
}
return nil
})
}
// Prevent duplicate running tasks.
var taskStatusTable = sync.NewStatusTable()
const (
_MIRROR_UPDATE = "mirror_update"
_GIT_FSCK = "git_fsck"
_CHECK_REPO_STATS = "check_repos_stats"
_CLEAN_OLD_ARCHIVES = "clean_old_archives"
)
// GitFsck calls 'git fsck' to check repository health.
func GitFsck() {
if taskStatusTable.IsRunning(_GIT_FSCK) {
return
}
taskStatusTable.Start(_GIT_FSCK)
defer taskStatusTable.Stop(_GIT_FSCK)
log.Trace("Doing: GitFsck")
if err := x.Where("id>0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
repoPath := repo.RepoPath()
if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil {
desc := fmt.Sprintf("Fail to health check repository '%s': %v", repoPath, err)
log.Warn(desc)
if err = CreateRepositoryNotice(desc); err != nil {
log.Error(3, "CreateRepositoryNotice: %v", err)
}
}
return nil
}); err != nil {
log.Error(2, "GitFsck: %v", err)
}
}
func GitGcRepos() error {
args := append([]string{"gc"}, setting.Git.GCArgs...)
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
if err := repo.GetOwner(); err != nil {
return err
}
_, stderr, err := process.ExecDir(
time.Duration(setting.Git.Timeout.GC)*time.Second,
RepoPath(repo.Owner.Name, repo.Name), "Repository garbage collection",
"git", args...)
if err != nil {
return fmt.Errorf("%v: %v", err, stderr)
}
return nil
})
}
type repoChecker struct {
querySQL, correctSQL string
desc string
}
func repoStatsCheck(checker *repoChecker) {
results, err := x.Query(checker.querySQL)
if err != nil {
log.Error(2, "Select %s: %v", checker.desc, err)
return
}
for _, result := range results {
id := com.StrTo(result["id"]).MustInt64()
log.Trace("Updating %s: %d", checker.desc, id)
_, err = x.Exec(checker.correctSQL, id, id)
if err != nil {
log.Error(2, "Update %s[%d]: %v", checker.desc, id, err)
}
}
}
func CheckRepoStats() {
if taskStatusTable.IsRunning(_CHECK_REPO_STATS) {
return
}
taskStatusTable.Start(_CHECK_REPO_STATS)
defer taskStatusTable.Stop(_CHECK_REPO_STATS)
log.Trace("Doing: CheckRepoStats")
checkers := []*repoChecker{
// Repository.NumWatches
{
"SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id)",
"UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=?) WHERE id=?",
"repository count 'num_watches'",
},
// Repository.NumStars
{
"SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)",
"UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?",
"repository count 'num_stars'",
},
// Label.NumIssues
{
"SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)",
"UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?",
"label count 'num_issues'",
},
// User.NumRepos
{
"SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)",
"UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?",
"user count 'num_repos'",
},
// Issue.NumComments
{
"SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)",
"UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?",
"issue count 'num_comments'",
},
}
for i := range checkers {
repoStatsCheck(checkers[i])
}
// ***** START: Repository.NumClosedIssues *****
desc := "repository count 'num_closed_issues'"
results, err := x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_closed_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_closed=? AND is_pull=?)", true, false)
if err != nil {
log.Error(2, "Select %s: %v", desc, err)
} else {
for _, result := range results {
id := com.StrTo(result["id"]).MustInt64()
log.Trace("Updating %s: %d", desc, id)
_, err = x.Exec("UPDATE `repository` SET num_closed_issues=(SELECT COUNT(*) FROM `issue` WHERE repo_id=? AND is_closed=? AND is_pull=?) WHERE id=?", id, true, false, id)
if err != nil {
log.Error(2, "Update %s[%d]: %v", desc, id, err)
}
}
}
// ***** END: Repository.NumClosedIssues *****
// FIXME: use a checker once we stop supporting the old fork repo format.
// ***** START: Repository.NumForks *****
results, err = x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)")
if err != nil {
log.Error(2, "Select repository count 'num_forks': %v", err)
} else {
for _, result := range results {
id := com.StrTo(result["id"]).MustInt64()
log.Trace("Updating repository count 'num_forks': %d", id)
repo, err := GetRepositoryByID(id)
if err != nil {
log.Error(2, "GetRepositoryByID[%d]: %v", id, err)
continue
}
rawResult, err := x.Query("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID)
if err != nil {
log.Error(2, "Select count of forks[%d]: %v", repo.ID, err)
continue
}
repo.NumForks = int(parseCountResult(rawResult))
if err = UpdateRepository(repo, false); err != nil {
log.Error(2, "UpdateRepository[%d]: %v", id, err)
continue
}
}
}
// ***** END: Repository.NumForks *****
}
type RepositoryList []*Repository
func (repos RepositoryList) loadAttributes(e Engine) error {
if len(repos) == 0 {
return nil
}
// Load owners
userSet := make(map[int64]*User)
for i := range repos {
userSet[repos[i].OwnerID] = nil
}
userIDs := make([]int64, 0, len(userSet))
for userID := range userSet {
userIDs = append(userIDs, userID)
}
users := make([]*User, 0, len(userIDs))
if err := e.Where("id > 0").In("id", userIDs).Find(&users); err != nil {
return fmt.Errorf("find users: %v", err)
}
for i := range users {
userSet[users[i].ID] = users[i]
}
for i := range repos {
repos[i].Owner = userSet[repos[i].OwnerID]
}
// Load base repositories
repoSet := make(map[int64]*Repository)
for i := range repos {
if repos[i].IsFork {
repoSet[repos[i].ForkID] = nil
}
}
baseIDs := make([]int64, 0, len(repoSet))
for baseID := range repoSet {
baseIDs = append(baseIDs, baseID)
}
baseRepos := make([]*Repository, 0, len(baseIDs))
if err := e.Where("id > 0").In("id", baseIDs).Find(&baseRepos); err != nil {
return fmt.Errorf("find base repositories: %v", err)
}
for i := range baseRepos {
repoSet[baseRepos[i].ID] = baseRepos[i]
}
for i := range repos {
if repos[i].IsFork {
repos[i].BaseRepo = repoSet[repos[i].ForkID]
}
}
return nil
}
func (repos RepositoryList) LoadAttributes() error {
return repos.loadAttributes(x)
}
type MirrorRepositoryList []*Repository
func (repos MirrorRepositoryList) loadAttributes(e Engine) error {
if len(repos) == 0 {
return nil
}
// Load mirrors.
repoIDs := make([]int64, 0, len(repos))
for i := range repos {
if !repos[i].IsMirror {
continue
}
repoIDs = append(repoIDs, repos[i].ID)
}
mirrors := make([]*Mirror, 0, len(repoIDs))
if err := e.Where("id > 0").In("repo_id", repoIDs).Find(&mirrors); err != nil {
return fmt.Errorf("find mirrors: %v", err)
}
set := make(map[int64]*Mirror)
for i := range mirrors {
set[mirrors[i].RepoID] = mirrors[i]
}
for i := range repos {
repos[i].Mirror = set[repos[i].ID]
}
return nil
}
func (repos MirrorRepositoryList) LoadAttributes() error {
return repos.loadAttributes(x)
}
// __ __ __ .__
// / \ / \_____ _/ |_ ____ | |__
// \ \/\/ /\__ \\ __\/ ___\| | \
// \ / / __ \| | \ \___| Y \
// \__/\ / (____ /__| \___ >___| /
// \/ \/ \/ \/
// Watch represents a user's subscription to notifications of a repository.
type Watch struct {
ID int64
UserID int64 `xorm:"UNIQUE(watch)"`
RepoID int64 `xorm:"UNIQUE(watch)"`
}
func isWatching(e Engine, userID, repoID int64) bool {
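// ID is left at its zero value so xorm builds the lookup from UserID and RepoID only.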
has, _ := e.Get(&Watch{0, userID, repoID})
return has
}
// IsWatching checks if the user is watching the given repository.
func IsWatching(userID, repoID int64) bool {
return isWatching(x, userID, repoID)
}
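// watchRepo toggles the watch record and keeps the denormalized num_watches counter in sync.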
func watchRepo(e Engine, userID, repoID int64, watch bool) (err error) {
if watch {
if isWatching(e, userID, repoID) {
return nil
}
if _, err = e.Insert(&Watch{RepoID: repoID, UserID: userID}); err != nil {
return err
}
_, err = e.Exec("UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?", repoID)
} else {
if !isWatching(e, userID, repoID) {
return nil
}
if _, err = e.Delete(&Watch{0, userID, repoID}); err != nil {
return err
}
_, err = e.Exec("UPDATE `repository` SET num_watches = num_watches - 1 WHERE id = ?", repoID)
}
return err
}
// WatchRepo watches or unwatches the given repository for the user.
func WatchRepo(userID, repoID int64, watch bool) (err error) {
return watchRepo(x, userID, repoID, watch)
}
func getWatchers(e Engine, repoID int64) ([]*Watch, error) {
watches := make([]*Watch, 0, 10)
return watches, e.Find(&watches, &Watch{RepoID: repoID})
}
// GetWatchers returns all watchers of given repository.
func GetWatchers(repoID int64) ([]*Watch, error) {
return getWatchers(x, repoID)
}
// Repository.GetWatchers returns a page of users watching the given repository.
func (repo *Repository) GetWatchers(page int) ([]*User, error) {
users := make([]*User, 0, ItemsPerPage)
sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("watch.repo_id=?", repo.ID)
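// "user" is a reserved keyword in PostgreSQL, so the table name must be double-quoted there.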
if setting.UsePostgreSQL {
sess = sess.Join("LEFT", "watch", `"user".id=watch.user_id`)
} else {
sess = sess.Join("LEFT", "watch", "user.id=watch.user_id")
}
return users, sess.Find(&users)
}
func notifyWatchers(e Engine, act *Action) error {
// Add feeds for the user itself and all watchers.
watchers, err := getWatchers(e, act.RepoID)
if err != nil {
return fmt.Errorf("getWatchers: %v", err)
}
// Reset ID to reuse Action object
act.ID = 0
// Add feed for actioner.
act.UserID = act.ActUserID
if _, err = e.Insert(act); err != nil {
return fmt.Errorf("insert new action: %v", err)
}
for i := range watchers {
if act.ActUserID == watchers[i].UserID {
continue
}
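// Clear the auto-assigned ID again so the same Action value can be inserted as a new row for this watcher.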
act.ID = 0
act.UserID = watchers[i].UserID
if _, err = e.Insert(act); err != nil {
return fmt.Errorf("insert new action: %v", err)
}
}
return nil
}
// NotifyWatchers creates a batch of actions, one for every watcher.
func NotifyWatchers(act *Action) error {
return notifyWatchers(x, act)
}
// _________ __
// / _____// |______ _______
// \_____ \\ __\__ \\_ __ \
// / \| | / __ \| | \/
// /_______ /|__| (____ /__|
// \/ \/
type Star struct {
ID int64
UID int64 `xorm:"UNIQUE(s)"`
RepoID int64 `xorm:"UNIQUE(s)"`
}
// StarRepo stars or unstars the given repository for the user.
func StarRepo(userID, repoID int64, star bool) (err error) {
if star {
if IsStaring(userID, repoID) {
return nil
}
if _, err = x.Insert(&Star{UID: userID, RepoID: repoID}); err != nil {
return err
} else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoID); err != nil {
return err
}
_, err = x.Exec("UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", userID)
} else {
if !IsStaring(userID, repoID) {
return nil
}
if _, err = x.Delete(&Star{0, userID, repoID}); err != nil {
return err
} else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoID); err != nil {
return err
}
_, err = x.Exec("UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", userID)
}
return err
}
// IsStaring checks if the user has starred the given repository.
func IsStaring(userID, repoID int64) bool {
has, _ := x.Get(&Star{0, userID, repoID})
return has
}
func (repo *Repository) GetStargazers(page int) ([]*User, error) {
users := make([]*User, 0, ItemsPerPage)
sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("star.repo_id=?", repo.ID)
if setting.UsePostgreSQL {
sess = sess.Join("LEFT", "star", `"user".id=star.uid`)
} else {
sess = sess.Join("LEFT", "star", "user.id=star.uid")
}
return users, sess.Find(&users)
}
// ___________ __
// \_ _____/__________| | __
// | __)/ _ \_ __ \ |/ /
// | \( <_> ) | \/ <
// \___ / \____/|__| |__|_ \
// \/ \/
// HasForkedRepo checks if the given user has already forked the repository.
// If so, it returns true along with the forked repository.
func HasForkedRepo(ownerID, repoID int64) (*Repository, bool, error) {
repo := new(Repository)
has, err := x.Where("owner_id = ? AND fork_id = ?", ownerID, repoID).Get(repo)
if err != nil {
return nil, false, err
} else if !has {
return nil, false, nil
}
return repo, true, repo.LoadAttributes()
}
// ForkRepository creates a fork of the target repository under another user's namespace.
func ForkRepository(doer, owner *User, baseRepo *Repository, name, desc string) (_ *Repository, err error) {
if !owner.CanCreateRepo() {
return nil, errors.ReachLimitOfRepo{owner.RepoCreationNum()}
}
repo := &Repository{
OwnerID: owner.ID,
Owner: owner,
Name: name,
LowerName: strings.ToLower(name),
Description: desc,
DefaultBranch: baseRepo.DefaultBranch,
IsPrivate: baseRepo.IsPrivate,
IsFork: true,
ForkID: baseRepo.ID,
}
sess := x.NewSession()
defer sess.Close()
if err = sess.Begin(); err != nil {
return nil, err
}
if err = createRepository(sess, doer, owner, repo); err != nil {
return nil, err
} else if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", baseRepo.ID); err != nil {
return nil, err
}
repoPath := repo.repoPath(sess)
RemoveAllWithNotice("Repository path erase before creation", repoPath)
_, stderr, err := process.ExecTimeout(10*time.Minute,
fmt.Sprintf("ForkRepository 'git clone': %s/%s", owner.Name, repo.Name),
"git", "clone", "--bare", baseRepo.RepoPath(), repoPath)
if err != nil {
return nil, fmt.Errorf("git clone: %v", stderr)
}
_, stderr, err = process.ExecDir(-1,
repoPath, fmt.Sprintf("ForkRepository 'git update-server-info': %s", repoPath),
"git", "update-server-info")
if err != nil {
return nil, fmt.Errorf("git update-server-info: %v", err)
}
if err = createDelegateHooks(repoPath); err != nil {
return nil, fmt.Errorf("createDelegateHooks: %v", err)
}
if err = sess.Commit(); err != nil {
return nil, fmt.Errorf("Commit: %v", err)
}
if err = repo.UpdateSize(); err != nil {
log.Error(2, "UpdateSize [repo_id: %d]: %v", repo.ID, err)
}
if err = PrepareWebhooks(baseRepo, HOOK_EVENT_FORK, &api.ForkPayload{
Forkee: repo.APIFormat(nil),
Repo: baseRepo.APIFormat(nil),
Sender: doer.APIFormat(),
}); err != nil {
log.Error(2, "PrepareWebhooks [repo_id: %d]: %v", baseRepo.ID, err)
}
return repo, nil
}
func (repo *Repository) GetForks() ([]*Repository, error) {
forks := make([]*Repository, 0, repo.NumForks)
if err := x.Find(&forks, &Repository{ForkID: repo.ID}); err != nil {
return nil, err
}
for _, fork := range forks {
fork.BaseRepo = repo
}
return forks, nil
}
// __________ .__
// \______ \____________ ____ ____ | |__
// | | _/\_ __ \__ \ / \_/ ___\| | \
// | | \ | | \// __ \| | \ \___| Y \
// |______ / |__| (____ /___| /\___ >___| /
// \/ \/ \/ \/ \/
//
func (repo *Repository) CreateNewBranch(doer *User, oldBranchName, branchName string) (err error) {
repoWorkingPool.CheckIn(com.ToStr(repo.ID))
defer repoWorkingPool.CheckOut(com.ToStr(repo.ID))
localPath := repo.LocalCopyPath()
if err = discardLocalRepoBranchChanges(localPath, oldBranchName); err != nil {
return fmt.Errorf("discardLocalRepoChanges: %v", err)
} else if err = repo.UpdateLocalCopyBranch(oldBranchName); err != nil {
return fmt.Errorf("UpdateLocalCopyBranch: %v", err)
}
if err = repo.CheckoutNewBranch(oldBranchName, branchName); err != nil {
return fmt.Errorf("CreateNewBranch: %v", err)
}
if err = git.Push(localPath, "origin", branchName); err != nil {
return fmt.Errorf("Push: %v", err)
}
return nil
}
| 1 | 13,122 | I prefer messages that start with `Failed to xxx`; how about `Failed to perform health check on xxx`? | gogs-gogs | go |
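A minimal sketch of the suggested wording applied to the GitFsck notice in the file above (hypothetical; the row's actual patch is not shown here):

// Hypothetical rewording per the review: notices start with "Failed to ...".
if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil {
	desc := fmt.Sprintf("Failed to perform health check on repository '%s': %v", repoPath, err)
	log.Warn(desc)
	if err = CreateRepositoryNotice(desc); err != nil {
		log.Error(3, "CreateRepositoryNotice: %v", err)
	}
}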
@@ -141,9 +141,15 @@ namespace Datadog.Trace
if (AzureAppServices.Metadata?.IsRelevant ?? false)
{
span.SetTag(Tags.AzureAppServicesSiteName, AzureAppServices.Metadata.SiteName);
+ span.SetTag(Tags.AzureAppServicesSiteKind, AzureAppServices.Metadata.SiteKind);
+ span.SetTag(Tags.AzureAppServicesSiteType, AzureAppServices.Metadata.SiteType);
span.SetTag(Tags.AzureAppServicesResourceGroup, AzureAppServices.Metadata.ResourceGroup);
span.SetTag(Tags.AzureAppServicesSubscriptionId, AzureAppServices.Metadata.SubscriptionId);
span.SetTag(Tags.AzureAppServicesResourceId, AzureAppServices.Metadata.ResourceId);
+ span.SetTag(Tags.AzureAppServicesInstanceId, AzureAppServices.Metadata.InstanceId);
+ span.SetTag(Tags.AzureAppServicesInstanceName, AzureAppServices.Metadata.InstanceName);
+ span.SetTag(Tags.AzureAppServicesOperatingSystem, AzureAppServices.Metadata.OperatingSystem);
+ span.SetTag(Tags.AzureAppServicesRuntime, AzureAppServices.Metadata.Runtime);
}
// set the origin tag to the root span of each trace/subtrace
| 1 |
using System;
using System.Collections.Generic;
using System.Diagnostics;
using Datadog.Trace.Logging;
using Datadog.Trace.PlatformHelpers;
using Datadog.Trace.Util;
namespace Datadog.Trace
{
internal class TraceContext : ITraceContext
{
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.For<TraceContext>();
private readonly DateTimeOffset _utcStart = DateTimeOffset.UtcNow;
private readonly long _timestamp = Stopwatch.GetTimestamp();
private readonly List<Span> _spans = new List<Span>();
private int _openSpans;
private SamplingPriority? _samplingPriority;
private bool _samplingPriorityLocked;
public TraceContext(IDatadogTracer tracer)
{
Tracer = tracer;
}
public Span RootSpan { get; private set; }
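// UtcNow is derived from the monotonic Stopwatch timestamp so elapsed time is immune to system clock adjustments.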
public DateTimeOffset UtcNow => _utcStart.Add(Elapsed);
public IDatadogTracer Tracer { get; }
/// <summary>
/// Gets or sets sampling priority.
/// Once the sampling priority is locked with <see cref="LockSamplingPriority"/>,
/// further attempts to set this are ignored.
/// </summary>
public SamplingPriority? SamplingPriority
{
get => _samplingPriority;
set
{
if (!_samplingPriorityLocked)
{
_samplingPriority = value;
}
}
}
private TimeSpan Elapsed => StopwatchHelpers.GetElapsed(Stopwatch.GetTimestamp() - _timestamp);
public void AddSpan(Span span)
{
lock (_spans)
{
if (RootSpan == null)
{
// first span added is the root span
RootSpan = span;
DecorateRootSpan(span);
if (_samplingPriority == null)
{
if (span.Context.Parent is SpanContext context && context.SamplingPriority != null)
{
// this is a root span created from a propagated context that contains a sampling priority.
// lock sampling priority when a span is started from a propagated trace.
_samplingPriority = context.SamplingPriority;
LockSamplingPriority();
}
else
{
// this is a local root span (i.e. not propagated).
// determine an initial sampling priority for this trace, but don't lock it yet
_samplingPriority =
Tracer.Sampler?.GetSamplingPriority(RootSpan);
}
}
}
_spans.Add(span);
_openSpans++;
}
}
public void CloseSpan(Span span)
{
if (span == RootSpan)
{
// lock sampling priority and set metric when root span finishes
LockSamplingPriority();
if (_samplingPriority == null)
{
Log.Warning("Cannot set span metric for sampling priority before it has been set.");
}
else
{
span.SetMetric(Metrics.SamplingPriority, (int)_samplingPriority);
}
}
Span[] spansToWrite = null;
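// When the last open span closes, take ownership of the buffered spans inside the lock and write them outside it.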
lock (_spans)
{
_openSpans--;
if (_openSpans == 0)
{
spansToWrite = _spans.ToArray();
_spans.Clear();
}
}
if (spansToWrite != null)
{
Tracer.Write(spansToWrite);
}
}
public void LockSamplingPriority()
{
if (_samplingPriority == null)
{
Log.Warning("Cannot lock sampling priority before it has been set.");
}
else
{
_samplingPriorityLocked = true;
}
}
public TimeSpan ElapsedSince(DateTimeOffset date)
{
return Elapsed + (_utcStart - date);
}
private void DecorateRootSpan(Span span)
{
if (AzureAppServices.Metadata?.IsRelevant ?? false)
{
span.SetTag(Tags.AzureAppServicesSiteName, AzureAppServices.Metadata.SiteName);
span.SetTag(Tags.AzureAppServicesResourceGroup, AzureAppServices.Metadata.ResourceGroup);
span.SetTag(Tags.AzureAppServicesSubscriptionId, AzureAppServices.Metadata.SubscriptionId);
span.SetTag(Tags.AzureAppServicesResourceId, AzureAppServices.Metadata.ResourceId);
}
// set the origin tag to the root span of each trace/subtrace
if (span.Context.Origin != null)
{
span.SetTag(Tags.Origin, span.Context.Origin);
}
}
}
}
| 1 | 18,658 | All these calls make me think we should refactor how traces are started and allow "source tags" that we initialize a trace with. I'd like to do this in a follow-up. | DataDog-dd-trace-dotnet | .cs |
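A rough sketch of that follow-up idea, assuming a hypothetical sourceTags argument on the constructor; every name below is invented for illustration and is not the actual dd-trace-dotnet API:

// Hypothetical refactor: seed a trace with "source tags" at construction
// instead of hard-coding each span.SetTag(...) call in DecorateRootSpan.
internal class TraceContextSketch
{
    // Assumes Span and IDatadogTracer from the file above, plus System.Collections.Generic.
    private readonly IReadOnlyDictionary<string, string> _sourceTags;

    public TraceContextSketch(IDatadogTracer tracer, IReadOnlyDictionary<string, string> sourceTags = null)
    {
        Tracer = tracer;
        _sourceTags = sourceTags ?? new Dictionary<string, string>();
    }

    public IDatadogTracer Tracer { get; }

    private void DecorateRootSpan(Span span)
    {
        // One loop replaces the growing list of per-tag calls.
        foreach (var pair in _sourceTags)
        {
            span.SetTag(pair.Key, pair.Value);
        }
    }
}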
@@ -1,6 +1,6 @@
// DO NOT EDIT: This file is autogenerated via the builtin command.
-package v1_test
+package v1
import (
ast "github.com/influxdata/flux/ast" | 1 | // DO NOT EDIT: This file is autogenerated via the builtin command.
package v1_test
import (
ast "github.com/influxdata/flux/ast"
parser "github.com/influxdata/flux/internal/parser"
)
var FluxTestPackages = []*ast.Package{&ast.Package{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: nil,
},
Files: []*ast.File{&ast.File{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 111,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "package v1_test\n\nimport \"testing\"\n\ninput = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,sys,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,sys,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,sys,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,sys,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,sys,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,sys,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,sys,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,sys,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,sys,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,sys,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,sys,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,sys,host.local,load5,1.93\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,reg,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.local,load1,10\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.local,load1,11\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.local,load1,18\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.local,load1,19\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.local,load1,17\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.local,load1,17\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,region,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.global,load1,10\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.global,load1,11\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.global,load1,18\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.global,load1,19\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.global,load1,17\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.global,load1,17\n\n,,1,2018-05-22T19:53:26Z,swp,us-east,host.global,load3,16\n,,1,2018-05-22T19:53:36Z,swp,us-east,host.global,load3,16\n,,1,2018-05-22T19:53:46Z,swp,us-east,host.global,load3,15\n,,1,2018-05-22T19:53:56Z,swp,us-east,host.global,load3,19\n,,1,2018-05-22T19:54:06Z,swp,us-east,host.global,load3,19\n,,1,2018-05-22T19:54:16Z,swp,us-east,host.global,load3,19\n\n,,2,2018-05-22T19:53:26Z,swp,us-east,host.global,load5,19\n,,2,2018-05-22T19:53:36Z,swp,us-east,host.global,load5,22\n,,2,2018-05-22T19:53:46Z,swp,us-east,host.global,load5,11\n,,2,2018-05-22T19:53:56Z,swp,us-east,host.global,load5,12\n,,2,2018-05-22T19:54:06Z,swp,us-east,host.global,load5,13\n,,2,2018-05-22T19:54:16Z,swp,us-east,host.global,load5,13\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,double\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,region,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.global,load2,10.003\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.global,load2,11.873\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.global,load2,18.832\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.global,load2,19.777\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.global,load2,17.19
0\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.global,load2,17.192\n\"\n\noutput = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,_field\n,,0,_measurement\n,,0,_start\n,,0,_stop\n,,0,host\n,,0,region\n\"\n\nmeasurement_tag_keys_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")\n |> filter(fn: (r) => r.host == \"host.global\")\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()\n |> sort()\n\ntest measurement_tag_keys = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_keys_fn}",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Body: []ast.Statement{&ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 77,
},
File: "measurement_tag_keys_test.flux",
Source: "input = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,sys,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,sys,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,sys,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,sys,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,sys,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,sys,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,sys,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,sys,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,sys,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,sys,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,sys,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,sys,host.local,load5,1.93\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,reg,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.local,load1,10\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.local,load1,11\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.local,load1,18\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.local,load1,19\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.local,load1,17\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.local,load1,17\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,region,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.global,load1,10\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.global,load1,11\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.global,load1,18\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.global,load1,19\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.global,load1,17\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.global,load1,17\n\n,,1,2018-05-22T19:53:26Z,swp,us-east,host.global,load3,16\n,,1,2018-05-22T19:53:36Z,swp,us-east,host.global,load3,16\n,,1,2018-05-22T19:53:46Z,swp,us-east,host.global,load3,15\n,,1,2018-05-22T19:53:56Z,swp,us-east,host.global,load3,19\n,,1,2018-05-22T19:54:06Z,swp,us-east,host.global,load3,19\n,,1,2018-05-22T19:54:16Z,swp,us-east,host.global,load3,19\n\n,,2,2018-05-22T19:53:26Z,swp,us-east,host.global,load5,19\n,,2,2018-05-22T19:53:36Z,swp,us-east,host.global,load5,22\n,,2,2018-05-22T19:53:46Z,swp,us-east,host.global,load5,11\n,,2,2018-05-22T19:53:56Z,swp,us-east,host.global,load5,12\n,,2,2018-05-22T19:54:06Z,swp,us-east,host.global,load5,13\n,,2,2018-05-22T19:54:16Z,swp,us-east,host.global,load5,13\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,double\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,region,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.global,load2,10.003\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.global,load2,11.873\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.global,load2,18.832\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.global,load2,19.777\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.global,load2,17.190\n,,0,2018-05-22T19:54:16Z,swp,us-east,h
ost.global,load2,17.192\n\"",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 6,
Line: 5,
},
File: "measurement_tag_keys_test.flux",
Source: "input",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
Name: "input",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 77,
},
File: "measurement_tag_keys_test.flux",
Source: "\"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,sys,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,sys,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,sys,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,sys,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,sys,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,sys,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,sys,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,sys,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,sys,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,sys,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,sys,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,sys,host.local,load5,1.93\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,reg,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.local,load1,10\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.local,load1,11\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.local,load1,18\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.local,load1,19\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.local,load1,17\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.local,load1,17\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,region,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.global,load1,10\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.global,load1,11\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.global,load1,18\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.global,load1,19\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.global,load1,17\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.global,load1,17\n\n,,1,2018-05-22T19:53:26Z,swp,us-east,host.global,load3,16\n,,1,2018-05-22T19:53:36Z,swp,us-east,host.global,load3,16\n,,1,2018-05-22T19:53:46Z,swp,us-east,host.global,load3,15\n,,1,2018-05-22T19:53:56Z,swp,us-east,host.global,load3,19\n,,1,2018-05-22T19:54:06Z,swp,us-east,host.global,load3,19\n,,1,2018-05-22T19:54:16Z,swp,us-east,host.global,load3,19\n\n,,2,2018-05-22T19:53:26Z,swp,us-east,host.global,load5,19\n,,2,2018-05-22T19:53:36Z,swp,us-east,host.global,load5,22\n,,2,2018-05-22T19:53:46Z,swp,us-east,host.global,load5,11\n,,2,2018-05-22T19:53:56Z,swp,us-east,host.global,load5,12\n,,2,2018-05-22T19:54:06Z,swp,us-east,host.global,load5,13\n,,2,2018-05-22T19:54:16Z,swp,us-east,host.global,load5,13\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,double\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,region,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.global,load2,10.003\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.global,load2,11.873\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.global,load2,18.832\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.global,load2,19.777\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.global,load2,17.190\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.glob
al,load2,17.192\n\"",
Start: ast.Position{
Column: 9,
Line: 5,
},
},
},
Value: "\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,sys,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,sys,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,sys,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,sys,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,sys,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,sys,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,sys,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,sys,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,sys,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,sys,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,sys,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,sys,host.local,load5,1.93\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,reg,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.local,load1,10\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.local,load1,11\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.local,load1,18\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.local,load1,19\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.local,load1,17\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.local,load1,17\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,region,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.global,load1,10\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.global,load1,11\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.global,load1,18\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.global,load1,19\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.global,load1,17\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.global,load1,17\n\n,,1,2018-05-22T19:53:26Z,swp,us-east,host.global,load3,16\n,,1,2018-05-22T19:53:36Z,swp,us-east,host.global,load3,16\n,,1,2018-05-22T19:53:46Z,swp,us-east,host.global,load3,15\n,,1,2018-05-22T19:53:56Z,swp,us-east,host.global,load3,19\n,,1,2018-05-22T19:54:06Z,swp,us-east,host.global,load3,19\n,,1,2018-05-22T19:54:16Z,swp,us-east,host.global,load3,19\n\n,,2,2018-05-22T19:53:26Z,swp,us-east,host.global,load5,19\n,,2,2018-05-22T19:53:36Z,swp,us-east,host.global,load5,22\n,,2,2018-05-22T19:53:46Z,swp,us-east,host.global,load5,11\n,,2,2018-05-22T19:53:56Z,swp,us-east,host.global,load5,12\n,,2,2018-05-22T19:54:06Z,swp,us-east,host.global,load5,13\n,,2,2018-05-22T19:54:16Z,swp,us-east,host.global,load5,13\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,double\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,region,host,_field,_value\n,,0,2018-05-22T19:53:26Z,swp,us-east,host.global,load2,10.003\n,,0,2018-05-22T19:53:36Z,swp,us-east,host.global,load2,11.873\n,,0,2018-05-22T19:53:46Z,swp,us-east,host.global,load2,18.832\n,,0,2018-05-22T19:53:56Z,swp,us-east,host.global,load2,19.777\n,,0,2018-05-22T19:54:06Z,swp,us-east,host.global,load2,17.190\n,,0,2018-05-22T19:54:16Z,swp,us-east,host.global,
load2,17.192\n",
},
}, &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 90,
},
File: "measurement_tag_keys_test.flux",
Source: "output = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,_field\n,,0,_measurement\n,,0,_start\n,,0,_stop\n,,0,host\n,,0,region\n\"",
Start: ast.Position{
Column: 1,
Line: 79,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 7,
Line: 79,
},
File: "measurement_tag_keys_test.flux",
Source: "output",
Start: ast.Position{
Column: 1,
Line: 79,
},
},
},
Name: "output",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 90,
},
File: "measurement_tag_keys_test.flux",
Source: "\"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,_field\n,,0,_measurement\n,,0,_start\n,,0,_stop\n,,0,host\n,,0,region\n\"",
Start: ast.Position{
Column: 10,
Line: 79,
},
},
},
Value: "\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,_field\n,,0,_measurement\n,,0,_start\n,,0,_stop\n,,0,host\n,,0,region\n",
},
}, &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 99,
},
File: "measurement_tag_keys_test.flux",
Source: "measurement_tag_keys_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")\n |> filter(fn: (r) => r.host == \"host.global\")\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()\n |> sort()",
Start: ast.Position{
Column: 1,
Line: 92,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 24,
Line: 92,
},
File: "measurement_tag_keys_test.flux",
Source: "measurement_tag_keys_fn",
Start: ast.Position{
Column: 1,
Line: 92,
},
},
},
Name: "measurement_tag_keys_fn",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 99,
},
File: "measurement_tag_keys_test.flux",
Source: "(tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")\n |> filter(fn: (r) => r.host == \"host.global\")\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()\n |> sort()",
Start: ast.Position{
Column: 27,
Line: 92,
},
},
},
Body: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 48,
Line: 92,
},
File: "measurement_tag_keys_test.flux",
Source: "tables",
Start: ast.Position{
Column: 42,
Line: 92,
},
},
},
Name: "tables",
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 42,
Line: 92,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 93,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "start: 2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 93,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 19,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "start",
Start: ast.Position{
Column: 14,
Line: 93,
},
},
},
Name: "start",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 21,
Line: 93,
},
},
},
Value: parser.MustParseTime("2018-01-01T00:00:00Z"),
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 43,
Line: 93,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 47,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "stop",
Start: ast.Position{
Column: 43,
Line: 93,
},
},
},
Name: "stop",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 49,
Line: 93,
},
},
},
Value: parser.MustParseTime("2019-01-01T00:00:00Z"),
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 8,
Line: 93,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 13,
Line: 93,
},
File: "measurement_tag_keys_test.flux",
Source: "range",
Start: ast.Position{
Column: 8,
Line: 93,
},
},
},
Name: "range",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 50,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")",
Start: ast.Position{
Column: 42,
Line: 92,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "fn: (r) => r._measurement == \"swp\"",
Start: ast.Position{
Column: 15,
Line: 94,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "fn: (r) => r._measurement == \"swp\"",
Start: ast.Position{
Column: 15,
Line: 94,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "fn",
Start: ast.Position{
Column: 15,
Line: 94,
},
},
},
Name: "fn",
},
Value: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "(r) => r._measurement == \"swp\"",
Start: ast.Position{
Column: 19,
Line: 94,
},
},
},
Body: &ast.BinaryExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "r._measurement == \"swp\"",
Start: ast.Position{
Column: 26,
Line: 94,
},
},
},
Left: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 40,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "r._measurement",
Start: ast.Position{
Column: 26,
Line: 94,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 27,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "r",
Start: ast.Position{
Column: 26,
Line: 94,
},
},
},
Name: "r",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 40,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "_measurement",
Start: ast.Position{
Column: 28,
Line: 94,
},
},
},
Name: "_measurement",
},
},
Operator: 14,
Right: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "\"swp\"",
Start: ast.Position{
Column: 44,
Line: 94,
},
},
},
Value: "swp",
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 94,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 94,
},
},
},
Name: "r",
},
Value: nil,
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 50,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "filter(fn: (r) => r._measurement == \"swp\")",
Start: ast.Position{
Column: 8,
Line: 94,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 94,
},
File: "measurement_tag_keys_test.flux",
Source: "filter",
Start: ast.Position{
Column: 8,
Line: 94,
},
},
},
Name: "filter",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 50,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")\n |> filter(fn: (r) => r.host == \"host.global\")",
Start: ast.Position{
Column: 42,
Line: 92,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "fn: (r) => r.host == \"host.global\"",
Start: ast.Position{
Column: 15,
Line: 95,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "fn: (r) => r.host == \"host.global\"",
Start: ast.Position{
Column: 15,
Line: 95,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "fn",
Start: ast.Position{
Column: 15,
Line: 95,
},
},
},
Name: "fn",
},
Value: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "(r) => r.host == \"host.global\"",
Start: ast.Position{
Column: 19,
Line: 95,
},
},
},
Body: &ast.BinaryExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "r.host == \"host.global\"",
Start: ast.Position{
Column: 26,
Line: 95,
},
},
},
Left: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "r.host",
Start: ast.Position{
Column: 26,
Line: 95,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 27,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "r",
Start: ast.Position{
Column: 26,
Line: 95,
},
},
},
Name: "r",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "host",
Start: ast.Position{
Column: 28,
Line: 95,
},
},
},
Name: "host",
},
},
Operator: 14,
Right: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "\"host.global\"",
Start: ast.Position{
Column: 36,
Line: 95,
},
},
},
Value: "host.global",
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 95,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 95,
},
},
},
Name: "r",
},
Value: nil,
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 50,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "filter(fn: (r) => r.host == \"host.global\")",
Start: ast.Position{
Column: 8,
Line: 95,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 95,
},
File: "measurement_tag_keys_test.flux",
Source: "filter",
Start: ast.Position{
Column: 8,
Line: 95,
},
},
},
Name: "filter",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 96,
},
File: "measurement_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")\n |> filter(fn: (r) => r.host == \"host.global\")\n |> keys()",
Start: ast.Position{
Column: 42,
Line: 92,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 96,
},
File: "measurement_tag_keys_test.flux",
Source: "keys()",
Start: ast.Position{
Column: 8,
Line: 96,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 96,
},
File: "measurement_tag_keys_test.flux",
Source: "keys",
Start: ast.Position{
Column: 8,
Line: 96,
},
},
},
Name: "keys",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 97,
},
File: "measurement_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")\n |> filter(fn: (r) => r.host == \"host.global\")\n |> keys()\n |> keep(columns: [\"_value\"])",
Start: ast.Position{
Column: 42,
Line: 92,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 97,
},
File: "measurement_tag_keys_test.flux",
Source: "columns: [\"_value\"]",
Start: ast.Position{
Column: 13,
Line: 97,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 97,
},
File: "measurement_tag_keys_test.flux",
Source: "columns: [\"_value\"]",
Start: ast.Position{
Column: 13,
Line: 97,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 20,
Line: 97,
},
File: "measurement_tag_keys_test.flux",
Source: "columns",
Start: ast.Position{
Column: 13,
Line: 97,
},
},
},
Name: "columns",
},
Value: &ast.ArrayExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 97,
},
File: "measurement_tag_keys_test.flux",
Source: "[\"_value\"]",
Start: ast.Position{
Column: 22,
Line: 97,
},
},
},
Elements: []ast.Expression{&ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 97,
},
File: "measurement_tag_keys_test.flux",
Source: "\"_value\"",
Start: ast.Position{
Column: 23,
Line: 97,
},
},
},
Value: "_value",
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 97,
},
File: "measurement_tag_keys_test.flux",
Source: "keep(columns: [\"_value\"])",
Start: ast.Position{
Column: 8,
Line: 97,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 97,
},
File: "measurement_tag_keys_test.flux",
Source: "keep",
Start: ast.Position{
Column: 8,
Line: 97,
},
},
},
Name: "keep",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 18,
Line: 98,
},
File: "measurement_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")\n |> filter(fn: (r) => r.host == \"host.global\")\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()",
Start: ast.Position{
Column: 42,
Line: 92,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 18,
Line: 98,
},
File: "measurement_tag_keys_test.flux",
Source: "distinct()",
Start: ast.Position{
Column: 8,
Line: 98,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 98,
},
File: "measurement_tag_keys_test.flux",
Source: "distinct",
Start: ast.Position{
Column: 8,
Line: 98,
},
},
},
Name: "distinct",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 99,
},
File: "measurement_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"swp\")\n |> filter(fn: (r) => r.host == \"host.global\")\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()\n |> sort()",
Start: ast.Position{
Column: 42,
Line: 92,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 99,
},
File: "measurement_tag_keys_test.flux",
Source: "sort()",
Start: ast.Position{
Column: 8,
Line: 99,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 99,
},
File: "measurement_tag_keys_test.flux",
Source: "sort",
Start: ast.Position{
Column: 8,
Line: 99,
},
},
},
Name: "sort",
},
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 37,
Line: 92,
},
File: "measurement_tag_keys_test.flux",
Source: "tables=<-",
Start: ast.Position{
Column: 28,
Line: 92,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 34,
Line: 92,
},
File: "measurement_tag_keys_test.flux",
Source: "tables",
Start: ast.Position{
Column: 28,
Line: 92,
},
},
},
Name: "tables",
},
Value: &ast.PipeLiteral{BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 37,
Line: 92,
},
File: "measurement_tag_keys_test.flux",
Source: "<-",
Start: ast.Position{
Column: 35,
Line: 92,
},
},
}},
}},
},
}, &ast.TestStatement{
Assignment: &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 111,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "measurement_tag_keys = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_keys_fn}",
Start: ast.Position{
Column: 6,
Line: 101,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 26,
Line: 101,
},
File: "measurement_tag_keys_test.flux",
Source: "measurement_tag_keys",
Start: ast.Position{
Column: 6,
Line: 101,
},
},
},
Name: "measurement_tag_keys",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 111,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "() =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_keys_fn}",
Start: ast.Position{
Column: 29,
Line: 101,
},
},
},
Body: &ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 111,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "{input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_keys_fn}",
Start: ast.Position{
Column: 6,
Line: 102,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "input: testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 7,
Line: 102,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "input",
Start: ast.Position{
Column: 7,
Line: 102,
},
},
},
Name: "input",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 102,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 102,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 37,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "csv",
Start: ast.Position{
Column: 34,
Line: 102,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "input",
Start: ast.Position{
Column: 39,
Line: 102,
},
},
},
Name: "input",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 14,
Line: 102,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "testing.loadStorage",
Start: ast.Position{
Column: 14,
Line: 102,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "testing",
Start: ast.Position{
Column: 14,
Line: 102,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "loadStorage",
Start: ast.Position{
Column: 22,
Line: 102,
},
},
},
Name: "loadStorage",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "want: testing.loadMem(csv: output)",
Start: ast.Position{
Column: 47,
Line: 102,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 51,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "want",
Start: ast.Position{
Column: 47,
Line: 102,
},
},
},
Name: "want",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 102,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 102,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 72,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "csv",
Start: ast.Position{
Column: 69,
Line: 102,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "output",
Start: ast.Position{
Column: 74,
Line: 102,
},
},
},
Name: "output",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "testing.loadMem(csv: output)",
Start: ast.Position{
Column: 53,
Line: 102,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "testing.loadMem",
Start: ast.Position{
Column: 53,
Line: 102,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 60,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "testing",
Start: ast.Position{
Column: 53,
Line: 102,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "loadMem",
Start: ast.Position{
Column: 61,
Line: 102,
},
},
},
Name: "loadMem",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 110,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "fn: measurement_tag_keys_fn",
Start: ast.Position{
Column: 83,
Line: 102,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 85,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "fn",
Start: ast.Position{
Column: 83,
Line: 102,
},
},
},
Name: "fn",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 110,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "measurement_tag_keys_fn",
Start: ast.Position{
Column: 87,
Line: 102,
},
},
},
Name: "measurement_tag_keys_fn",
},
}},
},
Params: nil,
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 111,
Line: 102,
},
File: "measurement_tag_keys_test.flux",
Source: "test measurement_tag_keys = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_keys_fn}",
Start: ast.Position{
Column: 1,
Line: 101,
},
},
},
}},
Imports: []*ast.ImportDeclaration{&ast.ImportDeclaration{
As: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "measurement_tag_keys_test.flux",
Source: "import \"testing\"",
Start: ast.Position{
Column: 1,
Line: 3,
},
},
},
Path: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "measurement_tag_keys_test.flux",
Source: "\"testing\"",
Start: ast.Position{
Column: 8,
Line: 3,
},
},
},
Value: "testing",
},
}},
Name: "measurement_tag_keys_test.flux",
Package: &ast.PackageClause{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "measurement_tag_keys_test.flux",
Source: "package v1_test",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Name: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "measurement_tag_keys_test.flux",
Source: "v1_test",
Start: ast.Position{
Column: 9,
Line: 1,
},
},
},
Name: "main",
},
},
}},
Package: "main",
Path: "",
}, &ast.Package{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: nil,
},
Files: []*ast.File{&ast.File{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 113,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "package v1_test\n\nimport \"testing\"\n\ninput = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,long\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,sys,host.global,load7,183\n,,0,2018-05-22T19:53:36Z,sys,host.global,load7,172\n,,0,2018-05-22T19:53:46Z,sys,host.global,load7,174\n,,0,2018-05-22T19:53:56Z,sys,host.global,load7,163\n,,0,2018-05-22T19:54:06Z,sys,host.global,load7,191\n,,0,2018-05-22T19:54:16Z,sys,host.global,load7,184\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load8,198\n,,1,2018-05-22T19:53:36Z,sys,host.local,load8,197\n,,1,2018-05-22T19:53:46Z,sys,host.local,load8,197\n,,1,2018-05-22T19:53:56Z,sys,host.local,load8,196\n,,1,2018-05-22T19:54:06Z,sys,host.local,load8,198\n,,1,2018-05-22T19:54:16Z,sys,host.local,load8,197\n\n,,2,2018-05-22T19:53:26Z,sys,host.global,load9,195\n,,2,2018-05-22T19:53:36Z,sys,host.global,load9,192\n,,2,2018-05-22T19:53:46Z,sys,host.global,load9,192\n,,2,2018-05-22T19:53:56Z,sys,host.global,load9,189\n,,2,2018-05-22T19:54:06Z,sys,host.global,load9,194\n,,2,2018-05-22T19:54:16Z,sys,host.global,load9,193\n\n,,3,2018-05-22T19:53:26Z,swp,host.global,used_percent,8298\n,,3,2018-05-22T19:53:36Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:53:46Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:53:56Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:54:06Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:54:16Z,swp,host.global,used_percent,8264\n\"\n\noutput = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,load3\n,,0,load8\n\"\n\nmeasurement_tag_values_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")\n |> filter(fn: (r) => r.host == \"host.local\")\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")\n |> sort()\n\ntest 
measurement_tag_values = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_values_fn}",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Body: []ast.Statement{&ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 69,
},
File: "measurement_tag_values_test.flux",
Source: "input = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,long\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,sys,host.global,load7,183\n,,0,2018-05-22T19:53:36Z,sys,host.global,load7,172\n,,0,2018-05-22T19:53:46Z,sys,host.global,load7,174\n,,0,2018-05-22T19:53:56Z,sys,host.global,load7,163\n,,0,2018-05-22T19:54:06Z,sys,host.global,load7,191\n,,0,2018-05-22T19:54:16Z,sys,host.global,load7,184\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load8,198\n,,1,2018-05-22T19:53:36Z,sys,host.local,load8,197\n,,1,2018-05-22T19:53:46Z,sys,host.local,load8,197\n,,1,2018-05-22T19:53:56Z,sys,host.local,load8,196\n,,1,2018-05-22T19:54:06Z,sys,host.local,load8,198\n,,1,2018-05-22T19:54:16Z,sys,host.local,load8,197\n\n,,2,2018-05-22T19:53:26Z,sys,host.global,load9,195\n,,2,2018-05-22T19:53:36Z,sys,host.global,load9,192\n,,2,2018-05-22T19:53:46Z,sys,host.global,load9,192\n,,2,2018-05-22T19:53:56Z,sys,host.global,load9,189\n,,2,2018-05-22T19:54:06Z,sys,host.global,load9,194\n,,2,2018-05-22T19:54:16Z,sys,host.global,load9,193\n\n,,3,2018-05-22T19:53:26Z,swp,host.global,used_percent,8298\n,,3,2018-05-22T19:53:36Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:53:46Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:53:56Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:54:06Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:54:16Z,swp,host.global,used_percent,8264\n\"",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 6,
Line: 5,
},
File: "measurement_tag_values_test.flux",
Source: "input",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
Name: "input",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 69,
},
File: "measurement_tag_values_test.flux",
Source: "\"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,long\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,sys,host.global,load7,183\n,,0,2018-05-22T19:53:36Z,sys,host.global,load7,172\n,,0,2018-05-22T19:53:46Z,sys,host.global,load7,174\n,,0,2018-05-22T19:53:56Z,sys,host.global,load7,163\n,,0,2018-05-22T19:54:06Z,sys,host.global,load7,191\n,,0,2018-05-22T19:54:16Z,sys,host.global,load7,184\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load8,198\n,,1,2018-05-22T19:53:36Z,sys,host.local,load8,197\n,,1,2018-05-22T19:53:46Z,sys,host.local,load8,197\n,,1,2018-05-22T19:53:56Z,sys,host.local,load8,196\n,,1,2018-05-22T19:54:06Z,sys,host.local,load8,198\n,,1,2018-05-22T19:54:16Z,sys,host.local,load8,197\n\n,,2,2018-05-22T19:53:26Z,sys,host.global,load9,195\n,,2,2018-05-22T19:53:36Z,sys,host.global,load9,192\n,,2,2018-05-22T19:53:46Z,sys,host.global,load9,192\n,,2,2018-05-22T19:53:56Z,sys,host.global,load9,189\n,,2,2018-05-22T19:54:06Z,sys,host.global,load9,194\n,,2,2018-05-22T19:54:16Z,sys,host.global,load9,193\n\n,,3,2018-05-22T19:53:26Z,swp,host.global,used_percent,8298\n,,3,2018-05-22T19:53:36Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:53:46Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:53:56Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:54:06Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:54:16Z,swp,host.global,used_percent,8264\n\"",
Start: ast.Position{
Column: 9,
Line: 5,
},
},
},
Value: "\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,long\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,sys,host.global,load7,183\n,,0,2018-05-22T19:53:36Z,sys,host.global,load7,172\n,,0,2018-05-22T19:53:46Z,sys,host.global,load7,174\n,,0,2018-05-22T19:53:56Z,sys,host.global,load7,163\n,,0,2018-05-22T19:54:06Z,sys,host.global,load7,191\n,,0,2018-05-22T19:54:16Z,sys,host.global,load7,184\n\n,,1,2018-05-22T19:53:26Z,sys,host.local,load8,198\n,,1,2018-05-22T19:53:36Z,sys,host.local,load8,197\n,,1,2018-05-22T19:53:46Z,sys,host.local,load8,197\n,,1,2018-05-22T19:53:56Z,sys,host.local,load8,196\n,,1,2018-05-22T19:54:06Z,sys,host.local,load8,198\n,,1,2018-05-22T19:54:16Z,sys,host.local,load8,197\n\n,,2,2018-05-22T19:53:26Z,sys,host.global,load9,195\n,,2,2018-05-22T19:53:36Z,sys,host.global,load9,192\n,,2,2018-05-22T19:53:46Z,sys,host.global,load9,192\n,,2,2018-05-22T19:53:56Z,sys,host.global,load9,189\n,,2,2018-05-22T19:54:06Z,sys,host.global,load9,194\n,,2,2018-05-22T19:54:16Z,sys,host.global,load9,193\n\n,,3,2018-05-22T19:53:26Z,swp,host.global,used_percent,8298\n,,3,2018-05-22T19:53:36Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:53:46Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:53:56Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:54:06Z,swp,host.global,used_percent,8259\n,,3,2018-05-22T19:54:16Z,swp,host.global,used_percent,8264\n",
},
}, &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 78,
},
File: "measurement_tag_values_test.flux",
Source: "output = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,load3\n,,0,load8\n\"",
Start: ast.Position{
Column: 1,
Line: 71,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 7,
Line: 71,
},
File: "measurement_tag_values_test.flux",
Source: "output",
Start: ast.Position{
Column: 1,
Line: 71,
},
},
},
Name: "output",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 78,
},
File: "measurement_tag_values_test.flux",
Source: "\"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,load3\n,,0,load8\n\"",
Start: ast.Position{
Column: 10,
Line: 71,
},
},
},
Value: "\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,load3\n,,0,load8\n",
},
}, &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 87,
},
File: "measurement_tag_values_test.flux",
Source: "measurement_tag_values_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")\n |> filter(fn: (r) => r.host == \"host.local\")\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")\n |> sort()",
Start: ast.Position{
Column: 1,
Line: 80,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 26,
Line: 80,
},
File: "measurement_tag_values_test.flux",
Source: "measurement_tag_values_fn",
Start: ast.Position{
Column: 1,
Line: 80,
},
},
},
Name: "measurement_tag_values_fn",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 87,
},
File: "measurement_tag_values_test.flux",
Source: "(tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")\n |> filter(fn: (r) => r.host == \"host.local\")\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")\n |> sort()",
Start: ast.Position{
Column: 29,
Line: 80,
},
},
},
Body: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 50,
Line: 80,
},
File: "measurement_tag_values_test.flux",
Source: "tables",
Start: ast.Position{
Column: 44,
Line: 80,
},
},
},
Name: "tables",
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 44,
Line: 80,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 81,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "start: 2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 81,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 19,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "start",
Start: ast.Position{
Column: 14,
Line: 81,
},
},
},
Name: "start",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 21,
Line: 81,
},
},
},
Value: parser.MustParseTime("2018-01-01T00:00:00Z"),
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 43,
Line: 81,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 47,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "stop",
Start: ast.Position{
Column: 43,
Line: 81,
},
},
},
Name: "stop",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 49,
Line: 81,
},
},
},
Value: parser.MustParseTime("2019-01-01T00:00:00Z"),
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 8,
Line: 81,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 13,
Line: 81,
},
File: "measurement_tag_values_test.flux",
Source: "range",
Start: ast.Position{
Column: 8,
Line: 81,
},
},
},
Name: "range",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 50,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")",
Start: ast.Position{
Column: 44,
Line: 80,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "fn: (r) => r._measurement == \"sys\"",
Start: ast.Position{
Column: 15,
Line: 82,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "fn: (r) => r._measurement == \"sys\"",
Start: ast.Position{
Column: 15,
Line: 82,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "fn",
Start: ast.Position{
Column: 15,
Line: 82,
},
},
},
Name: "fn",
},
Value: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "(r) => r._measurement == \"sys\"",
Start: ast.Position{
Column: 19,
Line: 82,
},
},
},
Body: &ast.BinaryExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "r._measurement == \"sys\"",
Start: ast.Position{
Column: 26,
Line: 82,
},
},
},
Left: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 40,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "r._measurement",
Start: ast.Position{
Column: 26,
Line: 82,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 27,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "r",
Start: ast.Position{
Column: 26,
Line: 82,
},
},
},
Name: "r",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 40,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "_measurement",
Start: ast.Position{
Column: 28,
Line: 82,
},
},
},
Name: "_measurement",
},
},
Operator: 14,
Right: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "\"sys\"",
Start: ast.Position{
Column: 44,
Line: 82,
},
},
},
Value: "sys",
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 82,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 82,
},
},
},
Name: "r",
},
Value: nil,
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 50,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "filter(fn: (r) => r._measurement == \"sys\")",
Start: ast.Position{
Column: 8,
Line: 82,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 82,
},
File: "measurement_tag_values_test.flux",
Source: "filter",
Start: ast.Position{
Column: 8,
Line: 82,
},
},
},
Name: "filter",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")\n |> filter(fn: (r) => r.host == \"host.local\")",
Start: ast.Position{
Column: 44,
Line: 80,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 48,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "fn: (r) => r.host == \"host.local\"",
Start: ast.Position{
Column: 15,
Line: 83,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 48,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "fn: (r) => r.host == \"host.local\"",
Start: ast.Position{
Column: 15,
Line: 83,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "fn",
Start: ast.Position{
Column: 15,
Line: 83,
},
},
},
Name: "fn",
},
Value: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 48,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "(r) => r.host == \"host.local\"",
Start: ast.Position{
Column: 19,
Line: 83,
},
},
},
Body: &ast.BinaryExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 48,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "r.host == \"host.local\"",
Start: ast.Position{
Column: 26,
Line: 83,
},
},
},
Left: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "r.host",
Start: ast.Position{
Column: 26,
Line: 83,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 27,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "r",
Start: ast.Position{
Column: 26,
Line: 83,
},
},
},
Name: "r",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "host",
Start: ast.Position{
Column: 28,
Line: 83,
},
},
},
Name: "host",
},
},
Operator: 14,
Right: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 48,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "\"host.local\"",
Start: ast.Position{
Column: 36,
Line: 83,
},
},
},
Value: "host.local",
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 83,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 83,
},
},
},
Name: "r",
},
Value: nil,
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 49,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "filter(fn: (r) => r.host == \"host.local\")",
Start: ast.Position{
Column: 8,
Line: 83,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 83,
},
File: "measurement_tag_values_test.flux",
Source: "filter",
Start: ast.Position{
Column: 8,
Line: 83,
},
},
},
Name: "filter",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 84,
},
File: "measurement_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")\n |> filter(fn: (r) => r.host == \"host.local\")\n |> keep(columns: [\"_field\"])",
Start: ast.Position{
Column: 44,
Line: 80,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 84,
},
File: "measurement_tag_values_test.flux",
Source: "columns: [\"_field\"]",
Start: ast.Position{
Column: 13,
Line: 84,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 84,
},
File: "measurement_tag_values_test.flux",
Source: "columns: [\"_field\"]",
Start: ast.Position{
Column: 13,
Line: 84,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 20,
Line: 84,
},
File: "measurement_tag_values_test.flux",
Source: "columns",
Start: ast.Position{
Column: 13,
Line: 84,
},
},
},
Name: "columns",
},
Value: &ast.ArrayExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 84,
},
File: "measurement_tag_values_test.flux",
Source: "[\"_field\"]",
Start: ast.Position{
Column: 22,
Line: 84,
},
},
},
Elements: []ast.Expression{&ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 84,
},
File: "measurement_tag_values_test.flux",
Source: "\"_field\"",
Start: ast.Position{
Column: 23,
Line: 84,
},
},
},
Value: "_field",
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 84,
},
File: "measurement_tag_values_test.flux",
Source: "keep(columns: [\"_field\"])",
Start: ast.Position{
Column: 8,
Line: 84,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 84,
},
File: "measurement_tag_values_test.flux",
Source: "keep",
Start: ast.Position{
Column: 8,
Line: 84,
},
},
},
Name: "keep",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 15,
Line: 85,
},
File: "measurement_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")\n |> filter(fn: (r) => r.host == \"host.local\")\n |> keep(columns: [\"_field\"])\n |> group()",
Start: ast.Position{
Column: 44,
Line: 80,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 15,
Line: 85,
},
File: "measurement_tag_values_test.flux",
Source: "group()",
Start: ast.Position{
Column: 8,
Line: 85,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 13,
Line: 85,
},
File: "measurement_tag_values_test.flux",
Source: "group",
Start: ast.Position{
Column: 8,
Line: 85,
},
},
},
Name: "group",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 34,
Line: 86,
},
File: "measurement_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")\n |> filter(fn: (r) => r.host == \"host.local\")\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")",
Start: ast.Position{
Column: 44,
Line: 80,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 86,
},
File: "measurement_tag_values_test.flux",
Source: "column: \"_field\"",
Start: ast.Position{
Column: 17,
Line: 86,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 86,
},
File: "measurement_tag_values_test.flux",
Source: "column: \"_field\"",
Start: ast.Position{
Column: 17,
Line: 86,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 23,
Line: 86,
},
File: "measurement_tag_values_test.flux",
Source: "column",
Start: ast.Position{
Column: 17,
Line: 86,
},
},
},
Name: "column",
},
Value: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 86,
},
File: "measurement_tag_values_test.flux",
Source: "\"_field\"",
Start: ast.Position{
Column: 25,
Line: 86,
},
},
},
Value: "_field",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 34,
Line: 86,
},
File: "measurement_tag_values_test.flux",
Source: "distinct(column: \"_field\")",
Start: ast.Position{
Column: 8,
Line: 86,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 86,
},
File: "measurement_tag_values_test.flux",
Source: "distinct",
Start: ast.Position{
Column: 8,
Line: 86,
},
},
},
Name: "distinct",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 87,
},
File: "measurement_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => r._measurement == \"sys\")\n |> filter(fn: (r) => r.host == \"host.local\")\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")\n |> sort()",
Start: ast.Position{
Column: 44,
Line: 80,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 87,
},
File: "measurement_tag_values_test.flux",
Source: "sort()",
Start: ast.Position{
Column: 8,
Line: 87,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 87,
},
File: "measurement_tag_values_test.flux",
Source: "sort",
Start: ast.Position{
Column: 8,
Line: 87,
},
},
},
Name: "sort",
},
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 39,
Line: 80,
},
File: "measurement_tag_values_test.flux",
Source: "tables=<-",
Start: ast.Position{
Column: 30,
Line: 80,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 36,
Line: 80,
},
File: "measurement_tag_values_test.flux",
Source: "tables",
Start: ast.Position{
Column: 30,
Line: 80,
},
},
},
Name: "tables",
},
Value: &ast.PipeLiteral{BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 39,
Line: 80,
},
File: "measurement_tag_values_test.flux",
Source: "<-",
Start: ast.Position{
Column: 37,
Line: 80,
},
},
}},
}},
},
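// Test statement for measurement_tag_values: input comes from
// testing.loadStorage(csv: input), the expected tables from
// testing.loadMem(csv: output), and measurement_tag_values_fn is the
// transformation under test.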
}, &ast.TestStatement{
Assignment: &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 113,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "measurement_tag_values = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_values_fn}",
Start: ast.Position{
Column: 6,
Line: 89,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 28,
Line: 89,
},
File: "measurement_tag_values_test.flux",
Source: "measurement_tag_values",
Start: ast.Position{
Column: 6,
Line: 89,
},
},
},
Name: "measurement_tag_values",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 113,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "() =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_values_fn}",
Start: ast.Position{
Column: 31,
Line: 89,
},
},
},
Body: &ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 113,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "{input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_values_fn}",
Start: ast.Position{
Column: 6,
Line: 90,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "input: testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 7,
Line: 90,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "input",
Start: ast.Position{
Column: 7,
Line: 90,
},
},
},
Name: "input",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 90,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 90,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 37,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "csv",
Start: ast.Position{
Column: 34,
Line: 90,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "input",
Start: ast.Position{
Column: 39,
Line: 90,
},
},
},
Name: "input",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 14,
Line: 90,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "testing.loadStorage",
Start: ast.Position{
Column: 14,
Line: 90,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "testing",
Start: ast.Position{
Column: 14,
Line: 90,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "loadStorage",
Start: ast.Position{
Column: 22,
Line: 90,
},
},
},
Name: "loadStorage",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "want: testing.loadMem(csv: output)",
Start: ast.Position{
Column: 47,
Line: 90,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 51,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "want",
Start: ast.Position{
Column: 47,
Line: 90,
},
},
},
Name: "want",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 90,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 90,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 72,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "csv",
Start: ast.Position{
Column: 69,
Line: 90,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "output",
Start: ast.Position{
Column: 74,
Line: 90,
},
},
},
Name: "output",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "testing.loadMem(csv: output)",
Start: ast.Position{
Column: 53,
Line: 90,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "testing.loadMem",
Start: ast.Position{
Column: 53,
Line: 90,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 60,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "testing",
Start: ast.Position{
Column: 53,
Line: 90,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "loadMem",
Start: ast.Position{
Column: 61,
Line: 90,
},
},
},
Name: "loadMem",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 112,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "fn: measurement_tag_values_fn",
Start: ast.Position{
Column: 83,
Line: 90,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 85,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "fn",
Start: ast.Position{
Column: 83,
Line: 90,
},
},
},
Name: "fn",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 112,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "measurement_tag_values_fn",
Start: ast.Position{
Column: 87,
Line: 90,
},
},
},
Name: "measurement_tag_values_fn",
},
}},
},
Params: nil,
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 113,
Line: 90,
},
File: "measurement_tag_values_test.flux",
Source: "test measurement_tag_values = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: measurement_tag_values_fn}",
Start: ast.Position{
Column: 1,
Line: 89,
},
},
},
}},
Imports: []*ast.ImportDeclaration{&ast.ImportDeclaration{
As: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "measurement_tag_values_test.flux",
Source: "import \"testing\"",
Start: ast.Position{
Column: 1,
Line: 3,
},
},
},
Path: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "measurement_tag_values_test.flux",
Source: "\"testing\"",
Start: ast.Position{
Column: 8,
Line: 3,
},
},
},
Value: "testing",
},
}},
Name: "measurement_tag_values_test.flux",
Package: &ast.PackageClause{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "measurement_tag_values_test.flux",
Source: "package v1_test",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Name: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "measurement_tag_values_test.flux",
Source: "v1_test",
Start: ast.Position{
Column: 9,
Line: 1,
},
},
},
Name: "main",
},
},
}},
Package: "main",
Path: "",
}, &ast.Package{
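// AST for show_measurements_test.flux: the pipeline
// range |> filter |> keep |> group |> distinct |> sort should return the
// distinct _measurement values "swap" and "system".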
BaseNode: ast.BaseNode{
Errors: nil,
Loc: nil,
},
Files: []*ast.File{&ast.File{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 108,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "package v1_test\n\nimport \"testing\"\n\ninput = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.local,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.local,used_percent,82.64\n\"\n\noutput = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,swap\n,,0,system\n\"\n\nshow_measurements_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_measurement\"])\n |> group()\n |> distinct(column: \"_measurement\")\n |> sort()\n\ntest show_measurements = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_measurements_fn}",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
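// input: annotated CSV fixture with system load series (load1/load3/load5)
// and swap used_percent series.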
Body: []ast.Statement{&ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 37,
},
File: "show_measurements_test.flux",
Source: "input = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.local,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.local,used_percent,82.64\n\"",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 6,
Line: 5,
},
File: "show_measurements_test.flux",
Source: "input",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
Name: "input",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 37,
},
File: "show_measurements_test.flux",
Source: "\"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.local,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.local,used_percent,82.64\n\"",
Start: ast.Position{
Column: 9,
Line: 5,
},
},
},
Value: "\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.local,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.local,used_percent,82.64\n",
},
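// output: expected CSV, the two distinct measurement names in sorted order.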
}, &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 46,
},
File: "show_measurements_test.flux",
Source: "output = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,swap\n,,0,system\n\"",
Start: ast.Position{
Column: 1,
Line: 39,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 7,
Line: 39,
},
File: "show_measurements_test.flux",
Source: "output",
Start: ast.Position{
Column: 1,
Line: 39,
},
},
},
Name: "output",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 46,
},
File: "show_measurements_test.flux",
Source: "\"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,swap\n,,0,system\n\"",
Start: ast.Position{
Column: 10,
Line: 39,
},
},
},
Value: "\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,swap\n,,0,system\n",
},
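// show_measurements_fn: the Flux pipeline under test (see the Source field
// below for the exact pipeline text).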
}, &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 54,
},
File: "show_measurements_test.flux",
Source: "show_measurements_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_measurement\"])\n |> group()\n |> distinct(column: \"_measurement\")\n |> sort()",
Start: ast.Position{
Column: 1,
Line: 48,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 48,
},
File: "show_measurements_test.flux",
Source: "show_measurements_fn",
Start: ast.Position{
Column: 1,
Line: 48,
},
},
},
Name: "show_measurements_fn",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 54,
},
File: "show_measurements_test.flux",
Source: "(tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_measurement\"])\n |> group()\n |> distinct(column: \"_measurement\")\n |> sort()",
Start: ast.Position{
Column: 24,
Line: 48,
},
},
},
Body: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 48,
},
File: "show_measurements_test.flux",
Source: "tables",
Start: ast.Position{
Column: 39,
Line: 48,
},
},
},
Name: "tables",
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 39,
Line: 48,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 49,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "start: 2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 49,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 19,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "start",
Start: ast.Position{
Column: 14,
Line: 49,
},
},
},
Name: "start",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 21,
Line: 49,
},
},
},
Value: parser.MustParseTime("2018-01-01T00:00:00Z"),
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 43,
Line: 49,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 47,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "stop",
Start: ast.Position{
Column: 43,
Line: 49,
},
},
},
Name: "stop",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 49,
Line: 49,
},
},
},
Value: parser.MustParseTime("2019-01-01T00:00:00Z"),
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 8,
Line: 49,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 13,
Line: 49,
},
File: "show_measurements_test.flux",
Source: "range",
Start: ast.Position{
Column: 8,
Line: 49,
},
},
},
Name: "range",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)",
Start: ast.Position{
Column: 39,
Line: 48,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "fn: (r) => true",
Start: ast.Position{
Column: 15,
Line: 50,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "fn: (r) => true",
Start: ast.Position{
Column: 15,
Line: 50,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "fn",
Start: ast.Position{
Column: 15,
Line: 50,
},
},
},
Name: "fn",
},
Value: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "(r) => true",
Start: ast.Position{
Column: 19,
Line: 50,
},
},
},
Body: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "true",
Start: ast.Position{
Column: 26,
Line: 50,
},
},
},
Name: "true",
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 50,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 50,
},
},
},
Name: "r",
},
Value: nil,
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "filter(fn: (r) => true)",
Start: ast.Position{
Column: 8,
Line: 50,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 50,
},
File: "show_measurements_test.flux",
Source: "filter",
Start: ast.Position{
Column: 8,
Line: 50,
},
},
},
Name: "filter",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 39,
Line: 51,
},
File: "show_measurements_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_measurement\"])",
Start: ast.Position{
Column: 39,
Line: 48,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 38,
Line: 51,
},
File: "show_measurements_test.flux",
Source: "columns: [\"_measurement\"]",
Start: ast.Position{
Column: 13,
Line: 51,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 38,
Line: 51,
},
File: "show_measurements_test.flux",
Source: "columns: [\"_measurement\"]",
Start: ast.Position{
Column: 13,
Line: 51,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 20,
Line: 51,
},
File: "show_measurements_test.flux",
Source: "columns",
Start: ast.Position{
Column: 13,
Line: 51,
},
},
},
Name: "columns",
},
Value: &ast.ArrayExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 38,
Line: 51,
},
File: "show_measurements_test.flux",
Source: "[\"_measurement\"]",
Start: ast.Position{
Column: 22,
Line: 51,
},
},
},
Elements: []ast.Expression{&ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 37,
Line: 51,
},
File: "show_measurements_test.flux",
Source: "\"_measurement\"",
Start: ast.Position{
Column: 23,
Line: 51,
},
},
},
Value: "_measurement",
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 39,
Line: 51,
},
File: "show_measurements_test.flux",
Source: "keep(columns: [\"_measurement\"])",
Start: ast.Position{
Column: 8,
Line: 51,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 51,
},
File: "show_measurements_test.flux",
Source: "keep",
Start: ast.Position{
Column: 8,
Line: 51,
},
},
},
Name: "keep",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 15,
Line: 52,
},
File: "show_measurements_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_measurement\"])\n |> group()",
Start: ast.Position{
Column: 39,
Line: 48,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 15,
Line: 52,
},
File: "show_measurements_test.flux",
Source: "group()",
Start: ast.Position{
Column: 8,
Line: 52,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 13,
Line: 52,
},
File: "show_measurements_test.flux",
Source: "group",
Start: ast.Position{
Column: 8,
Line: 52,
},
},
},
Name: "group",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 40,
Line: 53,
},
File: "show_measurements_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_measurement\"])\n |> group()\n |> distinct(column: \"_measurement\")",
Start: ast.Position{
Column: 39,
Line: 48,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 39,
Line: 53,
},
File: "show_measurements_test.flux",
Source: "column: \"_measurement\"",
Start: ast.Position{
Column: 17,
Line: 53,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 39,
Line: 53,
},
File: "show_measurements_test.flux",
Source: "column: \"_measurement\"",
Start: ast.Position{
Column: 17,
Line: 53,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 23,
Line: 53,
},
File: "show_measurements_test.flux",
Source: "column",
Start: ast.Position{
Column: 17,
Line: 53,
},
},
},
Name: "column",
},
Value: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 39,
Line: 53,
},
File: "show_measurements_test.flux",
Source: "\"_measurement\"",
Start: ast.Position{
Column: 25,
Line: 53,
},
},
},
Value: "_measurement",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 40,
Line: 53,
},
File: "show_measurements_test.flux",
Source: "distinct(column: \"_measurement\")",
Start: ast.Position{
Column: 8,
Line: 53,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 53,
},
File: "show_measurements_test.flux",
Source: "distinct",
Start: ast.Position{
Column: 8,
Line: 53,
},
},
},
Name: "distinct",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 54,
},
File: "show_measurements_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_measurement\"])\n |> group()\n |> distinct(column: \"_measurement\")\n |> sort()",
Start: ast.Position{
Column: 39,
Line: 48,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 54,
},
File: "show_measurements_test.flux",
Source: "sort()",
Start: ast.Position{
Column: 8,
Line: 54,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 54,
},
File: "show_measurements_test.flux",
Source: "sort",
Start: ast.Position{
Column: 8,
Line: 54,
},
},
},
Name: "sort",
},
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 34,
Line: 48,
},
File: "show_measurements_test.flux",
Source: "tables=<-",
Start: ast.Position{
Column: 25,
Line: 48,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 48,
},
File: "show_measurements_test.flux",
Source: "tables",
Start: ast.Position{
Column: 25,
Line: 48,
},
},
},
Name: "tables",
},
Value: &ast.PipeLiteral{BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 34,
Line: 48,
},
File: "show_measurements_test.flux",
Source: "<-",
Start: ast.Position{
Column: 32,
Line: 48,
},
},
}},
}},
},
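// Test statement for show_measurements, wiring input, want and
// show_measurements_fn together for the testing framework.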
}, &ast.TestStatement{
Assignment: &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 108,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "show_measurements = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_measurements_fn}",
Start: ast.Position{
Column: 6,
Line: 56,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 23,
Line: 56,
},
File: "show_measurements_test.flux",
Source: "show_measurements",
Start: ast.Position{
Column: 6,
Line: 56,
},
},
},
Name: "show_measurements",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 108,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "() =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_measurements_fn}",
Start: ast.Position{
Column: 26,
Line: 56,
},
},
},
Body: &ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 108,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "{input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_measurements_fn}",
Start: ast.Position{
Column: 6,
Line: 57,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "input: testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 7,
Line: 57,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "input",
Start: ast.Position{
Column: 7,
Line: 57,
},
},
},
Name: "input",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 57,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 57,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 37,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "csv",
Start: ast.Position{
Column: 34,
Line: 57,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "input",
Start: ast.Position{
Column: 39,
Line: 57,
},
},
},
Name: "input",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 14,
Line: 57,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "testing.loadStorage",
Start: ast.Position{
Column: 14,
Line: 57,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "testing",
Start: ast.Position{
Column: 14,
Line: 57,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "loadStorage",
Start: ast.Position{
Column: 22,
Line: 57,
},
},
},
Name: "loadStorage",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "want: testing.loadMem(csv: output)",
Start: ast.Position{
Column: 47,
Line: 57,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 51,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "want",
Start: ast.Position{
Column: 47,
Line: 57,
},
},
},
Name: "want",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 57,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 57,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 72,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "csv",
Start: ast.Position{
Column: 69,
Line: 57,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "output",
Start: ast.Position{
Column: 74,
Line: 57,
},
},
},
Name: "output",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "testing.loadMem(csv: output)",
Start: ast.Position{
Column: 53,
Line: 57,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "testing.loadMem",
Start: ast.Position{
Column: 53,
Line: 57,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 60,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "testing",
Start: ast.Position{
Column: 53,
Line: 57,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "loadMem",
Start: ast.Position{
Column: 61,
Line: 57,
},
},
},
Name: "loadMem",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 107,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "fn: show_measurements_fn",
Start: ast.Position{
Column: 83,
Line: 57,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 85,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "fn",
Start: ast.Position{
Column: 83,
Line: 57,
},
},
},
Name: "fn",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 107,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "show_measurements_fn",
Start: ast.Position{
Column: 87,
Line: 57,
},
},
},
Name: "show_measurements_fn",
},
}},
},
Params: nil,
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 108,
Line: 57,
},
File: "show_measurements_test.flux",
Source: "test show_measurements = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_measurements_fn}",
Start: ast.Position{
Column: 1,
Line: 56,
},
},
},
}},
Imports: []*ast.ImportDeclaration{&ast.ImportDeclaration{
As: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "show_measurements_test.flux",
Source: "import \"testing\"",
Start: ast.Position{
Column: 1,
Line: 3,
},
},
},
Path: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "show_measurements_test.flux",
Source: "\"testing\"",
Start: ast.Position{
Column: 8,
Line: 3,
},
},
},
Value: "testing",
},
}},
Name: "show_measurements_test.flux",
Package: &ast.PackageClause{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "show_measurements_test.flux",
Source: "package v1_test",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Name: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "show_measurements_test.flux",
Source: "v1_test",
Start: ast.Position{
Column: 9,
Line: 1,
},
},
},
Name: "main",
},
},
}},
Package: "main",
Path: "",
}, &ast.Package{
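// AST for show_tag_keys_test.flux: the pipeline
// range |> filter |> keys |> keep |> distinct |> sort should list the tag
// keys _field, _measurement, _start, _stop, host and region.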
BaseNode: ast.BaseNode{
Errors: nil,
Loc: nil,
},
Files: []*ast.File{&ast.File{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 104,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "package v1_test\n\nimport \"testing\"\n\ninput = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,host,region,_field,_value\n,,0,2018-05-22T19:53:26Z,system,us-east,host.local,load1,10\n,,0,2018-05-22T19:53:36Z,system,us-east,host.local,load1,11\n,,0,2018-05-22T19:53:46Z,system,us-east,host.local,load1,18\n,,0,2018-05-22T19:53:56Z,system,us-east,host.local,load1,19\n,,0,2018-05-22T19:54:06Z,system,us-east,host.local,load1,17\n,,0,2018-05-22T19:54:16Z,system,us-east,host.local,load1,17\n\n,,1,2018-05-22T19:53:26Z,system,us-east,host.local,load3,16\n,,1,2018-05-22T19:53:36Z,system,us-east,host.local,load3,16\n,,1,2018-05-22T19:53:46Z,system,us-east,host.local,load3,15\n,,1,2018-05-22T19:53:56Z,system,us-east,host.local,load3,19\n,,1,2018-05-22T19:54:06Z,system,us-east,host.local,load3,19\n,,1,2018-05-22T19:54:16Z,system,us-east,host.local,load3,19\n\n,,2,2018-05-22T19:53:26Z,system,us-west,host.local,load5,19\n,,2,2018-05-22T19:53:36Z,system,us-west,host.local,load5,22\n,,2,2018-05-22T19:53:46Z,system,us-west,host.local,load5,11\n,,2,2018-05-22T19:53:56Z,system,us-west,host.local,load5,12\n,,2,2018-05-22T19:54:06Z,system,us-west,host.local,load5,13\n,,2,2018-05-22T19:54:16Z,system,us-west,host.local,load5,13\n\"\n\noutput = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,_field\n,,0,_measurement\n,,0,_start\n,,0,_stop\n,,0,host\n,,0,region\n\"\n\nshow_tag_keys_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()\n |> sort()\n\ntest show_tag_keys = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_keys_fn}",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Body: []ast.Statement{&ast.VariableAssignment{
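		// input: CSV test fixture with system load1/load3/load5 series
		// (Flux source lines 5-55).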
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 55,
},
File: "show_tag_keys_test.flux",
Source: "input = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,host,region,_field,_value\n,,0,2018-05-22T19:53:26Z,system,us-east,host.local,load1,10\n,,0,2018-05-22T19:53:36Z,system,us-east,host.local,load1,11\n,,0,2018-05-22T19:53:46Z,system,us-east,host.local,load1,18\n,,0,2018-05-22T19:53:56Z,system,us-east,host.local,load1,19\n,,0,2018-05-22T19:54:06Z,system,us-east,host.local,load1,17\n,,0,2018-05-22T19:54:16Z,system,us-east,host.local,load1,17\n\n,,1,2018-05-22T19:53:26Z,system,us-east,host.local,load3,16\n,,1,2018-05-22T19:53:36Z,system,us-east,host.local,load3,16\n,,1,2018-05-22T19:53:46Z,system,us-east,host.local,load3,15\n,,1,2018-05-22T19:53:56Z,system,us-east,host.local,load3,19\n,,1,2018-05-22T19:54:06Z,system,us-east,host.local,load3,19\n,,1,2018-05-22T19:54:16Z,system,us-east,host.local,load3,19\n\n,,2,2018-05-22T19:53:26Z,system,us-west,host.local,load5,19\n,,2,2018-05-22T19:53:36Z,system,us-west,host.local,load5,22\n,,2,2018-05-22T19:53:46Z,system,us-west,host.local,load5,11\n,,2,2018-05-22T19:53:56Z,system,us-west,host.local,load5,12\n,,2,2018-05-22T19:54:06Z,system,us-west,host.local,load5,13\n,,2,2018-05-22T19:54:16Z,system,us-west,host.local,load5,13\n\"",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 6,
Line: 5,
},
File: "show_tag_keys_test.flux",
Source: "input",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
Name: "input",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 55,
},
File: "show_tag_keys_test.flux",
Source: "\"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,host,region,_field,_value\n,,0,2018-05-22T19:53:26Z,system,us-east,host.local,load1,10\n,,0,2018-05-22T19:53:36Z,system,us-east,host.local,load1,11\n,,0,2018-05-22T19:53:46Z,system,us-east,host.local,load1,18\n,,0,2018-05-22T19:53:56Z,system,us-east,host.local,load1,19\n,,0,2018-05-22T19:54:06Z,system,us-east,host.local,load1,17\n,,0,2018-05-22T19:54:16Z,system,us-east,host.local,load1,17\n\n,,1,2018-05-22T19:53:26Z,system,us-east,host.local,load3,16\n,,1,2018-05-22T19:53:36Z,system,us-east,host.local,load3,16\n,,1,2018-05-22T19:53:46Z,system,us-east,host.local,load3,15\n,,1,2018-05-22T19:53:56Z,system,us-east,host.local,load3,19\n,,1,2018-05-22T19:54:06Z,system,us-east,host.local,load3,19\n,,1,2018-05-22T19:54:16Z,system,us-east,host.local,load3,19\n\n,,2,2018-05-22T19:53:26Z,system,us-west,host.local,load5,19\n,,2,2018-05-22T19:53:36Z,system,us-west,host.local,load5,22\n,,2,2018-05-22T19:53:46Z,system,us-west,host.local,load5,11\n,,2,2018-05-22T19:53:56Z,system,us-west,host.local,load5,12\n,,2,2018-05-22T19:54:06Z,system,us-west,host.local,load5,13\n,,2,2018-05-22T19:54:16Z,system,us-west,host.local,load5,13\n\"",
Start: ast.Position{
Column: 9,
Line: 5,
},
},
},
Value: "\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n#datatype,string,long,dateTime:RFC3339,string,string,string,string,long\n#group,false,false,false,true,true,true,true,false\n#default,_result,,,,,,,\n,result,table,_time,_measurement,host,region,_field,_value\n,,0,2018-05-22T19:53:26Z,system,us-east,host.local,load1,10\n,,0,2018-05-22T19:53:36Z,system,us-east,host.local,load1,11\n,,0,2018-05-22T19:53:46Z,system,us-east,host.local,load1,18\n,,0,2018-05-22T19:53:56Z,system,us-east,host.local,load1,19\n,,0,2018-05-22T19:54:06Z,system,us-east,host.local,load1,17\n,,0,2018-05-22T19:54:16Z,system,us-east,host.local,load1,17\n\n,,1,2018-05-22T19:53:26Z,system,us-east,host.local,load3,16\n,,1,2018-05-22T19:53:36Z,system,us-east,host.local,load3,16\n,,1,2018-05-22T19:53:46Z,system,us-east,host.local,load3,15\n,,1,2018-05-22T19:53:56Z,system,us-east,host.local,load3,19\n,,1,2018-05-22T19:54:06Z,system,us-east,host.local,load3,19\n,,1,2018-05-22T19:54:16Z,system,us-east,host.local,load3,19\n\n,,2,2018-05-22T19:53:26Z,system,us-west,host.local,load5,19\n,,2,2018-05-22T19:53:36Z,system,us-west,host.local,load5,22\n,,2,2018-05-22T19:53:46Z,system,us-west,host.local,load5,11\n,,2,2018-05-22T19:53:56Z,system,us-west,host.local,load5,12\n,,2,2018-05-22T19:54:06Z,system,us-west,host.local,load5,13\n,,2,2018-05-22T19:54:16Z,system,us-west,host.local,load5,13\n",
},
}, &ast.VariableAssignment{
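		// output: expected tag keys (_field, _measurement, _start, _stop,
		// host, region) as CSV (Flux source lines 57-68).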
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 68,
},
File: "show_tag_keys_test.flux",
Source: "output = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,_field\n,,0,_measurement\n,,0,_start\n,,0,_stop\n,,0,host\n,,0,region\n\"",
Start: ast.Position{
Column: 1,
Line: 57,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 7,
Line: 57,
},
File: "show_tag_keys_test.flux",
Source: "output",
Start: ast.Position{
Column: 1,
Line: 57,
},
},
},
Name: "output",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 68,
},
File: "show_tag_keys_test.flux",
Source: "\"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,_field\n,,0,_measurement\n,,0,_start\n,,0,_stop\n,,0,host\n,,0,region\n\"",
Start: ast.Position{
Column: 10,
Line: 57,
},
},
},
Value: "\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,_field\n,,0,_measurement\n,,0,_start\n,,0,_stop\n,,0,host\n,,0,region\n",
},
}, &ast.VariableAssignment{
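		// show_tag_keys_fn: the pipeline under test (Flux source lines 70-76).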
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 76,
},
File: "show_tag_keys_test.flux",
Source: "show_tag_keys_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()\n |> sort()",
Start: ast.Position{
Column: 1,
Line: 70,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 70,
},
File: "show_tag_keys_test.flux",
Source: "show_tag_keys_fn",
Start: ast.Position{
Column: 1,
Line: 70,
},
},
},
Name: "show_tag_keys_fn",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 76,
},
File: "show_tag_keys_test.flux",
Source: "(tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()\n |> sort()",
Start: ast.Position{
Column: 20,
Line: 70,
},
},
},
Body: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 70,
},
File: "show_tag_keys_test.flux",
Source: "tables",
Start: ast.Position{
Column: 35,
Line: 70,
},
},
},
Name: "tables",
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 35,
Line: 70,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 71,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "start: 2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 71,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 19,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "start",
Start: ast.Position{
Column: 14,
Line: 71,
},
},
},
Name: "start",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 21,
Line: 71,
},
},
},
Value: parser.MustParseTime("2018-01-01T00:00:00Z"),
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 43,
Line: 71,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 47,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "stop",
Start: ast.Position{
Column: 43,
Line: 71,
},
},
},
Name: "stop",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 49,
Line: 71,
},
},
},
Value: parser.MustParseTime("2019-01-01T00:00:00Z"),
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 8,
Line: 71,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 13,
Line: 71,
},
File: "show_tag_keys_test.flux",
Source: "range",
Start: ast.Position{
Column: 8,
Line: 71,
},
},
},
Name: "range",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)",
Start: ast.Position{
Column: 35,
Line: 70,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "fn: (r) => true",
Start: ast.Position{
Column: 15,
Line: 72,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "fn: (r) => true",
Start: ast.Position{
Column: 15,
Line: 72,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "fn",
Start: ast.Position{
Column: 15,
Line: 72,
},
},
},
Name: "fn",
},
Value: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "(r) => true",
Start: ast.Position{
Column: 19,
Line: 72,
},
},
},
Body: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "true",
Start: ast.Position{
Column: 26,
Line: 72,
},
},
},
Name: "true",
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 72,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 72,
},
},
},
Name: "r",
},
Value: nil,
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "filter(fn: (r) => true)",
Start: ast.Position{
Column: 8,
Line: 72,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 72,
},
File: "show_tag_keys_test.flux",
Source: "filter",
Start: ast.Position{
Column: 8,
Line: 72,
},
},
},
Name: "filter",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 73,
},
File: "show_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keys()",
Start: ast.Position{
Column: 35,
Line: 70,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 73,
},
File: "show_tag_keys_test.flux",
Source: "keys()",
Start: ast.Position{
Column: 8,
Line: 73,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 73,
},
File: "show_tag_keys_test.flux",
Source: "keys",
Start: ast.Position{
Column: 8,
Line: 73,
},
},
},
Name: "keys",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 74,
},
File: "show_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keys()\n |> keep(columns: [\"_value\"])",
Start: ast.Position{
Column: 35,
Line: 70,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 74,
},
File: "show_tag_keys_test.flux",
Source: "columns: [\"_value\"]",
Start: ast.Position{
Column: 13,
Line: 74,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 74,
},
File: "show_tag_keys_test.flux",
Source: "columns: [\"_value\"]",
Start: ast.Position{
Column: 13,
Line: 74,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 20,
Line: 74,
},
File: "show_tag_keys_test.flux",
Source: "columns",
Start: ast.Position{
Column: 13,
Line: 74,
},
},
},
Name: "columns",
},
Value: &ast.ArrayExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 74,
},
File: "show_tag_keys_test.flux",
Source: "[\"_value\"]",
Start: ast.Position{
Column: 22,
Line: 74,
},
},
},
Elements: []ast.Expression{&ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 74,
},
File: "show_tag_keys_test.flux",
Source: "\"_value\"",
Start: ast.Position{
Column: 23,
Line: 74,
},
},
},
Value: "_value",
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 74,
},
File: "show_tag_keys_test.flux",
Source: "keep(columns: [\"_value\"])",
Start: ast.Position{
Column: 8,
Line: 74,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 74,
},
File: "show_tag_keys_test.flux",
Source: "keep",
Start: ast.Position{
Column: 8,
Line: 74,
},
},
},
Name: "keep",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 18,
Line: 75,
},
File: "show_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()",
Start: ast.Position{
Column: 35,
Line: 70,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 18,
Line: 75,
},
File: "show_tag_keys_test.flux",
Source: "distinct()",
Start: ast.Position{
Column: 8,
Line: 75,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 75,
},
File: "show_tag_keys_test.flux",
Source: "distinct",
Start: ast.Position{
Column: 8,
Line: 75,
},
},
},
Name: "distinct",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 76,
},
File: "show_tag_keys_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keys()\n |> keep(columns: [\"_value\"])\n |> distinct()\n |> sort()",
Start: ast.Position{
Column: 35,
Line: 70,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 76,
},
File: "show_tag_keys_test.flux",
Source: "sort()",
Start: ast.Position{
Column: 8,
Line: 76,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 76,
},
File: "show_tag_keys_test.flux",
Source: "sort",
Start: ast.Position{
Column: 8,
Line: 76,
},
},
},
Name: "sort",
},
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 70,
},
File: "show_tag_keys_test.flux",
Source: "tables=<-",
Start: ast.Position{
Column: 21,
Line: 70,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 27,
Line: 70,
},
File: "show_tag_keys_test.flux",
Source: "tables",
Start: ast.Position{
Column: 21,
Line: 70,
},
},
},
Name: "tables",
},
Value: &ast.PipeLiteral{BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 70,
},
File: "show_tag_keys_test.flux",
Source: "<-",
Start: ast.Position{
Column: 28,
Line: 70,
},
},
}},
}},
},
}, &ast.TestStatement{
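		// test show_tag_keys: wires input (testing.loadStorage), want
		// (testing.loadMem), and fn together (Flux source lines 78-79).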
Assignment: &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 104,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "show_tag_keys = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_keys_fn}",
Start: ast.Position{
Column: 6,
Line: 78,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 19,
Line: 78,
},
File: "show_tag_keys_test.flux",
Source: "show_tag_keys",
Start: ast.Position{
Column: 6,
Line: 78,
},
},
},
Name: "show_tag_keys",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 104,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "() =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_keys_fn}",
Start: ast.Position{
Column: 22,
Line: 78,
},
},
},
Body: &ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 104,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "{input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_keys_fn}",
Start: ast.Position{
Column: 6,
Line: 79,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "input: testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 7,
Line: 79,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "input",
Start: ast.Position{
Column: 7,
Line: 79,
},
},
},
Name: "input",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 79,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 79,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 37,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "csv",
Start: ast.Position{
Column: 34,
Line: 79,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "input",
Start: ast.Position{
Column: 39,
Line: 79,
},
},
},
Name: "input",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 14,
Line: 79,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "testing.loadStorage",
Start: ast.Position{
Column: 14,
Line: 79,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "testing",
Start: ast.Position{
Column: 14,
Line: 79,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "loadStorage",
Start: ast.Position{
Column: 22,
Line: 79,
},
},
},
Name: "loadStorage",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "want: testing.loadMem(csv: output)",
Start: ast.Position{
Column: 47,
Line: 79,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 51,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "want",
Start: ast.Position{
Column: 47,
Line: 79,
},
},
},
Name: "want",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 79,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 79,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 72,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "csv",
Start: ast.Position{
Column: 69,
Line: 79,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "output",
Start: ast.Position{
Column: 74,
Line: 79,
},
},
},
Name: "output",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "testing.loadMem(csv: output)",
Start: ast.Position{
Column: 53,
Line: 79,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "testing.loadMem",
Start: ast.Position{
Column: 53,
Line: 79,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 60,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "testing",
Start: ast.Position{
Column: 53,
Line: 79,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "loadMem",
Start: ast.Position{
Column: 61,
Line: 79,
},
},
},
Name: "loadMem",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 103,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "fn: show_tag_keys_fn",
Start: ast.Position{
Column: 83,
Line: 79,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 85,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "fn",
Start: ast.Position{
Column: 83,
Line: 79,
},
},
},
Name: "fn",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 103,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "show_tag_keys_fn",
Start: ast.Position{
Column: 87,
Line: 79,
},
},
},
Name: "show_tag_keys_fn",
},
}},
},
Params: nil,
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 104,
Line: 79,
},
File: "show_tag_keys_test.flux",
Source: "test show_tag_keys = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_keys_fn}",
Start: ast.Position{
Column: 1,
Line: 78,
},
},
},
}},
Imports: []*ast.ImportDeclaration{&ast.ImportDeclaration{
As: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "show_tag_keys_test.flux",
Source: "import \"testing\"",
Start: ast.Position{
Column: 1,
Line: 3,
},
},
},
Path: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "show_tag_keys_test.flux",
Source: "\"testing\"",
Start: ast.Position{
Column: 8,
Line: 3,
},
},
},
Value: "testing",
},
}},
Name: "show_tag_keys_test.flux",
Package: &ast.PackageClause{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "show_tag_keys_test.flux",
Source: "package v1_test",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Name: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "show_tag_keys_test.flux",
Source: "v1_test",
Start: ast.Position{
Column: 9,
Line: 1,
},
},
},
Name: "main",
},
},
}},
Package: "main",
Path: "",
}, &ast.Package{
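	// AST fixture for show_tag_values_test.flux: the analogous pipeline is
	// range |> filter |> keep |> group |> distinct(column: "_field") |> sort.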
BaseNode: ast.BaseNode{
Errors: nil,
Loc: nil,
},
Files: []*ast.File{&ast.File{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 106,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "package v1_test\n\nimport \"testing\"\n\ninput = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.local,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.local,used_percent,82.64\n\"\n\noutput = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,load1\n,,0,load3\n,,0,load5\n,,0,used_percent\n\"\n\nshow_tag_values_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")\n |> sort()\n\ntest show_tag_values = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_values_fn}",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Body: []ast.Statement{&ast.VariableAssignment{
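		// input: CSV fixture extending the load series with a
		// swap/used_percent series (Flux source lines 5-37).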
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 37,
},
File: "show_tag_values_test.flux",
Source: "input = \"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.local,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.local,used_percent,82.64\n\"",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 6,
Line: 5,
},
File: "show_tag_values_test.flux",
Source: "input",
Start: ast.Position{
Column: 1,
Line: 5,
},
},
},
Name: "input",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 37,
},
File: "show_tag_values_test.flux",
Source: "\"\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.local,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.local,used_percent,82.64\n\"",
Start: ast.Position{
Column: 9,
Line: 5,
},
},
},
Value: "\n#datatype,string,long,dateTime:RFC3339,string,string,string,double\n#group,false,false,false,true,true,true,false\n#default,_result,,,,,,\n,result,table,_time,_measurement,host,_field,_value\n,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83\n,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72\n,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74\n,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63\n,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91\n,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84\n\n,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:53:36Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:46Z,system,host.local,load3,1.97\n,,1,2018-05-22T19:53:56Z,system,host.local,load3,1.96\n,,1,2018-05-22T19:54:06Z,system,host.local,load3,1.98\n,,1,2018-05-22T19:54:16Z,system,host.local,load3,1.97\n\n,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95\n,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92\n,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89\n,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94\n,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93\n\n,,3,2018-05-22T19:53:26Z,swap,host.local,used_percent,82.98\n,,3,2018-05-22T19:53:36Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:46Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:53:56Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:06Z,swap,host.local,used_percent,82.59\n,,3,2018-05-22T19:54:16Z,swap,host.local,used_percent,82.64\n",
},
}, &ast.VariableAssignment{
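		// output: expected distinct field values (load1, load3, load5,
		// used_percent) as CSV (Flux source lines 39-48).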
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 48,
},
File: "show_tag_values_test.flux",
Source: "output = \"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,load1\n,,0,load3\n,,0,load5\n,,0,used_percent\n\"",
Start: ast.Position{
Column: 1,
Line: 39,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 7,
Line: 39,
},
File: "show_tag_values_test.flux",
Source: "output",
Start: ast.Position{
Column: 1,
Line: 39,
},
},
},
Name: "output",
},
Init: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 2,
Line: 48,
},
File: "show_tag_values_test.flux",
Source: "\"\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,load1\n,,0,load3\n,,0,load5\n,,0,used_percent\n\"",
Start: ast.Position{
Column: 10,
Line: 39,
},
},
},
Value: "\n#datatype,string,long,string\n#group,false,false,false\n#default,0,,\n,result,table,_value\n,,0,load1\n,,0,load3\n,,0,load5\n,,0,used_percent\n",
},
}, &ast.VariableAssignment{
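		// show_tag_values_fn: the pipeline under test (Flux source lines 50-56).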
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 56,
},
File: "show_tag_values_test.flux",
Source: "show_tag_values_fn = (tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")\n |> sort()",
Start: ast.Position{
Column: 1,
Line: 50,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 19,
Line: 50,
},
File: "show_tag_values_test.flux",
Source: "show_tag_values_fn",
Start: ast.Position{
Column: 1,
Line: 50,
},
},
},
Name: "show_tag_values_fn",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 56,
},
File: "show_tag_values_test.flux",
Source: "(tables=<-) => tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")\n |> sort()",
Start: ast.Position{
Column: 22,
Line: 50,
},
},
},
Body: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.PipeExpression{
Argument: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 43,
Line: 50,
},
File: "show_tag_values_test.flux",
Source: "tables",
Start: ast.Position{
Column: 37,
Line: 50,
},
},
},
Name: "tables",
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 37,
Line: 50,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 51,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "start: 2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 14,
Line: 51,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 19,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "start",
Start: ast.Position{
Column: 14,
Line: 51,
},
},
},
Name: "start",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 41,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "2018-01-01T00:00:00Z",
Start: ast.Position{
Column: 21,
Line: 51,
},
},
},
Value: parser.MustParseTime("2018-01-01T00:00:00Z"),
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "stop: 2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 43,
Line: 51,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 47,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "stop",
Start: ast.Position{
Column: 43,
Line: 51,
},
},
},
Name: "stop",
},
Value: &ast.DateTimeLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 69,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "2019-01-01T00:00:00Z",
Start: ast.Position{
Column: 49,
Line: 51,
},
},
},
Value: parser.MustParseTime("2019-01-01T00:00:00Z"),
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 70,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)",
Start: ast.Position{
Column: 8,
Line: 51,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 13,
Line: 51,
},
File: "show_tag_values_test.flux",
Source: "range",
Start: ast.Position{
Column: 8,
Line: 51,
},
},
},
Name: "range",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)",
Start: ast.Position{
Column: 37,
Line: 50,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "fn: (r) => true",
Start: ast.Position{
Column: 15,
Line: 52,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "fn: (r) => true",
Start: ast.Position{
Column: 15,
Line: 52,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "fn",
Start: ast.Position{
Column: 15,
Line: 52,
},
},
},
Name: "fn",
},
Value: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "(r) => true",
Start: ast.Position{
Column: 19,
Line: 52,
},
},
},
Body: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 30,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "true",
Start: ast.Position{
Column: 26,
Line: 52,
},
},
},
Name: "true",
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 52,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "r",
Start: ast.Position{
Column: 20,
Line: 52,
},
},
},
Name: "r",
},
Value: nil,
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "filter(fn: (r) => true)",
Start: ast.Position{
Column: 8,
Line: 52,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 52,
},
File: "show_tag_values_test.flux",
Source: "filter",
Start: ast.Position{
Column: 8,
Line: 52,
},
},
},
Name: "filter",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 53,
},
File: "show_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_field\"])",
Start: ast.Position{
Column: 37,
Line: 50,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 53,
},
File: "show_tag_values_test.flux",
Source: "columns: [\"_field\"]",
Start: ast.Position{
Column: 13,
Line: 53,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 53,
},
File: "show_tag_values_test.flux",
Source: "columns: [\"_field\"]",
Start: ast.Position{
Column: 13,
Line: 53,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 20,
Line: 53,
},
File: "show_tag_values_test.flux",
Source: "columns",
Start: ast.Position{
Column: 13,
Line: 53,
},
},
},
Name: "columns",
},
Value: &ast.ArrayExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 53,
},
File: "show_tag_values_test.flux",
Source: "[\"_field\"]",
Start: ast.Position{
Column: 22,
Line: 53,
},
},
},
Elements: []ast.Expression{&ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 31,
Line: 53,
},
File: "show_tag_values_test.flux",
Source: "\"_field\"",
Start: ast.Position{
Column: 23,
Line: 53,
},
},
},
Value: "_field",
}},
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 53,
},
File: "show_tag_values_test.flux",
Source: "keep(columns: [\"_field\"])",
Start: ast.Position{
Column: 8,
Line: 53,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 53,
},
File: "show_tag_values_test.flux",
Source: "keep",
Start: ast.Position{
Column: 8,
Line: 53,
},
},
},
Name: "keep",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 15,
Line: 54,
},
File: "show_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_field\"])\n |> group()",
Start: ast.Position{
Column: 37,
Line: 50,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 15,
Line: 54,
},
File: "show_tag_values_test.flux",
Source: "group()",
Start: ast.Position{
Column: 8,
Line: 54,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 13,
Line: 54,
},
File: "show_tag_values_test.flux",
Source: "group",
Start: ast.Position{
Column: 8,
Line: 54,
},
},
},
Name: "group",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 34,
Line: 55,
},
File: "show_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")",
Start: ast.Position{
Column: 37,
Line: 50,
},
},
},
Call: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 55,
},
File: "show_tag_values_test.flux",
Source: "column: \"_field\"",
Start: ast.Position{
Column: 17,
Line: 55,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 55,
},
File: "show_tag_values_test.flux",
Source: "column: \"_field\"",
Start: ast.Position{
Column: 17,
Line: 55,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 23,
Line: 55,
},
File: "show_tag_values_test.flux",
Source: "column",
Start: ast.Position{
Column: 17,
Line: 55,
},
},
},
Name: "column",
},
Value: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 55,
},
File: "show_tag_values_test.flux",
Source: "\"_field\"",
Start: ast.Position{
Column: 25,
Line: 55,
},
},
},
Value: "_field",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 34,
Line: 55,
},
File: "show_tag_values_test.flux",
Source: "distinct(column: \"_field\")",
Start: ast.Position{
Column: 8,
Line: 55,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 55,
},
File: "show_tag_values_test.flux",
Source: "distinct",
Start: ast.Position{
Column: 8,
Line: 55,
},
},
},
Name: "distinct",
},
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 56,
},
File: "show_tag_values_test.flux",
Source: "tables\n |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)\n |> filter(fn: (r) => true)\n |> keep(columns: [\"_field\"])\n |> group()\n |> distinct(column: \"_field\")\n |> sort()",
Start: ast.Position{
Column: 37,
Line: 50,
},
},
},
Call: &ast.CallExpression{
Arguments: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 14,
Line: 56,
},
File: "show_tag_values_test.flux",
Source: "sort()",
Start: ast.Position{
Column: 8,
Line: 56,
},
},
},
Callee: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 56,
},
File: "show_tag_values_test.flux",
Source: "sort",
Start: ast.Position{
Column: 8,
Line: 56,
},
},
},
Name: "sort",
},
},
},
Params: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 50,
},
File: "show_tag_values_test.flux",
Source: "tables=<-",
Start: ast.Position{
Column: 23,
Line: 50,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 29,
Line: 50,
},
File: "show_tag_values_test.flux",
Source: "tables",
Start: ast.Position{
Column: 23,
Line: 50,
},
},
},
Name: "tables",
},
Value: &ast.PipeLiteral{BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 32,
Line: 50,
},
File: "show_tag_values_test.flux",
Source: "<-",
Start: ast.Position{
Column: 30,
Line: 50,
},
},
}},
}},
},
}, &ast.TestStatement{
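		// test show_tag_values: wires input, want, and fn together
		// (Flux source lines 58-59).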
Assignment: &ast.VariableAssignment{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 106,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "show_tag_values = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_values_fn}",
Start: ast.Position{
Column: 6,
Line: 58,
},
},
},
ID: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 58,
},
File: "show_tag_values_test.flux",
Source: "show_tag_values",
Start: ast.Position{
Column: 6,
Line: 58,
},
},
},
Name: "show_tag_values",
},
Init: &ast.FunctionExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 106,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "() =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_values_fn}",
Start: ast.Position{
Column: 24,
Line: 58,
},
},
},
Body: &ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 106,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "{input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_values_fn}",
Start: ast.Position{
Column: 6,
Line: 59,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "input: testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 7,
Line: 59,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 12,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "input",
Start: ast.Position{
Column: 7,
Line: 59,
},
},
},
Name: "input",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 59,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "csv: input",
Start: ast.Position{
Column: 34,
Line: 59,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 37,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "csv",
Start: ast.Position{
Column: 34,
Line: 59,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 44,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "input",
Start: ast.Position{
Column: 39,
Line: 59,
},
},
},
Name: "input",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 45,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "testing.loadStorage(csv: input)",
Start: ast.Position{
Column: 14,
Line: 59,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "testing.loadStorage",
Start: ast.Position{
Column: 14,
Line: 59,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 21,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "testing",
Start: ast.Position{
Column: 14,
Line: 59,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 33,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "loadStorage",
Start: ast.Position{
Column: 22,
Line: 59,
},
},
},
Name: "loadStorage",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "want: testing.loadMem(csv: output)",
Start: ast.Position{
Column: 47,
Line: 59,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 51,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "want",
Start: ast.Position{
Column: 47,
Line: 59,
},
},
},
Name: "want",
},
Value: &ast.CallExpression{
Arguments: []ast.Expression{&ast.ObjectExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 59,
},
},
},
Properties: []*ast.Property{&ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "csv: output",
Start: ast.Position{
Column: 69,
Line: 59,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 72,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "csv",
Start: ast.Position{
Column: 69,
Line: 59,
},
},
},
Name: "csv",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 80,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "output",
Start: ast.Position{
Column: 74,
Line: 59,
},
},
},
Name: "output",
},
}},
}},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 81,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "testing.loadMem(csv: output)",
Start: ast.Position{
Column: 53,
Line: 59,
},
},
},
Callee: &ast.MemberExpression{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "testing.loadMem",
Start: ast.Position{
Column: 53,
Line: 59,
},
},
},
Object: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 60,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "testing",
Start: ast.Position{
Column: 53,
Line: 59,
},
},
},
Name: "testing",
},
Property: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 68,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "loadMem",
Start: ast.Position{
Column: 61,
Line: 59,
},
},
},
Name: "loadMem",
},
},
},
}, &ast.Property{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 105,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "fn: show_tag_values_fn",
Start: ast.Position{
Column: 83,
Line: 59,
},
},
},
Key: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 85,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "fn",
Start: ast.Position{
Column: 83,
Line: 59,
},
},
},
Name: "fn",
},
Value: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 105,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "show_tag_values_fn",
Start: ast.Position{
Column: 87,
Line: 59,
},
},
},
Name: "show_tag_values_fn",
},
}},
},
Params: nil,
},
},
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 106,
Line: 59,
},
File: "show_tag_values_test.flux",
Source: "test show_tag_values = () =>\n ({input: testing.loadStorage(csv: input), want: testing.loadMem(csv: output), fn: show_tag_values_fn}",
Start: ast.Position{
Column: 1,
Line: 58,
},
},
},
}},
Imports: []*ast.ImportDeclaration{&ast.ImportDeclaration{
As: nil,
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "show_tag_values_test.flux",
Source: "import \"testing\"",
Start: ast.Position{
Column: 1,
Line: 3,
},
},
},
Path: &ast.StringLiteral{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 17,
Line: 3,
},
File: "show_tag_values_test.flux",
Source: "\"testing\"",
Start: ast.Position{
Column: 8,
Line: 3,
},
},
},
Value: "testing",
},
}},
Name: "show_tag_values_test.flux",
Package: &ast.PackageClause{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "show_tag_values_test.flux",
Source: "package v1_test",
Start: ast.Position{
Column: 1,
Line: 1,
},
},
},
Name: &ast.Identifier{
BaseNode: ast.BaseNode{
Errors: nil,
Loc: &ast.SourceLocation{
End: ast.Position{
Column: 16,
Line: 1,
},
File: "show_tag_values_test.flux",
Source: "v1_test",
Start: ast.Position{
Column: 9,
Line: 1,
},
},
},
Name: "main",
},
},
}},
Package: "main",
Path: "",
}}
| 1 | 10620 | This file shouldn't be in this PR | influxdata-flux | go |
@@ -1881,7 +1881,7 @@ class ArrayAssignmentTest extends TestCase
takesArray($a);',
'error_message' => 'InvalidScalarArgument',
],
- 'nonEmptyAssignmentToListElementChangeType' => [
+ 'SKIPPED-nonEmptyAssignmentToListElementChangeType' => [ // Should be LessSpecificReturnStatement?
'<?php
/**
* @param non-empty-list<string> $arr | 1 | <?php
namespace Psalm\Tests;
use Psalm\Context;
use Psalm\Tests\Traits\InvalidCodeAnalysisTestTrait;
use Psalm\Tests\Traits\ValidCodeAnalysisTestTrait;
use Psalm\Type;
class ArrayAssignmentTest extends TestCase
{
use InvalidCodeAnalysisTestTrait;
use ValidCodeAnalysisTestTrait;
public function testConditionalAssignment(): void
{
$this->addFile(
'somefile.php',
'<?php
if ($b) {
$foo["a"] = "hello";
}'
);
$context = new Context();
$context->vars_in_scope['$b'] = Type::getBool();
$context->vars_in_scope['$foo'] = Type::getArray();
$this->analyzeFile('somefile.php', $context);
$this->assertFalse(isset($context->vars_in_scope['$foo[\'a\']']));
}
/**
* @return iterable<string,array{string,assertions?:array<string,string>,error_levels?:string[]}>
*/
public function providerValidCodeParse(): iterable
{
return [
'genericArrayCreationWithSingleIntValue' => [
'<?php
$out = [];
$out[] = 4;',
'assertions' => [
'$out' => 'non-empty-list<int>',
],
],
'genericArrayCreationWithInt' => [
'<?php
$out = [];
foreach ([1, 2, 3, 4, 5] as $value) {
$out[] = 4;
}',
'assertions' => [
'$out' => 'non-empty-list<int>',
],
],
'generic2dArrayCreation' => [
'<?php
$out = [];
foreach ([1, 2, 3, 4, 5] as $value) {
$out[] = [4];
}',
'assertions' => [
'$out' => 'non-empty-list<array{int}>',
],
],
'generic2dArrayCreationAddedInIf' => [
'<?php
$out = [];
$bits = [];
foreach ([1, 2, 3, 4, 5] as $value) {
if (rand(0,100) > 50) {
$out[] = $bits;
$bits = [];
}
$bits[] = 4;
}
$out[] = $bits;',
'assertions' => [
'$out' => 'non-empty-list<non-empty-list<int>>',
],
],
'genericArrayCreationWithObjectAddedInIf' => [
'<?php
class B {}
$out = [];
if (rand(0,10) === 10) {
$out[] = new B();
}',
'assertions' => [
'$out' => 'list<B>',
],
],
'genericArrayCreationWithElementAddedInSwitch' => [
'<?php
$out = [];
switch (rand(0,10)) {
case 5:
$out[] = 4;
break;
case 6:
// do nothing
}',
'assertions' => [
'$out' => 'list<int>',
],
],
'genericArrayCreationWithElementsAddedInSwitch' => [
'<?php
$out = [];
switch (rand(0,10)) {
case 5:
$out[] = 4;
break;
case 6:
$out[] = "hello";
break;
}',
'assertions' => [
'$out' => 'list<int|string>',
],
],
'genericArrayCreationWithElementsAddedInSwitchWithNothing' => [
'<?php
$out = [];
switch (rand(0,10)) {
case 5:
$out[] = 4;
break;
case 6:
$out[] = "hello";
break;
case 7:
// do nothing
}',
'assertions' => [
'$out' => 'list<int|string>',
],
],
'implicit2dIntArrayCreation' => [
'<?php
$foo = [];
$foo[][] = "hello";',
'assertions' => [
'$foo' => 'non-empty-list<array<int, string>>',
],
],
'implicit3dIntArrayCreation' => [
'<?php
$foo = [];
$foo[][][] = "hello";',
'assertions' => [
'$foo' => 'non-empty-list<list<array<int, string>>>',
],
],
'implicit4dIntArrayCreation' => [
'<?php
$foo = [];
$foo[][][][] = "hello";',
'assertions' => [
'$foo' => 'non-empty-list<list<list<array<int, string>>>>',
],
],
'implicitIndexedIntArrayCreation' => [
'<?php
$foo = [];
$foo[0] = "a";
$foo[1] = "b";
$foo[2] = "c";
$bar = [0, 1, 2];
$bat = [];
foreach ($foo as $i => $text) {
$bat[$text] = $bar[$i];
}',
'assertions' => [
'$foo' => 'array{0: string, 1: string, 2: string}',
'$bar' => 'array{int, int, int}',
'$bat' => 'non-empty-array<string, int>',
],
],
'implicitStringArrayCreation' => [
'<?php
$foo = [];
$foo["bar"] = "hello";',
'assertions' => [
'$foo' => 'array{bar: string}',
'$foo[\'bar\']' => 'string',
],
],
'implicit2dStringArrayCreation' => [
'<?php
$foo = [];
$foo["bar"]["baz"] = "hello";',
'assertions' => [
'$foo' => 'array{bar: array{baz: string}}',
'$foo[\'bar\'][\'baz\']' => 'string',
],
],
'implicit3dStringArrayCreation' => [
'<?php
$foo = [];
$foo["bar"]["baz"]["bat"] = "hello";',
'assertions' => [
'$foo' => 'array{bar: array{baz: array{bat: string}}}',
'$foo[\'bar\'][\'baz\'][\'bat\']' => 'string',
],
],
'implicit4dStringArrayCreation' => [
'<?php
$foo = [];
$foo["bar"]["baz"]["bat"]["bap"] = "hello";',
'assertions' => [
'$foo' => 'array{bar: array{baz: array{bat: array{bap: string}}}}',
'$foo[\'bar\'][\'baz\'][\'bat\'][\'bap\']' => 'string',
],
],
'2Step2dStringArrayCreation' => [
'<?php
$foo = ["bar" => []];
$foo["bar"]["baz"] = "hello";',
'assertions' => [
'$foo' => 'array{bar: array{baz: string}}',
'$foo[\'bar\'][\'baz\']' => 'string',
],
],
'2StepImplicit3dStringArrayCreation' => [
'<?php
$foo = ["bar" => []];
$foo["bar"]["baz"]["bat"] = "hello";',
'assertions' => [
'$foo' => 'array{bar: array{baz: array{bat: string}}}',
],
],
'conflictingTypesWithNoAssignment' => [
'<?php
$foo = [
"bar" => ["a" => "b"],
"baz" => [1]
];',
'assertions' => [
'$foo' => 'array{bar: array{a: string}, baz: array{int}}',
],
],
'implicitTKeyedArrayCreation' => [
'<?php
$foo = [
"bar" => 1,
];
$foo["baz"] = "a";',
'assertions' => [
'$foo' => 'array{bar: int, baz: string}',
],
],
'conflictingTypesWithAssignment' => [
'<?php
$foo = [
"bar" => ["a" => "b"],
"baz" => [1]
];
$foo["bar"]["bam"]["baz"] = "hello";',
'assertions' => [
'$foo' => 'array{bar: array{a: string, bam: array{baz: string}}, baz: array{int}}',
],
],
'conflictingTypesWithAssignment2' => [
'<?php
$foo = [];
$foo["a"] = "hello";
$foo["b"][] = "goodbye";
$bar = $foo["a"];',
'assertions' => [
'$foo' => 'array{a: string, b: non-empty-list<string>}',
'$foo[\'a\']' => 'string',
'$foo[\'b\']' => 'non-empty-list<string>',
'$bar' => 'string',
],
],
'conflictingTypesWithAssignment3' => [
'<?php
$foo = [];
$foo["a"] = "hello";
$foo["b"]["c"]["d"] = "goodbye";',
'assertions' => [
'$foo' => 'array{a: string, b: array{c: array{d: string}}}',
],
],
'nestedTKeyedArrayAssignment' => [
'<?php
$foo = [];
$foo["a"]["b"] = "hello";
$foo["a"]["c"] = 1;',
'assertions' => [
'$foo' => 'array{a: array{b: string, c: int}}',
],
],
'conditionalTKeyedArrayAssignment' => [
'<?php
$foo = ["a" => "hello"];
if (rand(0, 10) === 5) {
$foo["b"] = 1;
}
else {
$foo["b"] = 2;
}',
'assertions' => [
'$foo' => 'array{a: string, b: int}',
],
],
'arrayKey' => [
'<?php
$a = ["foo", "bar"];
$b = $a[0];
$c = ["a" => "foo", "b"=> "bar"];
$d = "a";
$e = $c[$d];',
'assertions' => [
'$b' => 'string',
'$e' => 'string',
],
],
'conditionalCheck' => [
'<?php
/**
* @param array{b:string} $a
* @return null|string
*/
function fooFoo($a) {
if ($a["b"]) {
return $a["b"];
}
}',
'assertions' => [],
],
'variableKeyArrayCreate' => [
'<?php
$a = [];
$b = "boop";
$a[$b][] = "bam";
$c = [];
$c[$b][$b][] = "bam";',
'assertions' => [
'$a' => 'array{boop: non-empty-list<string>}',
'$c' => 'array{boop: array{boop: non-empty-list<string>}}',
],
],
'assignExplicitValueToGeneric' => [
'<?php
/** @var array<string, array<string, string>> */
$a = [];
$a["foo"] = ["bar" => "baz"];',
'assertions' => [
'$a' => 'non-empty-array<string, non-empty-array<string, string>>',
],
],
'additionWithEmpty' => [
'<?php
$a = [];
$a += ["bar"];
$b = [] + ["bar"];',
'assertions' => [
'$a' => 'array{0: string}',
'$b' => 'array{0: string}',
],
],
'additionDifferentType' => [
'<?php
$a = ["bar"];
$a += [1];
$b = ["bar"] + [1];',
'assertions' => [
'$a' => 'array{0: string}',
'$b' => 'array{0: string}',
],
],
'present1dArrayTypeWithVarKeys' => [
'<?php
/** @var array<string, array<int, string>> */
$a = [];
$foo = "foo";
$a[$foo][] = "bat";',
'assertions' => [],
],
'present2dArrayTypeWithVarKeys' => [
'<?php
/** @var array<string, array<string, array<int, string>>> */
$b = [];
$foo = "foo";
$bar = "bar";
$b[$foo][$bar][] = "bat";',
'assertions' => [],
],
'objectLikeWithIntegerKeys' => [
'<?php
/** @var array{0: string, 1: int} **/
$a = ["hello", 5];
$b = $a[0]; // string
$c = $a[1]; // int
list($d, $e) = $a; // $d is string, $e is int',
'assertions' => [
'$b' => 'string',
'$c' => 'int',
'$d' => 'string',
'$e' => 'int',
],
],
'objectLikeArrayAdditionNotNested' => [
'<?php
$foo = [];
$foo["a"] = 1;
$foo += ["b" => [2, 3]];',
'assertions' => [
'$foo' => 'array{a: int, b: array{int, int}}',
],
],
'objectLikeArrayIsNonEmpty' => [
'<?php
/**
* @param array{a?: string, b: string} $arg
* @return non-empty-array<string, string>
*/
function test(array $arg): array {
return $arg;
}
',
],
'nestedTKeyedArrayAddition' => [
'<?php
$foo = [];
$foo["root"]["a"] = 1;
$foo["root"] += ["b" => [2, 3]];',
'assertions' => [
'$foo' => 'array{root: array{a: int, b: array{int, int}}}',
],
],
'updateStringIntKey1' => [
'<?php
$a = [];
$a["a"] = 5;
$a[0] = 3;',
'assertions' => [
'$a' => 'array{0: int, a: int}',
],
],
'updateStringIntKey2' => [
'<?php
$string = "c";
$b = [];
$b[$string] = 5;
$b[0] = 3;',
'assertions' => [
'$b' => 'array{0: int, c: int}',
],
],
'updateStringIntKey3' => [
'<?php
$string = "c";
$c = [];
$c[0] = 3;
$c[$string] = 5;',
'assertions' => [
'$c' => 'array{0: int, c: int}',
],
],
'updateStringIntKey4' => [
'<?php
$int = 5;
$d = [];
$d[$int] = 3;
$d["a"] = 5;',
'assertions' => [
'$d' => 'array{5: int, a: int}',
],
],
'updateStringIntKey5' => [
'<?php
$string = "c";
$int = 5;
$e = [];
$e[$int] = 3;
$e[$string] = 5;',
'assertions' => [
'$e' => 'array{5: int, c: int}',
],
],
'updateStringIntKeyWithIntRootAndNumberOffset' => [
'<?php
$string = "c";
$int = 5;
$a = [];
$a[0]["a"] = 5;
$a[0][0] = 3;',
'assertions' => [
'$a' => 'array{0: array{0: int, a: int}}',
],
],
'updateStringIntKeyWithIntRoot' => [
'<?php
$string = "c";
$int = 5;
$b = [];
$b[0][$string] = 5;
$b[0][0] = 3;
$c = [];
$c[0][0] = 3;
$c[0][$string] = 5;
$d = [];
$d[0][$int] = 3;
$d[0]["a"] = 5;
$e = [];
$e[0][$int] = 3;
$e[0][$string] = 5;',
'assertions' => [
'$b' => 'array{0: array{0: int, c: int}}',
'$c' => 'array{0: array{0: int, c: int}}',
'$d' => 'array{0: array{5: int, a: int}}',
'$e' => 'array{0: array{5: int, c: int}}',
],
],
'updateStringIntKeyWithTKeyedArrayRootAndNumberOffset' => [
'<?php
$string = "c";
$int = 5;
$a = [];
$a["root"]["a"] = 5;
$a["root"][0] = 3;',
'assertions' => [
'$a' => 'array{root: array{0: int, a: int}}',
],
],
'updateStringIntKeyWithTKeyedArrayRoot' => [
'<?php
$string = "c";
$int = 5;
$b = [];
$b["root"][$string] = 5;
$b["root"][0] = 3;
$c = [];
$c["root"][0] = 3;
$c["root"][$string] = 5;
$d = [];
$d["root"][$int] = 3;
$d["root"]["a"] = 5;
$e = [];
$e["root"][$int] = 3;
$e["root"][$string] = 5;',
'assertions' => [
'$b' => 'array{root: array{0: int, c: int}}',
'$c' => 'array{root: array{0: int, c: int}}',
'$d' => 'array{root: array{5: int, a: int}}',
'$e' => 'array{root: array{5: int, c: int}}',
],
],
'mixedArrayAssignmentWithStringKeys' => [
'<?php
/** @psalm-suppress MixedArgument */
function foo(array $a) : array {
/** @psalm-suppress MixedArrayAssignment */
$a["b"]["c"] = 5;
/** @psalm-suppress MixedArrayAccess */
echo $a["b"]["d"];
echo $a["a"];
return $a;
}',
],
'mixedArrayCoercion' => [
'<?php
/** @param int[] $arg */
function expect_int_array($arg): void { }
/** @return array */
function generic_array() { return []; }
/** @psalm-suppress MixedArgumentTypeCoercion */
expect_int_array(generic_array());
function expect_int(int $arg): void {}
/** @return mixed */
function return_mixed() { return 2; }
/** @psalm-suppress MixedArgument */
expect_int(return_mixed());',
],
'suppressMixedObjectOffset' => [
'<?php
function getThings(): array {
return [];
}
$arr = [];
foreach (getThings() as $a) {
$arr[$a->id] = $a;
}
echo $arr[0];',
'assertions' => [],
'error_levels' => ['MixedAssignment', 'MixedPropertyFetch', 'MixedArrayOffset', 'MixedArgument'],
],
'changeTKeyedArrayType' => [
'<?php
$a = ["b" => "c"];
$a["d"] = ["e" => "f"];
$a["b"] = 4;
$a["d"]["e"] = 5;',
'assertions' => [
'$a[\'b\']' => 'int',
'$a[\'d\']' => 'array{e: int}',
'$a[\'d\'][\'e\']' => 'int',
'$a' => 'array{b: int, d: array{e: int}}',
],
],
'changeTKeyedArrayTypeInIf' => [
'<?php
$a = [];
if (rand(0, 5) > 3) {
$a["b"] = new stdClass;
} else {
$a["b"] = ["e" => "f"];
}
if ($a["b"] instanceof stdClass) {
$a["b"] = [];
}
$a["b"]["e"] = "d";',
'assertions' => [
'$a' => 'array{b: array{e: string}}',
'$a[\'b\']' => 'array{e: string}',
'$a[\'b\'][\'e\']' => 'string',
],
],
'implementsArrayAccess' => [
'<?php
class A implements \ArrayAccess {
/**
* @param string|int $offset
* @param mixed $value
*/
public function offsetSet($offset, $value): void {}
/** @param string|int $offset */
public function offsetExists($offset): bool {
return true;
}
/** @param string|int $offset */
public function offsetUnset($offset): void {}
/**
* @param string $offset
* @return mixed
*/
public function offsetGet($offset) {
return 1;
}
}
$a = new A();
$a["bar"] = "cool";
$a["bar"]->foo();',
'assertions' => [
'$a' => 'A',
],
'error_levels' => ['MixedMethodCall'],
],
'mixedSwallowsArrayAssignment' => [
'<?php
/** @psalm-suppress MixedAssignment */
$a = $_GET["foo"];
/** @psalm-suppress MixedArrayAssignment */
$a["bar"] = "cool";
/** @psalm-suppress MixedMethodCall */
$a->offsetExists("baz");',
],
'implementsArrayAccessInheritingDocblock' => [
'<?php
class A implements \ArrayAccess
{
/**
* @var array<string, mixed>
*/
protected $data = [];
/**
* @param array<string, mixed> $data
*/
public function __construct(array $data = [])
{
$this->data = $data;
}
/**
* @param string $offset
*/
public function offsetExists($offset): bool
{
return isset($this->data[$offset]);
}
/**
* @param string $offset
*/
public function offsetGet($offset)
{
return $this->data[$offset];
}
/**
* @param string $offset
* @param mixed $value
*/
public function offsetSet($offset, $value): void
{
$this->data[$offset] = $value;
}
/**
* @param string $offset
*/
public function offsetUnset($offset): void
{
unset($this->data[$offset]);
}
}
class B extends A {
/**
* {@inheritdoc}
*/
public function offsetSet($offset, $value): void
{
echo "some log";
$this->data[$offset] = $value;
}
}',
'assertions' => [],
'error_levels' => ['MixedAssignment', 'MixedReturnStatement'],
],
'assignToNullDontDie' => [
'<?php
$a = null;
$a[0][] = 1;',
'assertions' => [
'$a' => 'array{0: non-empty-list<int>}',
],
'error_levels' => ['PossiblyNullArrayAssignment'],
],
'stringAssignment' => [
'<?php
$str = "hello";
$str[0] = "i";',
'assertions' => [
'$str' => 'string',
],
],
'ignoreInvalidArrayOffset' => [
'<?php
$a = [
"b" => [],
];
$a["b"]["c"] = 0;
foreach ([1, 2, 3] as $i) {
/**
* @psalm-suppress InvalidArrayOffset
* @psalm-suppress MixedOperand
* @psalm-suppress PossiblyUndefinedArrayOffset
* @psalm-suppress MixedAssignment
*/
$a["b"]["d"] += $a["b"][$i];
}',
'assertions' => [],
],
'keyedIntOffsetArrayValues' => [
'<?php
$a = ["hello", 5];
/** @psalm-suppress RedundantCast */
$a_values = array_values($a);
$a_keys = array_keys($a);',
'assertions' => [
'$a' => 'array{string, int}',
'$a_values' => 'non-empty-list<int|string>',
'$a_keys' => 'non-empty-list<int>',
],
],
'changeIntOffsetKeyValuesWithDirectAssignment' => [
'<?php
$b = ["hello", 5];
$b[0] = 3;',
'assertions' => [
'$b' => 'array{int, int}',
],
],
'changeIntOffsetKeyValuesAfterCopy' => [
'<?php
$b = ["hello", 5];
$c = $b;
$c[0] = 3;',
'assertions' => [
'$b' => 'array{string, int}',
'$c' => 'array{int, int}',
],
],
'mergeIntOffsetValues' => [
'<?php
$d = array_merge(["hello", 5], []);
$e = array_merge(["hello", 5], ["hello again"]);',
'assertions' => [
'$d' => 'array{0: string, 1: int}',
'$e' => 'array{0: string, 1: int, 2: string}',
],
],
'addIntOffsetToEmptyArray' => [
'<?php
$f = [];
$f[0] = "hello";',
'assertions' => [
'$f' => 'array{0: string}',
],
],
'dontIncrementIntOffsetForKeyedItems' => [
'<?php
$a = [1, "a" => 2, 3];',
'assertions' => [
'$a' => 'array{0: int, 1: int, a: int}',
],
],
'assignArrayOrSetNull' => [
'<?php
$a = [];
if (rand(0, 1)) {
$a[] = 4;
}
if (!$a) {
$a = null;
}',
'assertions' => [
'$a' => 'non-empty-list<int>|null',
],
],
'assignArrayOrSetNullInElseIf' => [
'<?php
$a = [];
if (rand(0, 1)) {
$a[] = 4;
}
if ($a) {
} elseif (rand(0, 1)) {
$a = null;
}',
'assertions' => [
'$a' => 'list<int>|null',
],
],
'assignArrayOrSetNullInElse' => [
'<?php
$a = [];
if (rand(0, 1)) {
$a[] = 4;
}
if ($a) {
} else {
$a = null;
}',
'assertions' => [
'$a' => 'non-empty-list<int>|null',
],
],
'mixedMethodCallArrayAccess' => [
'<?php
function foo(object $obj) : array {
$ret = [];
$ret["a"][$obj->foo()] = 1;
return $ret["a"];
}',
'assertions' => [],
'error_levels' => ['MixedMethodCall', 'MixedArrayOffset'],
],
'mixedAccessNestedKeys' => [
'<?php
function takesString(string $s) : string { return "hello"; }
function updateArray(array $arr) : array {
foreach ($arr as $i => $item) {
$arr[$i]["a"]["b"] = 5;
$arr[$i]["a"]["c"] = takesString($arr[$i]["a"]["c"]);
}
return $arr;
}',
'assertions' => [],
'error_levels' => [
'MixedArrayAccess', 'MixedAssignment', 'MixedArrayOffset', 'MixedArrayAssignment', 'MixedArgument',
],
],
'possiblyUndefinedArrayAccessWithIsset' => [
'<?php
if (rand(0,1)) {
$a = ["a" => 1];
} else {
$a = [2, 3];
}
if (isset($a[0])) {
echo $a[0];
}',
],
'accessArrayAfterSuppressingBugs' => [
'<?php
$a = [];
foreach (["one", "two", "three"] as $key) {
$a[$key] += rand(0, 10);
}
$a["four"] = true;
if ($a["one"]) {}',
],
'noDuplicateImplicitIntArrayKey' => [
'<?php
$arr = [1 => 0, 1, 2, 3];
$arr = [1 => "one", 2 => "two", "three"];',
],
'noDuplicateImplicitIntArrayKeyLargeOffset' => [
'<?php
$arr = [
48 => "A",
95 => "a", "b",
];',
],
'constArrayAssignment' => [
'<?php
const BAR = 2;
$arr = [1 => 2];
$arr[BAR] = [6];
$bar = $arr[BAR][0];',
],
'castToArray' => [
'<?php
$a = (array) (rand(0, 1) ? [1 => "one"] : 0);
$b = (array) null;',
'assertions' => [
'$a' => 'array{0?: int, 1?: string}',
'$b' => 'array<empty, empty>',
],
],
'getOnCoercedArray' => [
'<?php
function getArray() : array {
return rand(0, 1) ? ["attr" => []] : [];
}
$out = getArray();
$out["attr"] = (array) ($out["attr"] ?? []);
$out["attr"]["bar"] = 1;',
'assertions' => [
'$out[\'attr\'][\'bar\']' => 'int',
],
],
'arrayAssignmentOnMixedArray' => [
'<?php
function foo(array $arr) : void {
$arr["a"] = 1;
foreach ($arr["b"] as $b) {}
}',
'assertions' => [],
'error_levels' => ['MixedAssignment'],
],
'implementsArrayAccessAllowNullOffset' => [
'<?php
/**
* @template-implements ArrayAccess<?int, string>
*/
class C implements ArrayAccess {
public function offsetExists(int $offset) : bool { return true; }
public function offsetGet($offset) : string { return "";}
public function offsetSet(?int $offset, string $value) : void {}
public function offsetUnset(int $offset) : void { }
}
$c = new C();
$c[] = "hello";',
],
'checkEmptinessAfterConditionalArrayAdjustment' => [
'<?php
class A {
public array $arr = [];
public function foo() : void {
if (rand(0, 1)) {
$this->arr["a"] = "hello";
}
if (!$this->arr) {}
}
}'
],
'arrayAssignmentAddsTypePossibilities' => [
'<?php
function bar(array $value): void {
$value["b"] = "hello";
$value = $value + ["a" => 0];
if (is_int($value["a"])) {}
}'
],
'coercePossiblyNullKeyToZero' => [
'<?php
function int_or_null(): ?int {
return rand(0, 1) !== 0 ? 42 : null;
}
/**
* @return array<array-key, null>
*/
function foo(): array {
$array = [];
/** @psalm-suppress PossiblyNullArrayOffset */
$array[int_or_null()] = null;
return $array;
}'
],
'coerceNullKeyToZero' => [
'<?php
/**
* @return array<int, null>
*/
function foo(): array {
$array = [];
/** @psalm-suppress NullArrayOffset */
$array[null] = null;
return $array;
}'
],
'listUsedAsArray' => [
'<?php
function takesArray(array $arr) : void {}
$a = [];
$a[] = 1;
$a[] = 2;
takesArray($a);',
'assertions' => [
'$a' => 'non-empty-list<int>'
],
],
'listTakesEmptyArray' => [
'<?php
/** @param list<int> $arr */
function takesList(array $arr) : void {}
$a = [];
takesList($a);',
'assertions' => [
'$a' => 'array<empty, empty>'
],
],
'listCreatedInSingleStatementUsedAsArray' => [
'<?php
function takesArray(array $arr) : void {}
/** @param list<int> $arr */
function takesList(array $arr) : void {}
$a = [1, 2];
takesArray($a);
takesList($a);
$a[] = 3;
takesArray($a);
takesList($a);
$b = $a;
$b[] = rand(0, 10);',
'assertions' => [
'$a' => 'array{int, int, int}',
'$b' => 'array{int, int, int, int<0, 10>}',
],
],
'listMergedWithTKeyedArrayList' => [
'<?php
/** @param list<int> $arr */
function takesAnotherList(array $arr) : void {}
/** @param list<int> $arr */
function takesList(array $arr) : void {
if (rand(0, 1)) {
$arr = [1, 2, 3];
}
takesAnotherList($arr);
}',
],
'listMergedWithTKeyedArrayListAfterAssertion' => [
'<?php
/** @param list<int> $arr */
function takesAnotherList(array $arr) : void {}
/** @param list<int> $arr */
function takesList(array $arr) : void {
if ($arr) {
$arr = [4, 5, 6];
}
takesAnotherList($arr);
}',
],
'nonEmptyAssertionOnListElement' => [
'<?php
/** @param list<array<string, string>> $arr */
function takesList(array $arr) : void {
if (!empty($arr[0])) {
foreach ($arr[0] as $k => $v) {}
}
}',
],
'nonEmptyAssignmentToListElement' => [
'<?php
/**
* @param non-empty-list<string> $arr
* @return non-empty-list<string>
*/
function takesList(array $arr) : array {
$arr[0] = "food";
return $arr;
}',
],
'unpackedArgIsList' => [
'<?php
final class Values
{
/**
* @psalm-var list<int>
*/
private $ints = [];
/** @no-named-arguments */
public function set(int ...$ints): void {
$this->ints = $ints;
}
}'
],
'assignStringFirstChar' => [
'<?php
/** @param non-empty-list<string> $arr */
function foo(array $arr) : string {
$arr[0][0] = "a";
return $arr[0];
}'
],
'arraySpread' => [
'<?php
$arrayA = [1, 2, 3];
$arrayB = [4, 5];
$result = [0, ...$arrayA, ...$arrayB, 6 ,7];
$arr1 = [3 => 1, 1 => 2, 3];
$arr2 = [...$arr1];
$arr3 = [1 => 0, ...$arr1];',
[
'$result' => 'array{int, int, int, int, int, int, int, int}',
'$arr2' => 'array{int, int, int}',
'$arr3' => 'array{1: int, 2: int, 3: int, 4: int}',
]
],
'arraySpreadWithString' => [
'<?php
$x = [
"a" => 0,
...["a" => 1],
...["b" => 2]
];',
[
'$x===' => 'array{a: 1, b: 2}',
],
[],
'8.1'
],
'listPropertyAssignmentAfterIsset' => [
'<?php
class Collection {
/** @var list<string> */
private $list = [];
public function override(int $offset): void {
if (isset($this->list[$offset])) {
$this->list[$offset] = "a";
}
}
}',
],
'propertyAssignmentToTKeyedArrayIntKeys' => [
'<?php
class Bar {
/** @var array{0: string, 1:string} */
private array $baz = ["a", "b"];
public function append(string $str) : void {
$this->baz[rand(0, 1) ? 0 : 1] = $str;
}
}'
],
'propertyAssignmentToTKeyedArrayStringKeys' => [
'<?php
class Bar {
/** @var array{a: string, b:string} */
private array $baz = ["a" => "c", "b" => "d"];
public function append(string $str) : void {
$this->baz[rand(0, 1) ? "a" : "b"] = $str;
}
}',
],
'arrayMixedMixedNotAllowedFromObject' => [
'<?php
function foo(ArrayObject $a) : array {
$arr = [];
/**
* @psalm-suppress MixedAssignment
* @psalm-suppress MixedArrayOffset
*/
foreach ($a as $k => $v) {
$arr[$k] = $v;
}
return $arr;
}',
],
'arrayMixedMixedNotAllowedFromMixed' => [
'<?php
/** @psalm-suppress MissingParamType */
function foo($a) : array {
$arr = ["a" => "foo"];
/**
* @psalm-suppress MixedAssignment
* @psalm-suppress MixedArrayOffset
*/
foreach ($a as $k => $v) {
$arr[$k] = $v;
}
return $arr;
}',
],
'assignNestedKey' => [
'<?php
/**
* @psalm-suppress MixedAssignment
* @psalm-suppress MixedArrayOffset
*
* @psalm-return array<true>
*/
function getAutoComplete(array $data): array {
$response = ["s" => []];
foreach ($data as $suggestion) {
$response["s"][$suggestion] = true;
}
return $response["s"];
}'
],
'assignArrayUnion' => [
'<?php
/**
* @psalm-suppress MixedArrayOffset
*/
function foo(array $out) : array {
$key = 1;
if (rand(0, 1)) {
/** @var mixed */
$key = null;
}
$out[$key] = 5;
return $out;
}'
],
'mergeWithNestedMixed' => [
'<?php
function getArray() : array {
return [];
}
$arr = getArray();
if (rand(0, 1)) {
/** @psalm-suppress MixedArrayAssignment */
$arr["hello"]["goodbye"] = 5;
}',
[
'$arr' => 'array<array-key, mixed>',
]
],
'dontUpdateMixedArrayWithStringKey' => [
'<?php
class A {}
/**
* @psalm-suppress MixedArgument
*/
function run1(array $arguments): void {
if (rand(0, 1)) {
$arguments["c"] = new A();
}
if ($arguments["b"]) {
echo $arguments["b"];
}
}',
],
'manipulateArrayTwice' => [
'<?php
/** @var array */
$options = [];
$options[\'a\'] = 1;
/** @psalm-suppress MixedArrayAssignment */
$options[\'b\'][\'c\'] = 2;',
[
'$options[\'b\']' => 'mixed'
]
],
'assignWithLiteralStringKey' => [
'<?php
/**
* @param array<int, array{internal: bool, ported: bool}> $i
* @return array<int, array{internal: bool, ported: bool}>
*/
function addOneEntry(array $i, int $id): array {
$i[$id][rand(0, 1) ? "internal" : "ported"] = true;
return $i;
}'
],
'binaryOperation' => [
'<?php
$a = array_map(
function (string $x) {
return new RuntimeException($x);
},
["c" => ""]
);
$a += ["e" => new RuntimeException()];',
[
'$a' => 'array{c: RuntimeException, e: RuntimeException}',
]
],
'mergeArrayKeysProperly' => [
'<?php
interface EntityInterface {}
class SomeEntity implements EntityInterface {}
/**
* @param array<class-string<EntityInterface>, bool> $arr
* @return array<class-string<EntityInterface>, bool>
*/
function createForEntity(array $arr)
{
$arr[SomeEntity::class] = true;
return $arr;
}'
],
'lowercaseStringMergeWithLiteral' => [
'<?php
/**
* @param array<lowercase-string, bool> $foo
* @return array<lowercase-string, bool>
*/
function foo(array $foo) : array {
$foo["hello"] = true;
return $foo;
}'
],
'updateListValueAndMaintainListnessAfterGreaterThanOrEqual' => [
'<?php
/**
* @param list<int> $l
* @return list<int>
*/
function takesList(array $l) {
if (count($l) < 2) {
throw new \Exception("bad");
}
$l[1] = $l[1] + 1;
return $l;
}'
],
'updateListValueAndMaintainListnessAfterNotIdentical' => [
'<?php
/**
* @param list<int> $l
* @return list<int>
*/
function takesList(array $l) {
if (count($l) !== 2) {
throw new \Exception("bad");
}
$l[1] = $l[1] + 1;
return $l;
}'
],
'unpackTypedIterableIntoArray' => [
'<?php
/**
* @param iterable<int, string> $data
* @return list<string>
*/
function unpackIterable(iterable $data): array
{
return [...$data];
}'
],
'unpackTypedTraversableIntoArray' => [
'<?php
/**
* @param Traversable<int, string> $data
* @return list<string>
*/
function unpackIterable(Traversable $data): array
{
return [...$data];
}'
],
'unpackCanBeEmpty' => [
'<?php
$x = [];
$y = [];
$x = [...$x, ...$y];
$x ? 1 : 0;
',
],
'unpackEmptyKeepsCorrectKeys' => [
'<?php
$a = [];
$b = [1];
$c = [];
$d = [2];
$e = [...$a, ...$b, ...$c, ...$d, 3];
',
'assertions' => ['$e' => 'array{int, int, int}']
],
'unpackNonObjectlikePreventsObjectlikeArray' => [
'<?php
/** @return list<mixed> */
function test(): array {
return [];
}
$x = [...test(), "a" => "b"];
',
'assertions' => ['$x' => 'non-empty-array<int|string, mixed|string>']
],
'ArrayOffsetNumericSupPHPINTMAX' => [
'<?php
$_a = [
"9223372036854775808" => 1,
"9223372036854775809" => 2
];
',
],
'assignToListWithForeachKey' => [
'<?php
/**
* @param list<string> $list
* @return list<string>
*/
function getList(array $list): array {
foreach ($list as $key => $value) {
$list[$key] = $value . "!";
}
return $list;
}'
],
'ArrayCreateTemplateArrayKey' => [
'<?php
/**
* @template K of array-key
* @param K $key
*/
function with($key): void
{
[$key => 123];
}',
],
'assignStringIndexed' => [
'<?php
/**
* @param array<string, mixed> $array
* @return non-empty-array<string, mixed>
*/
function getArray(array $array): array {
if (rand(0, 1)) {
$array["a"] = 2;
} else {
$array["b"] = 1;
}
return $array;
}'
],
'castPossiblyArray' => [
'<?php
/**
* @psalm-param string|list<string> $a
* @return list<string>
*/
function addHeaders($a): array {
return (array)$a;
}',
],
'ClassConstantAsKey' => [
'<?php
/**
* @property Foo::C_* $aprop
*/
class Foo {
public const C_ONE = 1;
public const C_TWO = 2;
public function __get(string $prop) {
if ($prop === "aprop")
return self::C_ONE;
throw new \RuntimeException("Unsupported property: $prop");
}
/** @return array<Foo::C_*, string> */
public static function getNames(): array {
return [
self::C_ONE => "One",
self::C_TWO => "Two",
];
}
public function getThisName(): string {
$names = self::getNames();
$aprop = $this->aprop;
return $names[$aprop];
}
}',
],
'AddTwoSealedArrays' => [
'<?php
final class Token
{
public const ONE = [
16 => 16,
];
public const TWO = [
17 => 17,
];
public const THREE = [
18 => 18,
];
}
$_a = Token::ONE + Token::TWO + Token::THREE;
',
'assertions' => ['$_a===' => 'array{16: 16, 17: 17, 18: 18}']
],
'unpackTypedIterableWithStringKeysIntoArray' => [
'<?php
/**
* @param iterable<string, string> $data
* @return list<string>
*/
function unpackIterable(iterable $data): array
{
return [...$data];
}',
[],
[],
'8.1'
],
'unpackTypedTraversableWithStringKeysIntoArray' => [
'<?php
/**
* @param Traversable<string, string> $data
* @return list<string>
*/
function unpackIterable(Traversable $data): array
{
return [...$data];
}',
[],
[],
'8.1'
],
'unpackArrayWithArrayKeyIntoArray' => [
'<?php
/**
* @param array<array-key, mixed> $data
* @return list<mixed>
*/
function unpackArray(array $data): array
{
return [...$data];
}',
[],
[],
'8.1'
],
'unpackArrayWithTwoTypesNotObjectLike' => [
'<?php
function int(): int
{
return 0;
}
/**
* @return list<positive-int>
*/
function posiviteIntegers(): array
{
return [1];
}
$_a = [...posiviteIntegers(), int()];',
'assertions' => [
'$_a' => 'non-empty-list<int>',
],
[],
'8.1'
],
];
}
/**
* @return iterable<string,array{string,error_message:string,1?:string[],2?:bool,3?:string}>
*/
public function providerInvalidCodeParse(): iterable
{
return [
'objectAssignment' => [
'<?php
class A {}
(new A)["b"] = 1;',
'error_message' => 'UndefinedMethod',
],
'invalidArrayAccess' => [
'<?php
$a = 5;
$a[0] = 5;',
'error_message' => 'InvalidArrayAssignment',
],
'possiblyUndefinedArrayAccess' => [
'<?php
if (rand(0,1)) {
$a = ["a" => 1];
} else {
$a = [2, 3];
}
echo $a[0];',
'error_message' => 'PossiblyUndefinedArrayOffset',
],
'mixedStringOffsetAssignment' => [
'<?php
/** @var mixed */
$a = 5;
"hello"[0] = $a;',
'error_message' => 'MixedStringOffsetAssignment',
'error_level' => ['MixedAssignment'],
],
'mixedArrayArgument' => [
'<?php
/** @param array<mixed, int|string> $foo */
function fooFoo(array $foo): void { }
function barBar(array $bar): void {
fooFoo($bar);
}
barBar([1, "2"]);',
'error_message' => 'MixedArgumentTypeCoercion',
'error_level' => ['MixedAssignment'],
],
'arrayPropertyAssignment' => [
'<?php
class A {
/** @var string[] */
public $strs = ["a", "b", "c"];
/** @return void */
public function bar() {
$this->strs = [new stdClass()]; // no issue emitted
}
}',
'error_message' => 'InvalidPropertyAssignmentValue',
],
'incrementalArrayPropertyAssignment' => [
'<?php
class A {
/** @var string[] */
public $strs = ["a", "b", "c"];
/** @return void */
public function bar() {
$this->strs[] = new stdClass(); // no issue emitted
}
}',
'error_message' => 'InvalidPropertyAssignmentValue',
],
'duplicateStringArrayKey' => [
'<?php
$arr = [
"a" => 1,
"b" => 2,
"c" => 3,
"c" => 4,
];',
'error_message' => 'DuplicateArrayKey',
],
'duplicateIntArrayKey' => [
'<?php
$arr = [
0 => 1,
1 => 2,
2 => 3,
2 => 4,
];',
'error_message' => 'DuplicateArrayKey',
],
'duplicateImplicitIntArrayKey' => [
'<?php
$arr = [
1,
2,
3,
2 => 4,
];',
'error_message' => 'DuplicateArrayKey',
],
'mixedArrayAssignmentOnVariable' => [
'<?php
function foo(array $arr) : void {
$arr["foo"][0] = "5";
}',
'error_message' => 'MixedArrayAssignment',
],
'implementsArrayAccessPreventNullOffset' => [
'<?php
/**
* @template-implements ArrayAccess<int, string>
*/
class C implements ArrayAccess {
public function offsetExists(int $offset) : bool { return true; }
public function offsetGet($offset) : string { return "";}
public function offsetSet(int $offset, string $value) : void {}
public function offsetUnset(int $offset) : void { }
}
$c = new C();
$c[] = "hello";',
'error_message' => 'NullArgument',
],
'storageKeyMustBeObject' => [
'<?php
$key = [1,2,3];
$storage = new \SplObjectStorage();
$storage[$key] = "test";',
'error_message' => 'InvalidArgument',
],
'listUsedAsArrayWrongType' => [
'<?php
/** @param string[] $arr */
function takesArray(array $arr) : void {}
$a = [];
$a[] = 1;
$a[] = 2;
takesArray($a);',
'error_message' => 'InvalidScalarArgument',
],
'listUsedAsArrayWrongListType' => [
'<?php
/** @param list<string> $arr */
function takesArray(array $arr) : void {}
$a = [];
$a[] = 1;
$a[] = 2;
takesArray($a);',
'error_message' => 'InvalidScalarArgument',
],
'nonEmptyAssignmentToListElementChangeType' => [
'<?php
/**
* @param non-empty-list<string> $arr
* @return non-empty-list<string>
*/
function takesList(array $arr) : array {
$arr[0] = 5;
return $arr;
}',
'error_message' => 'InvalidReturnStatement',
],
'preventArrayAssignmentOnReturnValue' => [
'<?php
class A {
public function foo() : array {
return [1, 2, 3];
}
}
(new A)->foo()[3] = 5;',
'error_message' => 'InvalidArrayAssignment',
],
'mergeIntWithMixed' => [
'<?php
function getCachedMixed(array $cache, string $locale) : string {
if (!isset($cache[$locale])) {
$cache[$locale] = 5;
}
/**
* @psalm-suppress MixedReturnStatement
*/
return $cache[$locale];
}',
'error_message' => 'InvalidReturnStatement',
],
'mergeIntWithNestedMixed' => [
'<?php
function getCachedMixed(array $cache, string $locale) : string {
if (!isset($cache[$locale][$locale])) {
/**
* @psalm-suppress MixedArrayAssignment
*/
$cache[$locale][$locale] = 5;
}
/**
* @psalm-suppress MixedArrayAccess
* @psalm-suppress MixedReturnStatement
*/
return $cache[$locale][$locale];
}',
'error_message' => 'InvalidReturnStatement',
],
'mergeWithDeeplyNestedArray' => [
'<?php
/**
* @psalm-suppress MixedInferredReturnType
*/
function getTwoPartsLocale(array $cache, string $a, string $b) : string
{
if (!isset($cache[$b])) {
$cache[$b] = array();
}
if (!isset($cache[$b][$a])) {
if (rand(0, 1)) {
/** @psalm-suppress MixedArrayAssignment */
$cache[$b][$a] = "hello";
} else {
/** @psalm-suppress MixedArrayAssignment */
$cache[$b][$a] = rand(0, 1) ? "string" : null;
}
}
/**
* @psalm-suppress MixedArrayAccess
* @psalm-suppress MixedReturnStatement
*/
return $cache[$b][$a];
}',
'error_message' => 'NullableReturnStatement',
],
'ArrayCreateOffsetObject' => [
'<?php
$_a = [new stdClass => "a"];
',
'error_message' => 'InvalidArrayOffset'
],
'ArrayDimOffsetObject' => [
'<?php
$_a = [];
$_a[new stdClass] = "a";
',
'error_message' => 'InvalidArrayOffset'
],
'ArrayCreateOffsetResource' => [
'<?php
$_a = [fopen("", "") => "a"];
',
'error_message' => 'InvalidArrayOffset'
],
'ArrayDimOffsetResource' => [
'<?php
$_a = [];
$_a[fopen("", "")] = "a";
',
'error_message' => 'InvalidArrayOffset'
],
'ArrayCreateOffsetBool' => [
'<?php
$_a = [true => "a"];
',
'error_message' => 'InvalidArrayOffset'
],
'ArrayDimOffsetBool' => [
'<?php
$_a = [];
$_a[true] = "a";
',
'error_message' => 'InvalidArrayOffset'
],
'ArrayCreateOffsetStringable' => [
'<?php
$a = new class{public function __toString(){return "";}};
$_a = [$a => "a"];',
'error_message' => 'InvalidArrayOffset',
],
'ArrayDimOffsetStringable' => [
'<?php
$_a = [];
$a = new class{public function __toString(){return "";}};
$_a[$a] = "a";',
'error_message' => 'InvalidArrayOffset',
],
'coerceListToArray' => [
'<?php
/**
* @param list<int> $_bar
*/
function foo(array $_bar) : void {}
/**
* @param list<int> $bar
*/
function baz(array $bar) : void {
foo((array) $bar);
}',
'error_message' => 'RedundantCast',
],
'arrayValuesOnList' => [
'<?php
/**
* @param list<int> $a
* @return list<int>
*/
function foo(array $a) : array {
return array_values($a);
}',
'error_message' => 'RedundantCast',
],
'assignToListWithUpdatedForeachKey' => [
'<?php
/**
* @param list<string> $list
* @return list<string>
*/
function getList(array $list): array {
foreach ($list as $key => $value) {
$list[$key + 1] = $value . "!";
}
return $list;
}',
'error_message' => 'LessSpecificReturnStatement',
],
'assignToListWithAlteredForeachKeyVar' => [
'<?php
/**
* @param list<string> $list
* @return list<string>
*/
function getList(array $list): array {
foreach ($list as $key => $value) {
if (rand(0, 1)) {
array_pop($list);
}
$list[$key] = $value . "!";
}
return $list;
}',
'error_message' => 'LessSpecificReturnStatement',
],
'createArrayWithMixedOffset' => [
'<?php
/**
* @param mixed $index
*/
function test($index): array {
$arr = [$index => 5];
return $arr;
}',
'error_message' => 'MixedArrayOffset'
],
'falseArrayAssignment' => [
'<?php
function foo(): array {
$array = [];
$array[false] = "";
echo $array[0];
return $array;
}',
'error_message' => 'InvalidArrayOffset'
],
'TemplateAsKey' => [
'<?php
class Foo {
/**
* @psalm-template T of array
* @param T $offset
* @param array<array, string> $weird_array
*/
public function getThisName($offset, $weird_array): string {
return $weird_array[$offset];
}
}',
'error_message' => 'InvalidArrayOffset'
],
];
}
}
| 1 | 12276 | This now causes `LessSpecificReturnStatement - src/somefile.php:9:32 - The type 'non-empty-list<5|string>' is more general than the declared return type 'non-empty-list<string>' for takesList`, which seems correct to me. The type `non-empty-list<5|string>` contains the type `non-empty-list<string>` (every non-empty-list<string> is also a non-empty-list<5|string>, but not the reverse), as sketched below. Thoughts? | vimeo-psalm | php |
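A minimal sketch of that containment claim, assuming hypothetical names (takesStrings and $widened are illustrations only, not part of the Psalm test suite): a value of the wider type non-empty-list<5|string> need not be a valid non-empty-list<string>, so the widened return type is less specific rather than outright invalid.

<?php
/** @param non-empty-list<string> $strings */
function takesStrings(array $strings): void {}

/** @var non-empty-list<5|string> $widened */
$widened = [5, "a"];     // a valid value of the wider type...
takesStrings($widened);  // ...which Psalm would reject here, since 5 is not a string

At runtime this file executes without error (PHP does not check array element types), which is exactly why the distinction only surfaces as a Psalm issue.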
@@ -123,7 +123,7 @@ public class RestrictedDefaultPrivacyController implements PrivacyController {
final String privacyUserId,
final Optional<PrivacyGroup> maybePrivacyGroup) {
try {
- LOG.trace("Storing private transaction in enclave");
+ LOG.info("Storing private transaction in enclave");
final SendResponse sendResponse =
sendRequest(privateTransaction, privacyUserId, maybePrivacyGroup);
return sendResponse.getKey(); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.privacy;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.hyperledger.besu.ethereum.core.PrivacyParameters.ONCHAIN_PRIVACY_PROXY;
import static org.hyperledger.besu.ethereum.privacy.group.OnchainGroupManagement.ADD_PARTICIPANTS_METHOD_SIGNATURE;
import static org.hyperledger.besu.ethereum.privacy.group.OnchainGroupManagement.GET_PARTICIPANTS_METHOD_SIGNATURE;
import static org.hyperledger.besu.ethereum.privacy.group.OnchainGroupManagement.GET_VERSION_METHOD_SIGNATURE;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.enclave.Enclave;
import org.hyperledger.besu.enclave.EnclaveClientException;
import org.hyperledger.besu.enclave.types.PrivacyGroup;
import org.hyperledger.besu.enclave.types.ReceiveResponse;
import org.hyperledger.besu.enclave.types.SendResponse;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.privacy.storage.PrivacyGroupHeadBlockMap;
import org.hyperledger.besu.ethereum.privacy.storage.PrivateStateStorage;
import org.hyperledger.besu.ethereum.privacy.storage.PrivateTransactionMetadata;
import org.hyperledger.besu.ethereum.processing.TransactionProcessingResult;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.rlp.RLPInput;
import org.hyperledger.besu.ethereum.transaction.CallParameter;
import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.units.bigints.UInt256;
public class RestrictedDefaultPrivacyController implements PrivacyController {
private static final Logger LOG = LogManager.getLogger();
private final Blockchain blockchain;
private final PrivateStateStorage privateStateStorage;
private final Enclave enclave;
private final PrivateTransactionValidator privateTransactionValidator;
private final PrivateTransactionSimulator privateTransactionSimulator;
private final PrivateNonceProvider privateNonceProvider;
private final PrivateWorldStateReader privateWorldStateReader;
private final PrivateTransactionLocator privateTransactionLocator;
private final PrivateStateRootResolver privateStateRootResolver;
public RestrictedDefaultPrivacyController(
final Blockchain blockchain,
final PrivacyParameters privacyParameters,
final Optional<BigInteger> chainId,
final PrivateTransactionSimulator privateTransactionSimulator,
final PrivateNonceProvider privateNonceProvider,
final PrivateWorldStateReader privateWorldStateReader) {
this(
blockchain,
privacyParameters.getPrivateStateStorage(),
privacyParameters.getEnclave(),
new PrivateTransactionValidator(chainId),
privateTransactionSimulator,
privateNonceProvider,
privateWorldStateReader,
privacyParameters.getPrivateStateRootResolver());
}
public RestrictedDefaultPrivacyController(
final Blockchain blockchain,
final PrivateStateStorage privateStateStorage,
final Enclave enclave,
final PrivateTransactionValidator privateTransactionValidator,
final PrivateTransactionSimulator privateTransactionSimulator,
final PrivateNonceProvider privateNonceProvider,
final PrivateWorldStateReader privateWorldStateReader,
final PrivateStateRootResolver privateStateRootResolver) {
this.blockchain = blockchain;
this.privateStateStorage = privateStateStorage;
this.enclave = enclave;
this.privateTransactionValidator = privateTransactionValidator;
this.privateTransactionSimulator = privateTransactionSimulator;
this.privateNonceProvider = privateNonceProvider;
this.privateWorldStateReader = privateWorldStateReader;
this.privateTransactionLocator =
new PrivateTransactionLocator(blockchain, enclave, privateStateStorage);
this.privateStateRootResolver = privateStateRootResolver;
}
@Override
public Optional<ExecutedPrivateTransaction> findPrivateTransactionByPmtHash(
final Hash pmtHash, final String enclaveKey) {
return privateTransactionLocator.findByPmtHash(pmtHash, enclaveKey);
}
@Override
public String createPrivateMarkerTransactionPayload(
final PrivateTransaction privateTransaction,
final String privacyUserId,
final Optional<PrivacyGroup> maybePrivacyGroup) {
try {
LOG.trace("Storing private transaction in enclave");
final SendResponse sendResponse =
sendRequest(privateTransaction, privacyUserId, maybePrivacyGroup);
return sendResponse.getKey();
} catch (final Exception e) {
LOG.error("Failed to store private transaction in enclave", e);
throw e;
}
}
@Override
public ReceiveResponse retrieveTransaction(final String enclaveKey, final String privacyUserId) {
return enclave.receive(enclaveKey, privacyUserId);
}
@Override
public PrivacyGroup createPrivacyGroup(
final List<String> addresses,
final String name,
final String description,
final String privacyUserId) {
return enclave.createPrivacyGroup(addresses, privacyUserId, name, description);
}
@Override
public String deletePrivacyGroup(final String privacyGroupId, final String privacyUserId) {
return enclave.deletePrivacyGroup(privacyGroupId, privacyUserId);
}
@Override
public PrivacyGroup[] findOffchainPrivacyGroupByMembers(
final List<String> addresses, final String privacyUserId) {
return enclave.findPrivacyGroup(addresses);
}
@Override
public ValidationResult<TransactionInvalidReason> validatePrivateTransaction(
final PrivateTransaction privateTransaction, final String privacyUserId) {
final String privacyGroupId = privateTransaction.determinePrivacyGroupId().toBase64String();
return privateTransactionValidator.validate(
privateTransaction,
determineBesuNonce(privateTransaction.getSender(), privacyGroupId, privacyUserId),
true);
}
@Override
public long determineEeaNonce(
final String privateFrom,
final String[] privateFor,
final Address address,
final String privacyUserId) {
final List<String> groupMembers = Lists.asList(privateFrom, privateFor);
final List<PrivacyGroup> matchingGroups =
Lists.newArrayList(enclave.findPrivacyGroup(groupMembers));
final List<PrivacyGroup> legacyGroups =
matchingGroups.stream()
.filter(group -> group.getType() == PrivacyGroup.Type.LEGACY)
.collect(Collectors.toList());
if (legacyGroups.size() == 0) {
// the legacy group does not exist yet
return 0;
}
Preconditions.checkArgument(
legacyGroups.size() == 1,
String.format(
"Found invalid number of privacy groups (%d), expected 1.", legacyGroups.size()));
final String privacyGroupId = legacyGroups.get(0).getPrivacyGroupId();
return determineBesuNonce(address, privacyGroupId, privacyUserId);
}
@Override
public long determineBesuNonce(
final Address sender, final String privacyGroupId, final String privacyUserId) {
return privateNonceProvider.getNonce(
sender, Bytes32.wrap(Bytes.fromBase64String(privacyGroupId)));
}
@Override
public Optional<TransactionProcessingResult> simulatePrivateTransaction(
final String privacyGroupId,
final String privacyUserId,
final CallParameter callParams,
final long blockNumber) {
final Optional<TransactionProcessingResult> result =
privateTransactionSimulator.process(privacyGroupId, callParams, blockNumber);
return result;
}
@Override
public Optional<String> buildAndSendAddPayload(
final PrivateTransaction privateTransaction,
final Bytes32 privacyGroupId,
final String privacyUserId) {
if (isGroupAdditionTransaction(privateTransaction)) {
final List<PrivateTransactionMetadata> privateTransactionMetadataList =
buildTransactionMetadataList(privacyGroupId);
if (privateTransactionMetadataList.size() > 0) {
final List<PrivateTransactionWithMetadata> privateTransactionWithMetadataList =
retrievePrivateTransactions(
privacyGroupId, privateTransactionMetadataList, privacyUserId);
final Bytes bytes = serializeAddToGroupPayload(privateTransactionWithMetadataList);
final List<String> privateFor =
getParticipantsFromParameter(privateTransaction.getPayload());
return Optional.of(
enclave.send(bytes.toBase64String(), privacyUserId, privateFor).getKey());
}
}
return Optional.empty();
}
@Override
public Optional<PrivacyGroup> findPrivacyGroupByGroupId(
final String privacyGroupId, final String privacyUserId) {
try {
return findOffchainPrivacyGroupByGroupId(privacyGroupId, privacyUserId);
} catch (final EnclaveClientException ex) {
// An exception is thrown if the offchain group cannot be found
LOG.debug("Offchain privacy group not found: {}", privacyGroupId);
}
return findOnchainPrivacyGroupByGroupId(Bytes.fromBase64String(privacyGroupId), privacyUserId);
}
@Override
public Optional<PrivacyGroup> findOffchainPrivacyGroupByGroupId(
final String privacyGroupId, final String privacyUserId) {
return Optional.ofNullable(enclave.retrievePrivacyGroup(privacyGroupId));
}
@Override
public List<PrivacyGroup> findOnchainPrivacyGroupByMembers(
final List<String> addresses, final String privacyUserId) {
final ArrayList<PrivacyGroup> privacyGroups = new ArrayList<>();
final PrivacyGroupHeadBlockMap privacyGroupHeadBlockMap =
privateStateStorage
.getPrivacyGroupHeadBlockMap(blockchain.getChainHeadHash())
.orElse(PrivacyGroupHeadBlockMap.empty());
privacyGroupHeadBlockMap
.keySet()
.forEach(
c -> {
final Optional<PrivacyGroup> maybePrivacyGroup =
findOnchainPrivacyGroupByGroupId(c, privacyUserId);
if (maybePrivacyGroup.isPresent()
&& maybePrivacyGroup.get().getMembers().containsAll(addresses)) {
privacyGroups.add(maybePrivacyGroup.get());
}
});
return privacyGroups;
}
public Optional<PrivacyGroup> findOnchainPrivacyGroupByGroupId(
final Bytes privacyGroupId, final String enclaveKey) {
// get the privateFor list from the management contract
final Optional<TransactionProcessingResult> privateTransactionSimulatorResultOptional =
privateTransactionSimulator.process(
privacyGroupId.toBase64String(), buildCallParams(GET_PARTICIPANTS_METHOD_SIGNATURE));
if (privateTransactionSimulatorResultOptional.isPresent()
&& privateTransactionSimulatorResultOptional.get().isSuccessful()) {
final RLPInput rlpInput =
RLP.input(privateTransactionSimulatorResultOptional.get().getOutput());
if (rlpInput.nextSize() > 0) {
return Optional.of(
new PrivacyGroup(
privacyGroupId.toBase64String(),
PrivacyGroup.Type.ONCHAIN,
"",
"",
decodeList(rlpInput.raw())));
} else {
return Optional.empty();
}
} else {
return Optional.empty();
}
}
@Override
public Optional<PrivacyGroup> findOnchainPrivacyGroupAndAddNewMembers(
final Bytes privacyGroupId,
final String privacyUserId,
final PrivateTransaction privateTransaction) {
// get the privateFor list from the management contract
final Optional<TransactionProcessingResult> privateTransactionSimulatorResultOptional =
privateTransactionSimulator.process(
privacyGroupId.toBase64String(), buildCallParams(GET_PARTICIPANTS_METHOD_SIGNATURE));
final List<String> members = new ArrayList<>();
if (privateTransactionSimulatorResultOptional.isPresent()
&& privateTransactionSimulatorResultOptional.get().isSuccessful()) {
final RLPInput rlpInput =
RLP.input(privateTransactionSimulatorResultOptional.get().getOutput());
if (rlpInput.nextSize() > 0) {
members.addAll(decodeList(rlpInput.raw()));
if (!members.contains(privacyUserId)) {
return Optional.empty();
}
}
}
if (isGroupAdditionTransaction(privateTransaction)) {
final List<String> participantsFromParameter =
getParticipantsFromParameter(privateTransaction.getPayload());
members.addAll(participantsFromParameter);
}
if (members.isEmpty()) {
return Optional.empty();
} else {
return Optional.of(
new PrivacyGroup(
privacyGroupId.toBase64String(), PrivacyGroup.Type.ONCHAIN, "", "", members));
}
}
private List<String> decodeList(final Bytes rlpEncodedList) {
final ArrayList<String> decodedElements = new ArrayList<>();
// first 32 bytes is dynamic list offset
final UInt256 lengthOfList = UInt256.fromBytes(rlpEncodedList.slice(32, 32)); // length of list
for (int i = 0; i < lengthOfList.toLong(); ++i) {
decodedElements.add(
Bytes.wrap(rlpEncodedList.slice(64 + (32 * i), 32)).toBase64String()); // participant
}
return decodedElements;
}
private List<String> getParticipantsFromParameter(final Bytes input) {
final List<String> participants = new ArrayList<>();
final Bytes mungedParticipants = input.slice(4 + 32 + 32);
for (int i = 0; i <= mungedParticipants.size() - 32; i += 32) {
participants.add(mungedParticipants.slice(i, 32).toBase64String());
}
return participants;
}
private CallParameter buildCallParams(final Bytes methodCall) {
return new CallParameter(
Address.ZERO, ONCHAIN_PRIVACY_PROXY, 3000000, Wei.of(1000), Wei.ZERO, methodCall);
}
private List<PrivateTransactionMetadata> buildTransactionMetadataList(
final Bytes privacyGroupId) {
final List<PrivateTransactionMetadata> pmtHashes = new ArrayList<>();
PrivacyGroupHeadBlockMap privacyGroupHeadBlockMap =
privateStateStorage
.getPrivacyGroupHeadBlockMap(blockchain.getChainHeadHash())
.orElse(PrivacyGroupHeadBlockMap.empty());
if (privacyGroupHeadBlockMap.get(privacyGroupId) != null) {
Hash blockHash = privacyGroupHeadBlockMap.get(privacyGroupId);
while (blockHash != null) {
pmtHashes.addAll(
0,
privateStateStorage
.getPrivateBlockMetadata(blockHash, Bytes32.wrap(privacyGroupId))
.get()
.getPrivateTransactionMetadataList());
blockHash = blockchain.getBlockHeader(blockHash).get().getParentHash();
privacyGroupHeadBlockMap =
privateStateStorage
.getPrivacyGroupHeadBlockMap(blockHash)
.orElse(PrivacyGroupHeadBlockMap.empty());
if (privacyGroupHeadBlockMap.get(privacyGroupId) != null) {
blockHash = privacyGroupHeadBlockMap.get(privacyGroupId);
} else {
break;
}
}
}
return pmtHashes;
}
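  // Transactions recovered from the "add blob" below are assumed to cover the
  // first entries of the metadata list in order, so the fetch loop starts at
  // privateTransactions.size() and only retrieves the remaining ones.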
private List<PrivateTransactionWithMetadata> retrievePrivateTransactions(
final Bytes32 privacyGroupId,
final List<PrivateTransactionMetadata> privateTransactionMetadataList,
final String privacyUserId) {
final ArrayList<PrivateTransactionWithMetadata> privateTransactions = new ArrayList<>();
privateStateStorage
.getAddDataKey(privacyGroupId)
.ifPresent(key -> privateTransactions.addAll(retrieveAddBlob(key.toBase64String())));
for (int i = privateTransactions.size(); i < privateTransactionMetadataList.size(); i++) {
final PrivateTransactionMetadata privateTransactionMetadata =
privateTransactionMetadataList.get(i);
final Transaction privateMarkerTransaction =
blockchain
.getTransactionByHash(privateTransactionMetadata.getPrivateMarkerTransactionHash())
.orElseThrow();
final ReceiveResponse receiveResponse =
retrieveTransaction(
privateMarkerTransaction.getPayload().slice(0, 32).toBase64String(), privacyUserId);
final BytesValueRLPInput input =
new BytesValueRLPInput(
Bytes.fromBase64String(new String(receiveResponse.getPayload(), UTF_8)), false);
input.enterList();
privateTransactions.add(
new PrivateTransactionWithMetadata(
PrivateTransaction.readFrom(input), privateTransactionMetadata));
input.leaveListLenient();
}
return privateTransactions;
}
@Override
public boolean isGroupAdditionTransaction(final PrivateTransaction privateTransaction) {
return privateTransaction.getTo().isPresent()
&& privateTransaction.getTo().get().equals(ONCHAIN_PRIVACY_PROXY)
&& privateTransaction
.getPayload()
.toHexString()
.startsWith(ADD_PARTICIPANTS_METHOD_SIGNATURE.toHexString());
}
@Override
public Optional<Bytes> getContractCode(
final String privacyGroupId,
final Address contractAddress,
final Hash blockHash,
final String privacyUserId) {
return privateWorldStateReader.getContractCode(privacyGroupId, blockHash, contractAddress);
}
@Override
public List<PrivateTransactionWithMetadata> retrieveAddBlob(final String addDataKey) {
final ReceiveResponse addReceiveResponse = enclave.receive(addDataKey);
return PrivateTransactionWithMetadata.readListFromPayload(
Bytes.wrap(Base64.getDecoder().decode(addReceiveResponse.getPayload())));
}
private Bytes serializeAddToGroupPayload(
final List<PrivateTransactionWithMetadata> privateTransactionWithMetadataList) {
final BytesValueRLPOutput rlpOutput = new BytesValueRLPOutput();
rlpOutput.startList();
privateTransactionWithMetadataList.forEach(
privateTransactionWithMetadata -> privateTransactionWithMetadata.writeTo(rlpOutput));
rlpOutput.endList();
return rlpOutput.encoded();
}
private SendResponse sendRequest(
final PrivateTransaction privateTransaction,
final String privacyUserId,
final Optional<PrivacyGroup> maybePrivacyGroup) {
final BytesValueRLPOutput rlpOutput = new BytesValueRLPOutput();
if (maybePrivacyGroup.isPresent()) {
final PrivacyGroup privacyGroup = maybePrivacyGroup.get();
if (privacyGroup.getType() == PrivacyGroup.Type.ONCHAIN) {
// onchain privacy group
final Optional<TransactionProcessingResult> result =
privateTransactionSimulator.process(
privateTransaction.getPrivacyGroupId().get().toBase64String(),
buildCallParams(GET_VERSION_METHOD_SIGNATURE));
new VersionedPrivateTransaction(privateTransaction, result).writeTo(rlpOutput);
final List<String> onchainPrivateFor = privacyGroup.getMembers();
return enclave.send(
rlpOutput.encoded().toBase64String(),
privateTransaction.getPrivateFrom().toBase64String(),
onchainPrivateFor);
} else if (privacyGroup.getType() == PrivacyGroup.Type.PANTHEON) {
// offchain privacy group
privateTransaction.writeTo(rlpOutput);
return enclave.send(
rlpOutput.encoded().toBase64String(),
privacyUserId,
privateTransaction.getPrivacyGroupId().get().toBase64String());
} else {
// this should not happen
        throw new RuntimeException("Unexpected privacy group type: " + privacyGroup.getType());
}
}
// legacy transaction
final List<String> privateFor = resolveLegacyPrivateFor(privateTransaction);
if (privateFor.isEmpty()) {
privateFor.add(privateTransaction.getPrivateFrom().toBase64String());
}
privateTransaction.writeTo(rlpOutput);
final String payload = rlpOutput.encoded().toBase64String();
return enclave.send(payload, privateTransaction.getPrivateFrom().toBase64String(), privateFor);
}
private List<String> resolveLegacyPrivateFor(final PrivateTransaction privateTransaction) {
final ArrayList<String> privateFor = new ArrayList<>();
final boolean isLegacyTransaction = privateTransaction.getPrivateFor().isPresent();
if (isLegacyTransaction) {
privateFor.addAll(
privateTransaction.getPrivateFor().get().stream()
.map(Bytes::toBase64String)
.collect(Collectors.toList()));
}
return privateFor;
}
@Override
public void verifyPrivacyGroupContainsPrivacyUserId(
final String privacyGroupId, final String privacyUserId) {
// NO VALIDATION NEEDED
}
@Override
public void verifyPrivacyGroupContainsPrivacyUserId(
final String privacyGroupId, final String privacyUserId, final Optional<Long> blockNumber)
throws MultiTenancyValidationException {
// NO VALIDATION NEEDED
}
@Override
public PrivateTransactionSimulator getTransactionSimulator() {
return privateTransactionSimulator;
}
@Override
public Optional<Hash> getStateRootByBlockNumber(
final String privacyGroupId, final String privacyUserId, final long blockNumber) {
return blockchain
.getBlockByNumber(blockNumber)
.map(
block ->
privateStateRootResolver.resolveLastStateRoot(
Bytes32.wrap(Bytes.fromBase64String(privacyGroupId)), block.getHash()));
}
}
| 1 | 26,413 | Are you going to change that back? | hyperledger-besu | java |
@@ -51,6 +51,16 @@ tune ?= generic
cpu ?= $(arch)
fpu ?=
bits ?= $(shell getconf LONG_BIT)
+staticbuild ?= false
+
+# Set static building if requested
+ifeq (true,$(staticbuild))
+ use += llvm_link_static
+else
+ ifneq (false,$(staticbuild))
+ $(error staticbuild must be true or false)
+ endif
+endif
ifndef verbose
SILENT = @ | 1 | # Determine the operating system
OSTYPE ?=
ifeq ($(OS),Windows_NT)
OSTYPE = windows
else
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
OSTYPE = linux
ifndef AR
ifneq (,$(shell which gcc-ar 2> /dev/null))
AR = gcc-ar
endif
endif
ALPINE=$(wildcard /etc/alpine-release)
endif
ifeq ($(UNAME_S),Darwin)
OSTYPE = osx
endif
ifeq ($(UNAME_S),FreeBSD)
OSTYPE = bsd
CXX = c++
endif
ifeq ($(UNAME_S),DragonFly)
OSTYPE = bsd
CXX = c++
endif
ifeq ($(UNAME_S),OpenBSD)
OSTYPE = bsd
CXX = c++
LLVM_CONFIG = /usr/local/bin/llvm-config
default_pic = true
endif
endif
ifdef LTO_PLUGIN
lto := yes
endif
# Default settings (silent release build).
config ?= release
arch ?= native
tune ?= generic
cpu ?= $(arch)
fpu ?=
bits ?= $(shell getconf LONG_BIT)
ifndef verbose
SILENT = @
else
SILENT =
endif
# Default to version from `VERSION` file but allowing overridding on the
# make command line like:
# make version="nightly-19710702"
# overridden version *should not* contain spaces or characters that aren't
# legal in filesystem path names
ifndef version
version := $(shell cat VERSION)
ifneq ($(wildcard .git),)
sha := $(shell git rev-parse --short HEAD)
tag := $(version)-$(sha)
else
tag := $(version)
endif
else
tag := $(version)
endif
version_str = "$(tag) [$(config)]\ncompiled with: llvm $(llvm_version) \
-- "$(compiler_version)
# package_name, _version, and _iteration can be overridden by Travis or AppVeyor
package_base_version ?= $(tag)
package_iteration ?= "1"
package_name ?= "ponyc"
package_version = $(package_base_version)-$(package_iteration)
archive = $(package_name)-$(package_version).tar
package = build/$(package_name)-$(package_version)
prefix ?= /usr/local
bindir ?= $(prefix)/bin
includedir ?= $(prefix)/include
libdir ?= $(prefix)/lib
# destdir is for backward compatibility only, use ponydir instead.
ifdef destdir
$(warning Please use ponydir instead of destdir.)
ponydir ?= $(destdir)
endif
ponydir ?= $(libdir)/pony/$(tag)
symlink := yes
ifdef ponydir
ifndef prefix
symlink := no
endif
endif
ifneq (,$(filter $(OSTYPE), osx bsd))
symlink.flags = -sf
else
symlink.flags = -srf
endif
ifneq (,$(filter $(OSTYPE), osx bsd))
SED_INPLACE = sed -i -e
else
SED_INPLACE = sed -i
endif
LIB_EXT ?= a
BUILD_FLAGS = -march=$(arch) -mtune=$(tune) -Werror -Wconversion \
-Wno-sign-conversion -Wextra -Wall
LINKER_FLAGS = -march=$(arch) -mtune=$(tune) $(LDFLAGS)
AR_FLAGS ?= rcs
ALL_CFLAGS = -std=gnu11 -fexceptions \
-DPONY_VERSION=\"$(tag)\" -DLLVM_VERSION=\"$(llvm_version)\" \
-DPONY_COMPILER=\"$(CC)\" -DPONY_ARCH=\"$(arch)\" \
-DBUILD_COMPILER=\"$(compiler_version)\" \
-DPONY_BUILD_CONFIG=\"$(config)\" \
-DPONY_VERSION_STR=\"$(version_str)\" \
-D_FILE_OFFSET_BITS=64
ALL_CXXFLAGS = -std=gnu++11 -fno-rtti
LL_FLAGS = -mcpu=$(cpu)
# Determine pointer size in bits.
BITS := $(bits)
UNAME_M := $(shell uname -m)
ifeq ($(BITS),64)
ifeq ($(UNAME_M),x86_64)
ifeq (,$(filter $(arch), armv8-a))
BUILD_FLAGS += -mcx16
LINKER_FLAGS += -mcx16
endif
endif
endif
ifneq ($(fpu),)
BUILD_FLAGS += -mfpu=$(fpu)
LINKER_FLAGS += -mfpu=$(fpu)
endif
PONY_BUILD_DIR ?= build/$(config)
PONY_SOURCE_DIR ?= src
PONY_TEST_DIR ?= test
PONY_BENCHMARK_DIR ?= benchmark
comma:= ,
empty:=
space:= $(empty) $(empty)
define USE_CHECK
$$(info Enabling use option: $1)
ifeq ($1,valgrind)
ALL_CFLAGS += -DUSE_VALGRIND
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-valgrind
else ifeq ($1,thread_sanitizer)
ALL_CFLAGS += -fsanitize=thread -DPONY_SANITIZER=\"thread\"
ALL_CXXFLAGS += -fsanitize=thread -DPONY_SANITIZER=\"thread\"
LINKER_FLAGS += -fsanitize=thread -DPONY_SANITIZER=\"thread\"
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-thread_sanitizer
else ifeq ($1,address_sanitizer)
ALL_CFLAGS += -fsanitize=address -DPONY_SANITIZER=\"address\"
ALL_CXXFLAGS += -fsanitize=address -DPONY_SANITIZER=\"address\"
LINKER_FLAGS += -fsanitize=address -DPONY_SANITIZER=\"address\"
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-address_sanitizer
else ifeq ($1,undefined_behavior_sanitizer)
ALL_CFLAGS += -fsanitize=undefined -DPONY_SANITIZER=\"undefined\"
ALL_CXXFLAGS += -fsanitize=undefined -DPONY_SANITIZER=\"undefined\"
LINKER_FLAGS += -fsanitize=undefined -DPONY_SANITIZER=\"undefined\"
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-undefined_behavior_sanitizer
else ifeq ($1,coverage)
ifneq (,$(shell $(CC) -v 2>&1 | grep clang))
# clang
COVERAGE_FLAGS = -O0 -fprofile-instr-generate -fcoverage-mapping
LINKER_FLAGS += -fprofile-instr-generate -fcoverage-mapping
else
ifneq (,$(shell $(CC) -v 2>&1 | grep "gcc version"))
# gcc
COVERAGE_FLAGS = -O0 -fprofile-arcs -ftest-coverage
LINKER_FLAGS += -fprofile-arcs
else
$$(error coverage not supported for this compiler/platform)
endif
    endif
    ALL_CFLAGS += $(COVERAGE_FLAGS)
    ALL_CXXFLAGS += $(COVERAGE_FLAGS)
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-coverage
else ifeq ($1,pooltrack)
ALL_CFLAGS += -DUSE_POOLTRACK
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-pooltrack
else ifeq ($1,dtrace)
DTRACE ?= $(shell which dtrace)
ifeq (, $$(DTRACE))
$$(error No dtrace compatible user application static probe generation tool found)
endif
ALL_CFLAGS += -DUSE_DYNAMIC_TRACE
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-dtrace
else ifeq ($1,actor_continuations)
ALL_CFLAGS += -DUSE_ACTOR_CONTINUATIONS
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-actor_continuations
else ifeq ($1,scheduler_scaling_pthreads)
ALL_CFLAGS += -DUSE_SCHEDULER_SCALING_PTHREADS
PONY_BUILD_DIR := $(PONY_BUILD_DIR)-scheduler_scaling_pthreads
else ifeq ($1,llvm_link_static)
ifeq (,$(LLVM_LINK_STATIC))
LLVM_LINK_STATIC=--link-static
$$(info "linking llvm statically")
else
$$(warning LLVM_LINK_STATIC already set to '$(LLVM_LINK_STATIC)'; using pre-existing value)
endif
else
$$(error ERROR: Unknown use option specified: $1)
endif
endef
ifdef use
$(foreach useitem,$(subst $(comma),$(space),$(use)),$(eval $(call USE_CHECK,$(useitem))))
endif
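# Example invocation combining two use options (hypothetical values):
#   make config=debug use=valgrind,dtrace
# Each option is validated by USE_CHECK above and appends its own suffix to
# the build directory, giving build/debug-valgrind-dtrace in this case.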
ifdef config
ifeq (,$(filter $(config),debug release))
$(error Unknown configuration "$(config)")
endif
endif
ifeq ($(config),release)
BUILD_FLAGS += -O3 -DNDEBUG
LL_FLAGS += -O3
ifeq ($(lto),yes)
BUILD_FLAGS += -flto -DPONY_USE_LTO
LINKER_FLAGS += -flto
ifdef LTO_PLUGIN
AR_FLAGS += --plugin $(LTO_PLUGIN)
endif
ifneq (,$(filter $(OSTYPE),linux bsd))
LINKER_FLAGS += -fuse-linker-plugin -fuse-ld=gold
endif
endif
else
BUILD_FLAGS += -g -DDEBUG
endif
ifeq ($(OSTYPE),osx)
ALL_CFLAGS += -mmacosx-version-min=10.12 -DUSE_SCHEDULER_SCALING_PTHREADS
ALL_CXXFLAGS += -stdlib=libc++ -mmacosx-version-min=10.12
endif
# If we are not cleaning we need LLVM_CONFIG
ifneq ($(MAKECMDGOALS),clean)
ifndef LLVM_CONFIG
ifneq (,$(shell which llvm-config 2> /dev/null))
LLVM_CONFIG = llvm-config
else
$(error No LLVM installation found! Set LLVM_CONFIG environment variable \
to the `llvm-config` binary for your installation)
endif
else ifeq (,$(shell which $(LLVM_CONFIG) 2> /dev/null))
$(error LLVM config $(LLVM_CONFIG) not found! Set LLVM_CONFIG environment \
variable to a valid LLVM installation.)
endif
LLVM_BINDIR := $(shell $(LLVM_CONFIG) --bindir 2> /dev/null)
LLVM_LINK := $(LLVM_BINDIR)/llvm-link
LLVM_OPT := $(LLVM_BINDIR)/opt
LLVM_LLC := $(LLVM_BINDIR)/llc
LLVM_AS := $(LLVM_BINDIR)/llvm-as
llvm_build_mode := $(shell $(LLVM_CONFIG) --build-mode)
ifeq (Release,$(llvm_build_mode))
LLVM_BUILD_MODE=LLVM_BUILD_MODE_Release
else ifeq (RelWithDebInfo,$(llvm_build_mode))
LLVM_BUILD_MODE=LLVM_BUILD_MODE_RelWithDebInfo
else ifeq (MinSizeRel,$(llvm_build_mode))
LLVM_BUILD_MODE=LLVM_BUILD_MODE_MinSizeRel
else ifeq (Debug,$(llvm_build_mode))
LLVM_BUILD_MODE=LLVM_BUILD_MODE_Debug
else
$(error "Unknown llvm build-mode of $(llvm_build_mode)", aborting)
endif
llvm_version := $(shell $(LLVM_CONFIG) --version)
ifeq ($(OSTYPE),osx)
ifneq (,$(shell which $(LLVM_BINDIR)/llvm-ar 2> /dev/null))
AR = $(LLVM_BINDIR)/llvm-ar
AR_FLAGS := rcs
else
AR = /usr/bin/ar
AR_FLAGS := -rcs
endif
endif
ifeq ($(llvm_version),3.9.1)
else ifeq ($(llvm_version),5.0.2)
else ifeq ($(llvm_version),6.0.1)
else ifeq ($(llvm_version),7.0.1)
else
$(warning WARNING: Unsupported LLVM version: $(llvm_version))
$(warning Please use LLVM 3.9.1, 5.0.2, 6.0.1, 7.0.1)
endif
# Third party, but prebuilt. Prebuilt libraries are defined as
# (1) a name (stored in prebuilt)
# (2) the linker flags necessary to link against the prebuilt libraries
# (3) a list of include directories for a set of libraries
# (4) a list of the libraries to link against
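# The only prebuilt library defined here is llvm (see "prebuilt := llvm"
# further down); CONFIGURE_LIBS picks up llvm.ldflags and llvm.libs for any
# target that lists llvm among its links.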
llvm.libdir := $(CROSS_SYSROOT)$(subst -L,,$(shell $(LLVM_CONFIG) --ldflags $(LLVM_LINK_STATIC)))
llvm.ldflags := -L$(llvm.libdir)
# Set rpath for ponyc if we're dynamically linking LLVM_VENDOR
ifeq (,$(LLVM_LINK_STATIC))
# We're linking dynamically as "LLVM_LINK_STATIC" is empty.
ifeq ($(LLVM_VENDOR),true)
# LLVM_VENDOR is true, so have linker set rpath.
llvm.ldflags += -Wl,-rpath,$(llvm.libdir)
endif
endif
#$(info llvm.libdir="$(llvm.libdir)")
#$(info llvm.ldflags="$(llvm.ldflags)")
# Get cflags using llvm-config
llvm.get_cflags := $(LLVM_CONFIG) --cflags $(LLVM_LINK_STATIC)
#$(info llvm.get_cflags="$(llvm.get_cflags)")
llvm.cflags := $(shell sh -c "$(llvm.get_cflags)")
#$(info llvm.cflags="$(llvm.cflags)")
# Get include dirs using grep & sed to extract "-I<dir>" and "-isystem<dir>" entries
# that can occur anywhere in the string and <dir> may have a leading spaces, but the
# regex assumes a directory does NOT contain spaces.
# Note: [:space:] is used for greater portability.
llvm.get_include_dirs := echo '$(llvm.cflags)' | grep -oE -- '(^-I[[:space:]]*| -I[[:space:]]*|^-isystem[[:space:]]*| -isystem[[:space:]]*)[^[:space:]]+' | sed -E 's/^[[:space:]]*(-I[[:space:]]*|-isystem[[:space:]]*)//'
#$(info llvm.get_include_dirs="$(llvm.get_include_dirs)")
llvm.include_dirs := $(shell sh -c "$(llvm.get_include_dirs)")
#$(info llvm.include_dirs="$(llvm.include_dirs)")
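# Worked example with hypothetical cflags: given
#   llvm.cflags = -I/usr/lib/llvm-7/include -std=c++11 -isystem /opt/inc
# the grep/sed pipeline above extracts
#   llvm.include_dirs = /usr/lib/llvm-7/include /opt/inc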
# Get the compiler output of verbose "-v" and preprocess, "-E" parameters which
# contains the search paths.
verbose_preprocess_string := $(shell echo | $(CC) -v -E - 2>&1)
#$(info verbose_preprocess_string="$(verbose_preprocess_string)")
# We must escape any double quotes, ", and any hash, #, characters.
quoteDblQuote := $(subst ",\",$(verbose_preprocess_string))
#$(info quoteDblQuote="$(quoteDblQuote)")
quoted_verbose_preprocess_string := $(subst \#,\\\#,$(quoteDblQuote))
#$(info quoted_verbose_preprocess_string="$(quoted_verbose_preprocess_string)")
# Now use a sed command line to extract the search paths from the
# quoted verbose preprocess string
get_search_paths := sed 's/\(.*\)search starts here:\(.*\)End of search list.\(.*\)/\2/'
#$(info get_search_paths="$(get_search_paths)")
search_paths := $(shell echo "$(quoted_verbose_preprocess_string)" | $(get_search_paths))
#$(info search_paths="$(search_paths)")
# Note: $(search_paths) is padded with a space on front and back so
# that when we iterate the ${inc_dir} variable is guaranteed to have
# a space at the beginning and end making finding a match easy. If
# there is no match we output the ${inc_dir}.
loopit := \
for inc_dir in $(llvm.include_dirs); do \
if ! echo " $(search_paths) " | grep -q " $${inc_dir} "; then \
echo "-isystem $(CROSS_SYSROOT)$${inc_dir}"; \
fi \
done
#$(info loopit="$(loopit)")
llvm.include = $(shell $(loopit))
#$(info llvm.include="$(llvm.include)")
llvm.libs := $(shell $(LLVM_CONFIG) --libs $(LLVM_LINK_STATIC)) -lz -lncurses
endif
compiler_version := "$(shell $(CC) --version | sed -n 1p)"
ifeq ($(runtime-bitcode),yes)
ifeq (,$(shell $(CC) -v 2>&1 | grep clang))
$(error Compiling the runtime as a bitcode file requires clang)
endif
endif
makefile_abs_path := $(realpath $(lastword $(MAKEFILE_LIST)))
packages_abs_src := $(shell dirname $(makefile_abs_path))/packages
$(shell mkdir -p $(PONY_BUILD_DIR))
$(info Building into $(PONY_BUILD_DIR))
lib := $(PONY_BUILD_DIR)/lib/$(arch)
bin := $(PONY_BUILD_DIR)
tests := $(PONY_BUILD_DIR)
benchmarks := $(PONY_BUILD_DIR)
obj := $(PONY_BUILD_DIR)/obj-$(arch)
# Libraries. Defined as
# (1) a name and output directory
libponyc := $(lib)
libponycc := $(lib)
libponyrt := $(lib)
ifeq ($(OSTYPE),linux)
libponyrt-pic := $(lib)
endif
# Define special case rules for a targets source files. By default
# this makefile assumes that a targets source files can be found
# relative to a parent directory of the same name in $(PONY_SOURCE_DIR).
# Note that it is possible to collect files and exceptions with
# arbitrarily complex shell commands, as long as ':=' is used
# for definition, instead of '='.
ifneq ($(OSTYPE),windows)
libponyc.except += src/libponyc/platform/signed.cc
libponyc.except += src/libponyc/platform/unsigned.cc
libponyc.except += src/libponyc/platform/vcvars.c
endif
# Handle platform specific code to avoid "no symbols" warnings.
libponyrt.except =
ifneq ($(OSTYPE),windows)
libponyrt.except += src/libponyrt/asio/iocp.c
libponyrt.except += src/libponyrt/lang/win_except.c
endif
ifneq ($(OSTYPE),linux)
libponyrt.except += src/libponyrt/asio/epoll.c
endif
ifneq ($(OSTYPE),osx)
ifneq ($(OSTYPE),bsd)
libponyrt.except += src/libponyrt/asio/kqueue.c
endif
endif
libponyrt.except += src/libponyrt/asio/sock.c
libponyrt.except += src/libponyrt/dist/dist.c
libponyrt.except += src/libponyrt/dist/proto.c
ifeq ($(OSTYPE),linux)
libponyrt-pic.dir := src/libponyrt
libponyrt-pic.except := $(libponyrt.except)
endif
# Third party, but requires compilation. Defined as
# (1) a name and output directory.
# (2) a list of the source files to be compiled.
libgtest := $(lib)
libgtest.dir := lib/gtest
libgtest.files := $(libgtest.dir)/gtest-all.cc
libgbenchmark := $(lib)
libgbenchmark.dir := lib/gbenchmark
libgbenchmark.srcdir := $(libgbenchmark.dir)/src
libblake2 := $(lib)
libblake2.dir := lib/blake2
libblake2.files := $(libblake2.dir)/blake2b-ref.c
# We don't add libponyrt here. It's a special case because it can be compiled
# to LLVM bitcode.
ifeq ($(OSTYPE), linux)
libraries := libponyc libponyrt-pic libgtest libgbenchmark libblake2
else
libraries := libponyc libgtest libgbenchmark libblake2
endif
ifeq ($(OSTYPE), bsd)
extra.bsd.libs = -lpthread -lexecinfo
llvm.libs += $(extra.bsd.libs)
endif
prebuilt := llvm
# Binaries. Defined as
# (1) a name and output directory.
ponyc := $(bin)
binaries := ponyc
# Tests suites are directly attached to the libraries they test.
libponyc.tests := $(tests)
libponyrt.tests := $(tests)
tests := libponyc.tests libponyrt.tests
# Benchmark suites are directly attached to the libraries they test.
libponyc.benchmarks := $(benchmarks)
libponyc.benchmarks.dir := benchmark/libponyc
libponyc.benchmarks.srcdir := $(libponyc.benchmarks.dir)
libponyrt.benchmarks := $(benchmarks)
libponyrt.benchmarks.dir := benchmark/libponyrt
libponyrt.benchmarks.srcdir := $(libponyrt.benchmarks.dir)
benchmarks := libponyc.benchmarks libponyrt.benchmarks
# Define include paths for targets if necessary. Note that these include paths
# will automatically apply to the test suite of a target as well.
libponyc.include := -I src/common/ -I src/libponyrt/ $(llvm.include) \
-isystem lib/blake2
libponycc.include := -I src/common/ $(llvm.include)
libponyrt.include := -I src/common/ -I src/libponyrt/
libponyrt-pic.include := $(libponyrt.include)
libponyc.tests.include := -I src/common/ -I src/libponyc/ -I src/libponyrt \
$(llvm.include) -isystem lib/gtest/
libponyrt.tests.include := -I src/common/ -I src/libponyrt/ -isystem lib/gtest/
libponyc.benchmarks.include := -I src/common/ -I src/libponyc/ \
$(llvm.include) -isystem lib/gbenchmark/include/
libponyrt.benchmarks.include := -I src/common/ -I src/libponyrt/ -isystem \
lib/gbenchmark/include/
ponyc.include := -I src/common/ -I src/libponyrt/ $(llvm.include)
libgtest.include := -isystem lib/gtest/
libgbenchmark.include := -isystem lib/gbenchmark/include/
libblake2.include := -isystem lib/blake2/
ifneq (,$(filter $(OSTYPE), osx bsd))
libponyrt.include += -I $(CROSS_SYSROOT)/usr/local/include
endif
# target specific build options
libponyrt.tests.linkoptions += -rdynamic
ifneq ($(ALPINE),)
libponyrt.tests.linkoptions += -lexecinfo
endif
libponyc.buildoptions = -D__STDC_CONSTANT_MACROS
libponyc.buildoptions += -D__STDC_FORMAT_MACROS
libponyc.buildoptions += -D__STDC_LIMIT_MACROS
libponyc.buildoptions += -DPONY_ALWAYS_ASSERT
libponyc.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE)
libponyc.tests.buildoptions = -D__STDC_CONSTANT_MACROS
libponyc.tests.buildoptions += -D__STDC_FORMAT_MACROS
libponyc.tests.buildoptions += -D__STDC_LIMIT_MACROS
libponyc.tests.buildoptions += -DPONY_ALWAYS_ASSERT
libponyc.tests.buildoptions += -DPONY_PACKAGES_DIR=\"$(packages_abs_src)\"
libponyc.tests.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE)
libponyc.tests.linkoptions += -rdynamic
ifneq ($(ALPINE),)
libponyc.tests.linkoptions += -lexecinfo
endif
libponyc.benchmarks.buildoptions = -D__STDC_CONSTANT_MACROS
libponyc.benchmarks.buildoptions += -D__STDC_FORMAT_MACROS
libponyc.benchmarks.buildoptions += -D__STDC_LIMIT_MACROS
libponyc.benchmarks.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE)
libgbenchmark.buildoptions := \
-Wshadow -pedantic -pedantic-errors \
-Wfloat-equal -fstrict-aliasing -Wstrict-aliasing -Wno-invalid-offsetof \
-DHAVE_POSIX_REGEX -DHAVE_STD_REGEX -DHAVE_STEADY_CLOCK
ifneq ($(ALPINE),)
libponyc.benchmarks.linkoptions += -lexecinfo
libponyrt.benchmarks.linkoptions += -lexecinfo
endif
ponyc.buildoptions = $(libponyc.buildoptions)
ponyc.linkoptions += -rdynamic
ifneq ($(ALPINE),)
ponyc.linkoptions += -lexecinfo
BUILD_FLAGS += -DALPINE_LINUX
endif
ifeq ($(OSTYPE), linux)
libponyrt-pic.buildoptions += -fpic
libponyrt-pic.buildoptions-ll += -relocation-model=pic
endif
# Set default PIC for compiling if requested
ifdef default_pic
ifeq (true,$(default_pic))
libponyrt.buildoptions += -fpic
libponyrt.buildoptions-ll += -relocation-model=pic
BUILD_FLAGS += -DPONY_DEFAULT_PIC=true
else
ifneq (false,$(default_pic))
$(error default_pic must be true or false)
endif
endif
endif
# target specific disabling of build options
libgtest.disable = -Wconversion -Wno-sign-conversion -Wextra
libgbenchmark.disable = -Wconversion -Wno-sign-conversion
libblake2.disable = -Wconversion -Wno-sign-conversion -Wextra
# Link relationships.
ponyc.links = libponyc libponyrt llvm libblake2
libponyc.tests.links = libgtest libponyc llvm libblake2
libponyc.tests.links.whole = libponyrt
libponyrt.tests.links = libgtest libponyrt
libponyc.benchmarks.links = libblake2 libgbenchmark libponyc libponyrt llvm
libponyrt.benchmarks.links = libgbenchmark libponyrt
ifeq ($(OSTYPE),linux)
ponyc.links += libpthread libdl libatomic
libponyc.tests.links += libpthread libdl libatomic
libponyrt.tests.links += libpthread libdl libatomic
libponyc.benchmarks.links += libpthread libdl libatomic
libponyrt.benchmarks.links += libpthread libdl libatomic
endif
ifeq ($(OSTYPE),bsd)
libponyc.tests.links += libpthread
libponyrt.tests.links += $(extra.bsd.libs)
libponyc.benchmarks.links += libpthread
libponyrt.benchmarks.links += $(extra.bsd.libs)
endif
ifneq (, $(DTRACE))
$(shell $(DTRACE) -h -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_SOURCE_DIR)/common/dtrace_probes.h)
endif
# Overwrite the default linker for a target.
ponyc.linker = $(CXX) #compile as C but link as CPP (llvm)
libponyc.benchmarks.linker = $(CXX)
libponyrt.benchmarks.linker = $(CXX)
# make targets
targets := $(libraries) libponyrt $(binaries) $(tests) $(benchmarks)
.PHONY: all $(targets) install uninstall clean stats deploy prerelease check-version test-core test-stdlib-debug test-stdlib test-examples validate-grammar test-ci test-cross-ci benchmark stdlib stdlib-debug
all: $(targets)
@:
# Dependencies
libponyc.depends := libponyrt libblake2
libponyc.tests.depends := libponyc libgtest
libponyrt.tests.depends := libponyrt libgtest
libponyc.benchmarks.depends := libponyc libgbenchmark
libponyrt.benchmarks.depends := libponyrt libgbenchmark
ponyc.depends := libponyc libponyrt
# Generic make section, edit with care.
##########################################################################
# #
# DIRECTORY: Determines the source dir of a specific target #
# #
# ENUMERATE: Enumerates input and output files for a specific target #
# #
# CONFIGURE_COMPILER: Chooses a C or C++ compiler depending on the #
# target file. #
# #
# CONFIGURE_LIBS: Builds a string of libraries to link for a targets #
# link dependency. #
# #
# CONFIGURE_LINKER: Assembles the linker flags required for a target. #
# #
# EXPAND_COMMAND: Macro that expands to a proper make command for each #
# target. #
# #
##########################################################################
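# Expansion sketch for one concrete target: for "libponyc", EXPAND_COMMAND runs
# PREPARE, which locates sources under src/libponyc, maps src/libponyc/X.c to
# $(obj)/libponyc/X.o, then emits an archive rule for $(lib)/libponyc.a plus
# one compile rule per object file via EXPAND_OBJCMD.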
define DIRECTORY
$(eval sourcedir := )
$(eval outdir := $(obj)/$(1))
ifdef $(1).srcdir
sourcedir := $($(1).srcdir)
else ifdef $(1).dir
sourcedir := $($(1).dir)
else ifneq ($$(filter $(1),$(tests)),)
sourcedir := $(PONY_TEST_DIR)/$(subst .tests,,$(1))
outdir := $(obj)/tests/$(subst .tests,,$(1))
else ifneq ($$(filter $(1),$(benchmarks)),)
sourcedir := $(PONY_BENCHMARK_DIR)/$(subst .benchmarks,,$(1))
outdir := $(obj)/benchmarks/$(subst .benchmarks,,$(1))
else
sourcedir := $(PONY_SOURCE_DIR)/$(1)
endif
endef
define ENUMERATE
$(eval sourcefiles := )
ifdef $(1).files
sourcefiles := $$($(1).files)
else
sourcefiles := $$(shell find $$(sourcedir) -type f -name "*.c" -or -name\
"*.cc" -or -name "*.ll" | grep -v '.*/\.')
endif
ifdef $(1).except
sourcefiles := $$(filter-out $($(1).except),$$(sourcefiles))
endif
endef
define CONFIGURE_COMPILER
ifeq ($(suffix $(1)),.cc)
compiler := $(CXX)
flags := $(ALL_CXXFLAGS) $(CXXFLAGS)
endif
ifeq ($(suffix $(1)),.c)
compiler := $(CC)
flags := $(ALL_CFLAGS) $(CFLAGS)
endif
ifeq ($(suffix $(1)),.bc)
compiler := $(CC)
flags := $(ALL_CFLAGS) $(CFLAGS)
endif
ifeq ($(suffix $(1)),.ll)
compiler := $(CC)
flags := $(ALL_CFLAGS) $(CFLAGS) -Wno-override-module
endif
endef
define CONFIGURE_LIBS
ifneq (,$$(filter $(1),$(prebuilt)))
linkcmd += $($(1).ldflags)
libs += $($(1).libs)
else
libs += $(subst lib,-l,$(1))
endif
endef
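# e.g. a link entry "libponyrt" becomes "-lponyrt" here, while the prebuilt
# "llvm" entry contributes $(llvm.ldflags) and $(llvm.libs) instead.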
define CONFIGURE_LIBS_WHOLE
ifeq ($(OSTYPE),osx)
wholelibs += -Wl,-force_load,$(lib)/$(1).a
else
wholelibs += $(subst lib,-l,$(1))
endif
endef
define CONFIGURE_LINKER_WHOLE
$(eval wholelibs :=)
ifneq ($($(1).links.whole),)
$(foreach lk,$($(1).links.whole),$(eval $(call CONFIGURE_LIBS_WHOLE,$(lk))))
ifeq ($(OSTYPE),osx)
libs += $(wholelibs)
else
libs += -Wl,--whole-archive $(wholelibs) -Wl,--no-whole-archive
endif
endif
endef
define CONFIGURE_LINKER
$(eval linkcmd := $(LINKER_FLAGS) -L $(lib))
$(eval linker := $(CC))
$(eval libs :=)
ifdef $(1).linker
linker := $($(1).linker)
else ifneq (,$$(filter .cc,$(suffix $(sourcefiles))))
linker := $(CXX)
endif
$(eval $(call CONFIGURE_LINKER_WHOLE,$(1)))
$(foreach lk,$($(1).links),$(eval $(call CONFIGURE_LIBS,$(lk))))
linkcmd += $(libs) -L $(CROSS_SYSROOT)/usr/local/lib $($(1).linkoptions)
endef
define PREPARE
$(eval $(call DIRECTORY,$(1)))
$(eval $(call ENUMERATE,$(1)))
$(eval $(call CONFIGURE_LINKER,$(1)))
$(eval objectfiles := $(subst $(sourcedir)/,$(outdir)/,$(addsuffix .o,\
$(sourcefiles))))
$(eval bitcodefiles := $(subst .o,.bc,$(objectfiles)))
$(eval dependencies := $(subst .c,,$(subst .cc,,$(subst .ll,,$(subst .o,.d,\
$(objectfiles))))))
endef
define EXPAND_OBJCMD
$(eval file := $(subst .o,,$(1)))
$(eval $(call CONFIGURE_COMPILER,$(file)))
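  # The third call argument is the target name with the runtime-bitcode flag
  # appended, so this branch fires only for libponyrt when runtime-bitcode=yes
  # (the concatenation "libponyrtyes").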
ifeq ($(3),libponyrtyes)
ifneq ($(suffix $(file)),.bc)
$(subst .c,,$(subst .cc,,$(subst .ll,,$(1)))): $(subst .c,.bc,$(subst .cc,.bc,$(subst .ll,.bc,$(file))))
@echo '$$(notdir $$<)'
@mkdir -p $$(dir $$@)
$(SILENT)$(compiler) $(flags) -c -o $$@ $$<
else ifeq ($(suffix $(subst .bc,,$(file))),.ll)
$(subst .ll,,$(1)): $(subst $(outdir)/,$(sourcedir)/,$(subst .bc,,$(file)))
@echo '$$(notdir $$<)'
@mkdir -p $$(dir $$@)
$(SILENT)$(LLVM_AS) -o $$@ $$<
else
$(subst .c,,$(subst .cc,,$(1))): $(subst $(outdir)/,$(sourcedir)/,$(subst .bc,,$(file)))
@echo '$$(notdir $$<)'
@mkdir -p $$(dir $$@)
$(SILENT)$(compiler) -MMD -MP $(filter-out $($(2).disable),$(BUILD_FLAGS)) \
$(flags) $($(2).buildoptions) -emit-llvm -c -o $$@ $$< $($(2).include)
endif
else ifeq ($(suffix $(file)),.ll)
$(subst .ll,,$(1)): $(subst $(outdir)/,$(sourcedir)/,$(file))
@echo '$$(notdir $$<)'
@mkdir -p $$(dir $$@)
$(SILENT)$(LLVM_LLC) $(LL_FLAGS) $($(2).buildoptions-ll) -filetype=obj -o $$@ $$<
else
$(subst .c,,$(subst .cc,,$(1))): $(subst $(outdir)/,$(sourcedir)/,$(file))
@echo '$$(notdir $$<)'
@mkdir -p $$(dir $$@)
$(SILENT)$(compiler) -MMD -MP $(filter-out $($(2).disable),$(BUILD_FLAGS)) \
$(flags) $($(2).buildoptions) -c -o $$@ $$< $($(2).include)
endif
endef
define EXPAND_COMMAND
$(eval $(call PREPARE,$(1)))
$(eval ofiles := $(subst .c,,$(subst .cc,,$(subst .ll,,$(objectfiles)))))
$(eval bcfiles := $(subst .c,,$(subst .cc,,$(subst .ll,,$(bitcodefiles)))))
$(eval depends := )
$(foreach d,$($(1).depends),$(eval depends += $($(d))/$(d).$(LIB_EXT)))
ifeq ($(1),libponyrt)
$($(1))/libponyrt.$(LIB_EXT): $(depends) $(ofiles)
@mkdir -p $$(dir $$@)
@echo 'Linking libponyrt'
ifneq (,$(DTRACE))
ifeq ($(OSTYPE), linux)
@echo 'Generating dtrace object file (linux)'
$(SILENT)$(DTRACE) -G -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_BUILD_DIR)/dtrace_probes.o
$(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) $(PONY_BUILD_DIR)/dtrace_probes.o
else ifeq ($(OSTYPE), bsd)
@echo 'Generating dtrace object file (bsd)'
$(SILENT)rm -f $(PONY_BUILD_DIR)/dtrace_probes.o
$(SILENT)$(DTRACE) -G -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_BUILD_DIR)/dtrace_probes.o $(ofiles)
$(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) $(PONY_BUILD_DIR)/dtrace_probes.o
$(SILENT)$(AR) $(AR_FLAGS) $(lib)/libdtrace_probes.a $(PONY_BUILD_DIR)/dtrace_probes.o
else
$(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles)
endif
else
$(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles)
endif
ifeq ($(runtime-bitcode),yes)
$($(1))/libponyrt.bc: $(depends) $(bcfiles)
@mkdir -p $$(dir $$@)
@echo 'Generating bitcode for libponyrt'
$(SILENT)$(LLVM_LINK) -o $$@ $(bcfiles)
ifeq ($(config),release)
$(SILENT)$(LLVM_OPT) -O3 -o $$@ $$@
endif
libponyrt: $($(1))/libponyrt.bc $($(1))/libponyrt.$(LIB_EXT)
else
libponyrt: $($(1))/libponyrt.$(LIB_EXT)
endif
else ifneq ($(filter $(1),$(libraries)),)
$($(1))/$(1).$(LIB_EXT): $(depends) $(ofiles)
@mkdir -p $$(dir $$@)
@echo 'Linking $(1)'
$(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles)
$(1): $($(1))/$(1).$(LIB_EXT)
else
$($(1))/$(1): $(depends) $(ofiles)
@mkdir -p $$(dir $$@)
@echo 'Linking $(1)'
$(SILENT)$(linker) -o $$@ $(ofiles) $(linkcmd)
$(1): $($(1))/$(1)
endif
$(foreach bcfile,$(bitcodefiles),$(eval $(call EXPAND_OBJCMD,$(bcfile),$(1),$(addsuffix $(runtime-bitcode),$(1)))))
$(foreach ofile,$(objectfiles),$(eval $(call EXPAND_OBJCMD,$(ofile),$(1),$(addsuffix $(runtime-bitcode),$(1)))))
-include $(dependencies)
endef
$(foreach target,$(targets),$(eval $(call EXPAND_COMMAND,$(target))))
define EXPAND_INSTALL
ifeq ($(OSTYPE),linux)
install-libponyrt-pic: libponyrt-pic
@mkdir -p $(DESTDIR)$(ponydir)/lib/$(arch)
$(SILENT)cp $(lib)/libponyrt-pic.a $(DESTDIR)$(ponydir)/lib/$(arch)
endif
install-libponyrt: libponyrt
@mkdir -p $(DESTDIR)$(ponydir)/lib/$(arch)
$(SILENT)cp $(lib)/libponyrt.a $(DESTDIR)$(ponydir)/lib/$(arch)
ifeq ($(OSTYPE),linux)
install: libponyc libponyrt libponyrt-pic ponyc
else
install: libponyc libponyrt ponyc
endif
@mkdir -p $(DESTDIR)$(ponydir)/bin
@mkdir -p $(DESTDIR)$(ponydir)/lib/$(arch)
@mkdir -p $(DESTDIR)$(ponydir)/include/pony/detail
$(SILENT)cp $(lib)/libponyrt.a $(DESTDIR)$(ponydir)/lib/$(arch)
ifeq ($(OSTYPE),linux)
$(SILENT)cp $(lib)/libponyrt-pic.a $(DESTDIR)$(ponydir)/lib/$(arch)
endif
ifneq ($(wildcard $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.bc),)
$(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.bc $(DESTDIR)$(ponydir)/lib/$(arch)
endif
ifneq ($(wildcard $(lib)/libdtrace_probes.a),)
$(SILENT)cp $(lib)/libdtrace_probes.a $(DESTDIR)$(ponydir)/lib/$(arch)
endif
$(SILENT)cp $(lib)/libponyc.a $(DESTDIR)$(ponydir)/lib/$(arch)
$(SILENT)cp $(bin)/ponyc $(DESTDIR)$(ponydir)/bin
$(SILENT)cp src/libponyrt/pony.h $(DESTDIR)$(ponydir)/include
$(SILENT)cp src/common/pony/detail/atomics.h $(DESTDIR)$(ponydir)/include/pony/detail
$(SILENT)cp -r packages $(DESTDIR)$(ponydir)/
ifeq ($$(symlink),yes)
@mkdir -p $(DESTDIR)$(bindir)
@mkdir -p $(DESTDIR)$(libdir)
@mkdir -p $(DESTDIR)$(includedir)/pony/detail
$(SILENT)ln $(symlink.flags) $(ponydir)/bin/ponyc $(DESTDIR)$(bindir)/ponyc
$(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyrt.a $(DESTDIR)$(libdir)/libponyrt.a
ifeq ($(OSTYPE),linux)
$(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyrt-pic.a $(DESTDIR)$(libdir)/libponyrt-pic.a
endif
ifneq ($(wildcard $(DESTDIR)$(ponydir)/lib/$(arch)/libponyrt.bc),)
$(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyrt.bc $(DESTDIR)$(libdir)/libponyrt.bc
endif
ifneq ($(wildcard $(PONY_BUILD_DIR)/lib/$(arch)/libdtrace_probes.a),)
$(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libdtrace_probes.a $(DESTDIR)$(libdir)/libdtrace_probes.a
endif
$(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyc.a $(DESTDIR)$(libdir)/libponyc.a
$(SILENT)ln $(symlink.flags) $(ponydir)/include/pony.h $(DESTDIR)$(includedir)/pony.h
$(SILENT)ln $(symlink.flags) $(ponydir)/include/pony/detail/atomics.h $(DESTDIR)$(includedir)/pony/detail/atomics.h
endif
endef
$(eval $(call EXPAND_INSTALL))
define EXPAND_UNINSTALL
uninstall:
-$(SILENT)rm -rf $(ponydir) 2>/dev/null ||:
-$(SILENT)rm $(bindir)/ponyc 2>/dev/null ||:
-$(SILENT)rm $(libdir)/libponyrt.a 2>/dev/null ||:
ifeq ($(OSTYPE),linux)
-$(SILENT)rm $(libdir)/libponyrt-pic.a 2>/dev/null ||:
endif
ifneq ($(wildcard $(libdir)/libponyrt.bc),)
-$(SILENT)rm $(libdir)/libponyrt.bc 2>/dev/null ||:
endif
ifneq ($(wildcard $(libdir)/libdtrace_probes.a),)
-$(SILENT)rm $(libdir)/libdtrace_probes.a 2>/dev/null ||:
endif
-$(SILENT)rm $(libdir)/libponyc.a 2>/dev/null ||:
-$(SILENT)rm $(includedir)/pony.h 2>/dev/null ||:
-$(SILENT)rm -r $(includedir)/pony/ 2>/dev/null ||:
endef
$(eval $(call EXPAND_UNINSTALL))
ifdef verbose
bench_verbose = -DCMAKE_VERBOSE_MAKEFILE=true
endif
ifeq ($(lto),yes)
bench_lto = -DBENCHMARK_ENABLE_LTO=true
endif
benchmark: all
$(SILENT)echo "Running libponyc benchmarks..."
$(SILENT)$(PONY_BUILD_DIR)/libponyc.benchmarks
$(SILENT)echo "Running libponyrt benchmarks..."
$(SILENT)$(PONY_BUILD_DIR)/libponyrt.benchmarks
stdlib-debug: all
$(SILENT)PONYPATH=.:$(PONYPATH) $(PONY_BUILD_DIR)/ponyc $(cross_args) -d -s --checktree --verify packages/stdlib
stdlib: all
$(SILENT)PONYPATH=.:$(PONYPATH) $(PONY_BUILD_DIR)/ponyc $(cross_args) --checktree --verify packages/stdlib
test-stdlib-debug: stdlib-debug
$(SILENT)$(cross_runner) ./stdlib --sequential
$(SILENT)rm stdlib
test-stdlib: stdlib
$(SILENT)$(cross_runner) ./stdlib --sequential
$(SILENT)rm stdlib
test-core: all
$(SILENT)$(PONY_BUILD_DIR)/libponyc.tests --gtest_shuffle
$(SILENT)$(PONY_BUILD_DIR)/libponyrt.tests --gtest_shuffle
test: test-core test-stdlib test-examples
test-examples: all
$(SILENT)PONYPATH=.:$(PONYPATH) find examples/*/* -name '*.pony' -print | xargs -n 1 dirname | sort -u | grep -v ffi- | xargs -n 1 -I {} $(PONY_BUILD_DIR)/ponyc $(cross_args) -d -s --checktree -o {} {}
check-version: all
$(SILENT)$(PONY_BUILD_DIR)/ponyc --version
validate-grammar: all
$(SILENT)$(PONY_BUILD_DIR)/ponyc --antlr > pony.g.new
$(SILENT)diff pony.g pony.g.new
$(SILENT)rm pony.g.new
test-ci: all check-version test-core test-stdlib-debug test-stdlib test-examples validate-grammar
test-cross-ci: cross_args=--triple=$(cross_triple) --cpu=$(cross_cpu) --link-arch=$(cross_arch) --linker='$(cross_linker)'
test-cross-ci: cross_runner=$(QEMU_RUNNER)
test-cross-ci: test-ci
docs: all
$(SILENT)$(PONY_BUILD_DIR)/ponyc packages/stdlib --docs --pass expr
docs-online: docs
$(SILENT)$(SED_INPLACE) 's/site_name:\ stdlib/site_name:\ Pony Standard Library/' stdlib-docs/mkdocs.yml
# Note: linux only
define EXPAND_DEPLOY
deploy: test docs
$(SILENT)bash .bintray.bash debian "$(package_base_version)" "$(package_name)"
$(SILENT)bash .bintray.bash rpm "$(package_base_version)" "$(package_name)"
$(SILENT)bash .bintray.bash source "$(package_base_version)" "$(package_name)"
$(SILENT)rm -rf build/bin
@mkdir -p build/bin
@mkdir -p $(package)/usr/bin
@mkdir -p $(package)/usr/include/pony/detail
@mkdir -p $(package)/usr/lib
@mkdir -p $(package)/usr/lib/pony/$(package_version)/bin
@mkdir -p $(package)/usr/lib/pony/$(package_version)/include/pony/detail
@mkdir -p $(package)/usr/lib/pony/$(package_version)/lib
$(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyc.a $(package)/usr/lib/pony/$(package_version)/lib
$(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.a $(package)/usr/lib/pony/$(package_version)/lib
ifeq ($(OSTYPE),linux)
$(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt-pic.a $(package)/usr/lib/pony/$(package_version)/lib
endif
ifneq ($(wildcard $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.bc),)
$(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.bc $(package)/usr/lib/pony/$(package_version)/lib
endif
ifneq ($(wildcard $(PONY_BUILD_DIR)/lib/$(arch)/libdtrace_probes.a),)
$(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libdtrace_probes.a $(package)/usr/lib/pony/$(package_version)/lib
endif
$(SILENT)cp $(PONY_BUILD_DIR)/ponyc $(package)/usr/lib/pony/$(package_version)/bin
$(SILENT)cp src/libponyrt/pony.h $(package)/usr/lib/pony/$(package_version)/include
$(SILENT)cp src/common/pony/detail/atomics.h $(package)/usr/lib/pony/$(package_version)/include/pony/detail
$(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt.a $(package)/usr/lib/libponyrt.a
ifeq ($(OSTYPE),linux)
$(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt-pic.a $(package)/usr/lib/libponyrt-pic.a
endif
ifneq ($(wildcard $(package)/usr/lib/pony/$(package_version)/lib/libponyrt.bc),)
$(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt.bc $(package)/usr/lib/libponyrt.bc
endif
ifneq ($(wildcard $(package)/usr/lib/pony/$(package_version)/lib/libdtrace_probes.a),)
$(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libdtrace_probes.a $(package)/usr/lib/libdtrace_probes.a
endif
$(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyc.a $(package)/usr/lib/libponyc.a
$(SILENT)ln -f -s /usr/lib/pony/$(package_version)/bin/ponyc $(package)/usr/bin/ponyc
$(SILENT)ln -f -s /usr/lib/pony/$(package_version)/include/pony.h $(package)/usr/include/pony.h
$(SILENT)ln -f -s /usr/lib/pony/$(package_version)/include/pony/detail/atomics.h $(package)/usr/include/pony/detail/atomics.h
$(SILENT)cp -r packages $(package)/usr/lib/pony/$(package_version)/
$(SILENT)fpm -s dir -t deb -C $(package) -p build/bin --name $(package_name) --conflicts "ponyc-master" --conflicts "ponyc-release" --version $(package_base_version) --description "The Pony Compiler" --provides "ponyc" --provides "ponyc-release"
$(SILENT)fpm -s dir -t rpm -C $(package) -p build/bin --name $(package_name) --conflicts "ponyc-master" --conflicts "ponyc-release" --version $(package_base_version) --description "The Pony Compiler" --provides "ponyc" --provides "ponyc-release" --depends "ponydep-ncurses"
$(SILENT)git archive HEAD > build/bin/$(archive)
$(SILENT)tar rvf build/bin/$(archive) stdlib-docs
$(SILENT)bzip2 build/bin/$(archive)
$(SILENT)rm -rf $(package) build/bin/$(archive)
endef
$(eval $(call EXPAND_DEPLOY))
stats:
@echo
@echo '------------------------------'
@echo 'Compiler and standard library '
@echo '------------------------------'
@echo
@cloc --read-lang-def=pony.cloc src packages
@echo
@echo '------------------------------'
@echo 'Test suite:'
@echo '------------------------------'
@echo
@cloc --read-lang-def=pony.cloc test
clean:
@rm -rf $(PONY_BUILD_DIR)
@rm -rf $(package)
@rm -rf build/bin
@rm -rf stdlib-docs
@rm -f src/common/dtrace_probes.h
-@rmdir build 2>/dev/null ||:
@echo 'Repository cleaned ($(PONY_BUILD_DIR)).'
help:
@echo 'Usage: make [config=name] [options] [use=opt,...] [target]'
@echo
@echo 'CONFIGURATIONS:'
@echo ' debug'
@echo ' release (default)'
@echo
@echo 'ARCHITECTURE:'
@echo ' native (default)'
@echo ' [any compiler supported architecture]'
@echo
@echo 'OPTIONS:'
@echo '  arch=Name            Architecture; defaults to the host if not given'
@echo ' default_pic=true Make --pic the default'
@echo
@echo 'USE OPTIONS:'
@echo ' valgrind'
@echo ' pooltrack'
@echo ' dtrace'
@echo ' actor_continuations'
@echo ' coverage'
@echo ' llvm_link_static'
@echo ' scheduler_scaling_pthreads'
@echo ' thread_sanitizer'
@echo ' address_sanitizer'
@echo ' undefined_behavior_sanitizer'
@echo
@echo 'TARGETS:'
@echo ' libponyc Pony compiler library'
@echo ' libponyrt Pony runtime'
@echo ' libponyrt-pic Pony runtime -fpic'
@echo ' libponyc.tests Test suite for libponyc'
@echo ' libponyrt.tests Test suite for libponyrt'
@echo ' libponyc.benchmarks Benchmark suite for libponyc'
@echo ' libponyrt.benchmarks Benchmark suite for libponyrt'
@echo ' ponyc Pony compiler executable'
@echo
@echo ' all Build all of the above (default)'
@echo ' test Run test suite'
@echo ' benchmark Build and run benchmark suite'
@echo ' install Install ponyc'
@echo ' install-libponyrt Install libponyrt only (for cross'
@echo ' linking)'
@echo ' install-libponyrt-pic Install libponyrt-pic only (for cross'
@echo ' linking)'
@echo ' uninstall Remove all versions of ponyc'
@echo ' stats Print Pony cloc statistics'
@echo ' clean Delete all build files'
@echo
| 1 | 13,694 | we have a standard format for putting these together elsewhere, it does the filter check first and errors out and then sets based on the value. i think this should be adjusted to do that. there's no logical change, just an approach change. | ponylang-ponyc | c |
@@ -1088,6 +1088,7 @@ function createFunctionsMenu() {
{text: 'Power', handler: applyFuncToEachWithInput('pow', 'Please enter a power factor')},
{text: 'Square Root', handler: applyFuncToEach('squareRoot')},
{text: 'Time-adjusted Derivative', handler: applyFuncToEachWithInput('perSecond', "Please enter a maximum value if this metric is a wrapping counter (or just leave this blank)", {allowBlank: true})},
+ {text: 'Delay', handler: applyFuncToEachWithInput('delay', 'Please enter the number of steps to delay')},
{text: 'Integral', handler: applyFuncToEach('integral')},
{text: 'Percentile Values', handler: applyFuncToEachWithInput('percentileOfSeries', "Please enter the percentile to use")},
{text: 'Non-negative Derivative', handler: applyFuncToEachWithInput('nonNegativeDerivative', "Please enter a maximum value if this metric is a wrapping counter (or just leave this blank)", {allowBlank: true})}, | 1 | /* Copyright 2008 Orbitz WorldWide
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* ======================================================================
*
* PLEASE DO NOT USE A COMMA AFTER THE FINAL ITEM IN A LIST.
*
* ======================================================================
*
* It works fine in FF / Chrome, but completely breaks Internet Explorer.
* Thank you.
*
*/
var DEFAULT_WINDOW_WIDTH = 600;
var DEFAULT_WINDOW_HEIGHT = 400;
var Composer;
function createComposerWindow(myComposer) {
//This global is an ugly hack, probably need to make these widgets into more formal objects
//and keep their associated Composer object as an attribute.
Composer = myComposer;
//Can't define this inline because I need a reference in a closure below
var timeDisplay = new Ext.Toolbar.TextItem({text: "Now showing the past 24 hours"});
var topToolbar = [
createToolbarButton('Update Graph', 'refresh.png', updateGraph),
createToolbarButton('Select a Date Range', 'calendar.png', toggleWindow(createCalendarWindow) ),
createToolbarButton('Select Recent Data', 'clock.png', toggleWindow(createRecentWindow) ),
createToolbarButton('Create from URL', 'upload.png', toggleWindow(createURLWindow) ),
createToolbarButton('Short URL', 'share.png', showShortUrl),
'-',
timeDisplay
];
if (GraphiteConfig.showMyGraphs) {
var saveButton = createToolbarButton('Save to My Graphs', 'save.png', saveMyGraph);
var deleteButton = createToolbarButton('Delete from My Graphs', 'trash.png', deleteMyGraph);
topToolbar.splice(0, 0, saveButton, deleteButton);
}
var bottomToolbar = [
{ text: "Graph Options", menu: createOptionsMenu() },
{ text: "Graph Data", handler: toggleWindow(GraphDataWindow.create.createDelegate(GraphDataWindow)) },
{ text: "Auto-Refresh", id: 'autorefresh_button', enableToggle: true, toggleHandler: toggleAutoRefresh }
];
var win = new Ext.Window({
width: DEFAULT_WINDOW_WIDTH,
height: DEFAULT_WINDOW_HEIGHT,
title: "Graphite Composer",
layout: "border",
region: "center",
maximizable: true,
closable: false,
tbar: topToolbar,
buttons: bottomToolbar,
buttonAlign: 'left',
items: { html: "<img id='image-viewer' src='" + document.body.dataset.baseUrl + "render'/>", region: "center" },
listeners: {
activate: keepDataWindowOnTop,
show: fitImageToWindow,
resize: fitImageToWindow
}
});
// Tack on some convenience closures
win.updateTimeDisplay = function (time) {
var text;
if (time.mode == 'date-range') {
text = "<b>From</b> " + time.startDate.toLocaleString();
text += " <b>Until</b> " + time.endDate.toLocaleString();
text = text.replace(/:00 /g, " "); // Strip out the seconds
} else if (time.mode == 'recent') {
text = "Now showing the past " + time.quantity + " " + time.units;
}
timeDisplay.getEl().dom.innerHTML = text;
};
win.updateUI = function () {
var toggled = Composer.url.getParam('autorefresh') ? true : false;
Ext.getCmp('autorefresh_button').toggle(toggled);
updateCheckItems();
};
win.getImage = function () {
return Ext.getDom('image-viewer');
};
return win;
}
function toggleWindow(createFunc) { // Convenience for lazily creating toggled dialogs
function toggler (button, e) {
if (!button.window) { //First click, create the window
button.window = createFunc();
}
if (button.window.isVisible()) {
button.window.hide();
} else {
button.window.show();
}
}
return toggler;
}
function ifEnter(func) { // Convenience decorator for specialkey listener definitions
return function (widget, e) {
if (e.getCharCode() == Ext.EventObject.RETURN) {
func(widget);
}
}
}
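// Usage sketch: listeners: {specialkey: ifEnter(updateGraph)} runs the handler
// only when Enter is pressed, as the calendar time fields below do.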
function keepDataWindowOnTop () {
if (GraphDataWindow.window && GraphDataWindow.window.isVisible()) {
GraphDataWindow.window.toFront();
}
}
function fitImageToWindow(win) {
Composer.url.setParam('width', win.getInnerWidth());
Composer.url.setParam('height', win.getInnerHeight());
try {
Composer.updateImage();
} catch (err) {
//An exception gets thrown when the initial resize event
//occurs prior to rendering the image viewer. Safe to ignore.
}
}
/* Toolbar stuff */
function createToolbarButton(tip, icon, handler) {
return new Ext.Toolbar.Button({
style: "margin: 0 5px; background:transparent url(" + document.body.dataset.staticRoot + "img/" + icon + ") no-repeat scroll 0% 50%",
handler: handler,
handleMouseEvents: false,
text: " ",
listeners: {
render: function (button) {
button.el.toolTip = new Ext.ToolTip({
html: tip,
showDelay: 100,
dismissDelay: 10000,
target: button.el
});
}
}
});
}
/* "Date Range" Calendar */
function createCalendarWindow() {
// Start/End labels
var style = "font-family: tahoma,arial,verdana,sans-serif; font-size:11px;";
var startDateHeader = {
html: "<center><span id='startDate' style=\"" + style + "\">Start Date</span></center>"
};
var endDateHeader = {
html: "<center><span id='endDate' style=\"" + style + "\">End Date</span></center>"
};
// Date controls
var startDateControl = new Ext.DatePicker({
id: 'start-date',
maxDate: new Date()
});
var endDateControl = new Ext.DatePicker({
id: 'end-date',
maxDate: new Date()
});
startDateControl.on('select', calendarSelectionMade);
endDateControl.on('select', calendarSelectionMade);
// Time controls
  var startTimeControl = new Ext.form.TimeField({
id: 'start-time',
increment: 30,
allowBlank: false,
value: "12:00 AM",
listeners: {select: calendarSelectionMade, specialkey: ifEnter(calendarSelectionMade)}
});
  var endTimeControl = new Ext.form.TimeField({
id: 'end-time',
allowBlank: false,
value: "11:59 PM",
listeners: {select: calendarSelectionMade, specialkey: ifEnter(calendarSelectionMade)}
});
var myWindow;
var resizeStuff = function () {
startTimeControl.setWidth( startDateControl.el.getWidth() );
endTimeControl.setWidth( endDateControl.el.getWidth() );
myWindow.setWidth( startDateControl.el.getWidth() + endDateControl.el.getWidth() + myWindow.getFrameWidth() );
//myWindow.setHeight( startDateControl.el.getHeight() + startTimeControl.el.getHeight() + myWindow.getFrameHeight() );
};
myWindow = new Ext.Window({
title: "Select Date Range",
layout: 'table',
height: 300,
width: 400,
layoutConfig: { columns: 2 },
closeAction: 'hide',
items: [
startDateHeader,
endDateHeader,
startDateControl,
endDateControl,
startTimeControl,
endTimeControl
],
listeners: {show: resizeStuff}
});
return myWindow;
}
function calendarSelectionMade(datePicker, selectedDate) {
var startDate = getCalendarSelection('start');
var endDate = getCalendarSelection('end');
Composer.url.setParam('from', asDateString(startDate) );
Composer.url.setParam('until', asDateString(endDate) );
Composer.updateImage();
Composer.window.updateTimeDisplay({
mode: 'date-range',
startDate: startDate,
endDate: endDate
});
}
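// Converts the 12-hour picker values to 24-hour form: "12:30 AM" yields hour 0
// (00:30) and "1:05 PM" yields hour 13 (13:05) before being added to the date.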
function getCalendarSelection(which) {
var myDate = Ext.getCmp(which + '-date').getValue();
var myTime = Ext.getCmp(which + '-time').getEl().dom.value; // Need to grab the raw textfield value, which may not be selected
var myHour = myTime.match(/(\d+):/)[1];
var myMinute = myTime.match(/:(\d+)/)[1];
if (myTime.match(/\bAM\b/i) && myHour == '12') {
myHour = 0;
}
if (myTime.match(/\bPM\b/i) && myHour != '12') {
myHour = parseInt(myHour) + 12;
}
return myDate.add(Date.HOUR, myHour).add(Date.MINUTE, myMinute);
}
function asDateString(dateObj) {
return dateObj.format('H:i_Ymd');
}
/* Short url window */
function showShortUrl() {
  var showUrl = function(options, success, response) {
if(success) {
var win = new Ext.Window({
title: "Graph URL",
width: 600,
height: 125,
layout: 'border',
modal: true,
items: [
{
xtype: "label",
region: 'north',
style: "text-align: center;",
text: "Short Direct URL to this graph"
}, {
xtype: 'textfield',
region: 'center',
value: window.location.origin + response.responseText,
editable: false,
style: "text-align: center; font-size: large;",
listeners: {
focus: function (field) { field.selectText(); }
}
}
],
buttonAlign: 'center',
buttons: [
{text: "Close", handler: function () { win.close(); } }
]
});
win.show();
}
}
Ext.Ajax.request({
method: 'GET',
url: document.body.dataset.baseUrl + 's/render/?' + Composer.url.queryString,
    callback: showUrl
});
}
/* "Recent Data" dialog */
function createURLWindow() {
var urlField = new Ext.form.TextField({
id: 'from-url',
allowBlank: false,
vtype: 'url',
listeners: { change: urlChosen, specialkey: ifEnter(urlChosen) }
});
return new Ext.Window({
title: "Enter a URL to build graph from",
layout: 'fit',
height: 60,
width: 450,
closeAction: 'hide',
items: [
urlField
]
});
}
function urlChosen() {
var url = Ext.getCmp('from-url').getValue();
Composer.loadMyGraph("temp", decodeURIComponent(url))
}
function createRecentWindow() {
var quantityField = new Ext.form.NumberField({
id: 'time-quantity',
grow: true,
value: 24,
listeners: {change: recentSelectionMade, specialkey: ifEnter(recentSelectionMade)}
});
var unitSelector = new Ext.form.ComboBox({
id: 'time-units',
editable: false,
triggerAction: 'all',
mode: 'local',
store: ['minutes', 'hours', 'days', 'weeks', 'months', 'years'],
width: 75,
value: 'hours',
listeners: {select: recentSelectionMade}
});
return new Ext.Window({
title: "Select a Recent Time Range",
layout: 'table',
height: 60, //there's gotta be a way to auto-size these windows!
width: 235,
layoutConfig: { columns: 3 },
closeAction: 'hide',
items: [
{
html: "<div style=\"border: none; background-color: rgb(223,232,246)\">View the past</div>",
style: "border: none; background-color: rgb(223,232,246)"
},
quantityField,
unitSelector
]
});
}
function recentSelectionMade(combo, record, index) {
var quantity = Ext.getCmp('time-quantity').getValue();
var units = Ext.getCmp('time-units').getValue();
var fromString = '-' + quantity + units;
Composer.url.setParam('from', fromString);
Composer.url.removeParam('until');
Composer.updateImage();
Composer.window.updateTimeDisplay({
mode: 'recent',
quantity: quantity,
units: units
});
}
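// e.g. quantity 24 with units "hours" sets from=-24hours and clears any until
// parameter, matching Graphite's relative time syntax.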
/* "Save to MyGraphs" */
function saveMyGraph(button, e) {
var myGraphName = "";
if (Composer.state.myGraphName) {
myGraphName = Composer.state.myGraphName;
var tmpArray = myGraphName.split('.');
if (tmpArray.length > 1) {
tmpArray = tmpArray.slice(1, tmpArray.length);
myGraphName = tmpArray.join('.');
}
}
Ext.MessageBox.prompt(
"Save to My Graphs", //title
"Please enter a name for your Graph", //prompt message
function (button, text) { //handler
if (button != 'ok') {
return;
}
if (!text) {
Ext.Msg.alert("You must enter a graph name!");
return;
}
if (text.charAt(text.length - 1) == '.') {
Ext.Msg.alert("Graph names cannot end in a period.");
return;
}
//Save the name for future use and re-load the "My Graphs" tree
Composer.state.myGraphName = text;
//Send the request
Ext.Ajax.request({
method: 'GET',
url: document.body.dataset.baseUrl + 'composer/mygraph/',
params: {action: 'save', graphName: text, url: Composer.url.getURL()},
callback: handleSaveMyGraphResponse
});
},
this, //scope
false, //multiline
myGraphName ? myGraphName : "" //default value
);
}
function handleSaveMyGraphResponse(options, success, response) {
var message;
if (success) {
Browser.trees.mygraphs.reload();
message = "Graph saved successfully";
} else {
message = "There was an error saving your Graph, please try again later.";
}
Ext.MessageBox.show({
title: "Save to My Graphs - Result",
msg: message,
buttons: Ext.MessageBox.OK
});
}
function deleteMyGraph() {
Ext.MessageBox.prompt(
"Delete a saved My Graph", //title
"Please enter the name of the My Graph you wish to delete", //prompt message
function (button, text) { //handler
if (button != 'ok') {
return;
}
if (!text) {
Ext.Msg.alert("Invalid My Graph name!");
return;
}
//Send the request
Ext.Ajax.request({
method: 'GET',
url: document.body.dataset.baseUrl + 'composer/mygraph/',
params: {action: 'delete', graphName: text},
callback: function (options, success, response) {
var message;
if (success) {
Browser.trees.mygraphs.reload();
message = "Graph deleted successfully";
} else {
message = "There was an error performing the operation.";
}
Ext.Msg.show({
title: 'Delete My Graph',
msg: message,
buttons: Ext.Msg.OK
});
}
});
},
this, //scope
false, //multiline
Composer.state.myGraphName ? Composer.state.myGraphName : "" //default value
);
}
/* Graph Data dialog */
var GraphDataWindow = {
create: function () {
var _this = this;
this.targetList = new Ext.ListView({
store: TargetStore,
multiSelect: true,
emptyText: "No graph targets",
reserveScrollOffset: true,
columnSort: false,
hideHeaders: true,
width: 385,
height: 140,
columns: [ {header: "Graph Targets", width: 1.0, dataIndex: "value"} ],
listeners: {
contextmenu: this.targetContextMenu,
afterrender: this.targetChanged,
selectionchange: this.targetChanged,
dblclick: function (targetList, index, node, e) {
targetList.select(index);
this.editTarget();
},
scope: this
}
});
var targetsPanel = new Ext.Panel({
region: 'center',
width: 400,
height: 200,
layout: 'fit',
items: this.targetList
});
var buttonPanel = new Ext.Panel({
region: 'east',
width: 100,
baseCls: 'x-window-mc',
layout: {
type: 'vbox',
align: 'stretch'
},
defaults: { xtype: 'button', disabled: true },
items: [
{
text: 'Add',
handler: this.addTarget.createDelegate(this),
disabled: false
}, {
text: 'Edit',
id: 'editTargetButton',
handler: this.editTarget.createDelegate(this)
}, {
text: 'Remove',
id: 'removeTargetButton',
handler: this.removeTarget.createDelegate(this)
}, {
text: 'Move',
id: 'moveButton',
menuAlign: 'tr-tl',
menu: {
subMenuAlign: 'tr-tl',
defaults: {
defaultAlign: 'tr-tl',
},
items: [
{ text: 'Move Up', handler: this.moveTargetUp.createDelegate(this) },
{ text: 'Move Down', handler: this.moveTargetDown.createDelegate(this) },
{ text: 'Swap', handler: this.swapTargets.createDelegate(this), id: 'menuSwapTargets' }
]
}
}, {
text: 'Apply Function',
id: 'applyFunctionButton',
menuAlign: 'tr-tl',
menu: {
subMenuAlign: 'tr-tl',
defaults: {
defaultAlign: 'tr-tl'
},
items: createFunctionsMenu()
}
}, {
text: 'Undo Function',
handler: this.removeOuterCall.createDelegate(this),
id: 'undoFunctionButton'
}
]
});
this.window = new Ext.Window({
title: "Graph Data",
height: 200,
width: 600,
closeAction: 'hide',
layout: 'border',
items: [
targetsPanel,
buttonPanel
],
listeners: {
afterrender: function () {
if (_this.targetList.getNodes().length > 0) {
_this.targetList.select(0);
}
}
}
});
return this.window;
},
targetChanged: function () {
if (!this.targetList) { return; } // Ignore initial call
var selected;
try {
selected = this.getSelectedTargets().length;
} catch (e) {
return;
}
if (selected == 0) {
Ext.getCmp('editTargetButton').disable();
Ext.getCmp('removeTargetButton').disable();
Ext.getCmp('applyFunctionButton').disable();
Ext.getCmp('undoFunctionButton').disable();
Ext.getCmp('moveButton').disable();
} else {
Ext.getCmp('editTargetButton').enable();
Ext.getCmp('removeTargetButton').enable();
Ext.getCmp('applyFunctionButton').enable();
Ext.getCmp('undoFunctionButton').enable();
Ext.getCmp('moveButton').enable();
}
// Swap Targets
if (selected == 2)
Ext.getCmp('menuSwapTargets').enable();
else
Ext.getCmp('menuSwapTargets').disable();
},
targetContextMenu: function (targetList, index, node, e) {
/* Select the right-clicked row unless it is already selected */
if (! targetList.isSelected(index) ) {
targetList.select(index);
}
var removeItem = {text: "Remove", handler: this.removeTarget.createDelegate(this)};
var editItem = {text: "Edit", handler: this.editTarget.createDelegate(this)};
var moveMenu = {
text: "Move",
menu: [
{ text: "Move Up", handler: this.moveTargetUp.createDelegate(this) },
{ text: "Move Down", handler: this.moveTargetDown.createDelegate(this) },
{ text: "Swap", handler: this.swapTargets.createDelegate(this), disabled: true }
]
};
if (this.getSelectedTargets().length == 0) {
removeItem.disabled = true;
editItem.disabled = true;
moveMenu.disabled = true;
}
if (this.getSelectedTargets().length == 2)
moveMenu.menu[2].disabled = false;
var contextMenu = new Ext.menu.Menu({ items: [removeItem, editItem, moveMenu] });
contextMenu.showAt( e.getXY() );
e.stopEvent();
},
applyFuncToEach: function (funcName, extraArg) {
var _this = this;
function applyFunc() {
Ext.each(_this.getSelectedTargets(),
function (target) {
var newTarget;
if (extraArg) {
newTarget = funcName + '(' + target + ',' + extraArg + ')';
} else {
newTarget = funcName + '(' + target + ')';
}
replaceTarget(target, newTarget);
_this.targetList.select( TargetStore.findExact('value', newTarget), true);
}
);
Composer.syncTargetList();
Composer.updateImage();
}
return applyFunc;
},
applyFuncToEachWithInput: function (funcName, question, options) {
if (options == null) {
options = {};
}
function applyFunc() {
Ext.MessageBox.prompt(
"Input Required", //title
question, //message
function (button, inputValue) { //handler
if (button == 'ok' && (options.allowBlank || inputValue != '')) {
if (options.quote) {
inputValue = '"' + inputValue + '"';
}
applyFuncToEach(funcName, inputValue)();
}
},
this, //scope
false, //multiline
"" //initial value
);
}
applyFunc = applyFunc.createDelegate(this);
return applyFunc;
},
applyFuncToAll: function (funcName) {
function applyFunc() {
var args = this.getSelectedTargets().join(',');
var oldTargets = this.getSelectedTargets();
var firstTarget = oldTargets.shift();
var newTarget = funcName + '(' + args + ')';
// Insert new target where the first selected was
replaceTarget(firstTarget,newTarget);
Ext.each(oldTargets,
function (target) {
removeTarget(target);
}
);
Composer.syncTargetList();
Composer.updateImage();
this.targetList.select( TargetStore.findExact('value', newTarget), true);
}
applyFunc = applyFunc.createDelegate(this);
return applyFunc;
},
removeOuterCall: function () {
/* It turns out that this is a big pain in the ass to do properly.
* The following code is *almost* correct. It will fail if there is
* an argument with a quoted parenthesis in it. Who cares... */
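    /* Illustrative failure case (hypothetical target, not from the code):
     * for alias(foo.bar, "a ) , b") the quoted ')' drives `depth` negative,
     * so the comma inside the string triggers the "Malformed target" alert
     * even though the target itself is valid. */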
var _this = this;
Ext.each(this.getSelectedTargets(),
function (target) {
var args = [];
var i, c;
var lastArg = 0;
var depth = 0;
var argString = target.replace(/^[^(]+\((.+)\)/, "$1"); //First we strip it down to just args
for (i = 0; i < argString.length; i++) {
switch (argString.charAt(i)) {
case '(': depth += 1; break;
case '{': depth += 1; break;
case ')': depth -= 1; break;
case '}': depth -= 1; break;
case ',':
if (depth > 0) { continue; }
if (depth < 0) { Ext.Msg.alert("Malformed target, cannot remove outer call."); return; }
args.push( argString.substring(lastArg, i).replace(/^\s+/, '').replace(/\s+$/, '') );
lastArg = i + 1;
break;
}
}
args.push( argString.substring(lastArg, i) );
var firstIndex = indexOfTarget(target);
removeTarget(target);
        args.reverse();
Ext.each(args, function (arg) {
if (!arg.match(/^([0123456789\.]+|".+"|'.*')$/)) { //Skip string and number literals
insertTarget(firstIndex, arg);
_this.targetList.select( TargetStore.findExact('value', arg), true);
}
});
Composer.syncTargetList();
Composer.updateImage();
}
);
},
addTarget: function (target) {
var metricCompleter;
var win;
metricCompleter = new MetricCompleter({
listeners: {
specialkey: function (field, e) {
if (e.getKey() == e.ENTER) {
var target = metricCompleter.getValue();
addTarget(target);
Composer.syncTargetList();
Composer.updateImage();
win.close();
e.stopEvent();
return false;
}
},
afterrender: function (field) {
metricCompleter.focus('', 500);
}
}
});
win = new Ext.Window({
title: "Add a new Graph Target",
id: 'addTargetWindow',
modal: true,
width: 400,
height: 115,
layout: {
type: 'vbox',
align:'stretch',
pack: 'center'
},
items: [
{xtype: 'label', text: "Type the path of your new Graph Target."},
metricCompleter
],
buttonAlign: 'center',
buttons: [
{
xtype: 'button',
text: 'OK',
handler: function () {
var target = metricCompleter.getValue();
addTarget(target);
Composer.syncTargetList();
Composer.updateImage();
win.close();
}
}, {
xtype: 'button',
text: 'Cancel',
handler: function () {
Ext.getCmp('addTargetWindow').close();
}
}
]
});
win.show();
},
removeTarget: function (item, e) {
Ext.each(this.getSelectedTargets(), function (target) {
removeTarget(target);
});
Composer.syncTargetList();
Composer.updateImage();
},
editTarget: function (item, e) {
var selected = this.targetList.getSelectedRecords();
if (selected.length != 1) {
Ext.MessageBox.show({
title: "Error",
msg: "You must select exactly one target to edit.",
icon: Ext.MessageBox.ERROR,
buttons: Ext.MessageBox.OK
});
return;
}
var record = selected[0];
var metricCompleter;
var win;
metricCompleter = new MetricCompleter({
value: record.get('value'),
listeners: {
specialkey: function (field, e) {
if (e.getKey() == e.ENTER) {
var target = metricCompleter.getValue();
record.set('value', target);
record.commit();
Composer.syncTargetList();
Composer.updateImage();
win.close();
e.stopEvent();
return false;
}
},
afterrender: function (field) {
metricCompleter.focus('', 500);
}
}
});
function editHandler () {
var newValue = metricCompleter.getValue();
if (newValue != '') {
record.set('value', newValue);
record.commit();
Composer.syncTargetList();
Composer.updateImage();
}
win.close();
}
editHandler = editHandler.createDelegate(this); //dynamic scoping can really be a bitch
win = new Ext.Window({
title: "Edit Graph Target",
id: 'editTargetWindow',
modal: true,
width: 400,
height: 115,
layout: {
type: 'vbox',
align:'stretch',
pack: 'center'
},
items: [
{xtype: 'label', text: "Edit the path of your Graph Target."},
metricCompleter
],
buttonAlign: 'center',
buttons: [
{
xtype: 'button',
text: 'OK',
handler: editHandler
}, {
xtype: 'button',
text: 'Cancel',
handler: function () {
win.close();
}
}
]
});
win.show();
},
moveTargetUp: function() {
this._moveTarget(-1);
},
moveTargetDown: function() {
this._moveTarget(1);
},
swapTargets: function() {
this._swapTargets();
},
_moveTarget: function(direction) {
      var store = this.targetList.getStore();
      var selectedRecords = this.targetList.getSelectedRecords();
      // Don't move past boundaries
      var exit = false;
      Ext.each(selectedRecords, function(record) {
        var index = store.indexOf(record);
if (direction == -1 && index == 0) {
exit = true;
return false;
}
else if (direction == 1 && index == store.getCount() - 1) {
exit = true;
return false;
}
});
if (exit)
return;
      var newSelections = [];
      Ext.each(selectedRecords, function(recordA) {
        var indexA = store.indexOf( recordA );
        var valueA = recordA.get('value');
        var recordB = store.getAt( indexA + direction );
// swap
recordA.set('value', recordB.get('value'));
recordB.set('value', valueA);
recordA.commit();
recordB.commit();
newSelections.push( indexA + direction );
});
Composer.syncTargetList();
Composer.updateImage();
this.targetList.select(newSelections);
},
_swapTargets: function() {
      var selectedRecords = this.targetList.getSelectedRecords();
      if (selectedRecords.length != 2)
        return;
      var recordA = selectedRecords[0];
      var recordB = selectedRecords[1];
      var valueA = recordA.get('value');
recordA.set('value', recordB.get('value'));
recordB.set('value', valueA);
recordA.commit();
recordB.commit();
Composer.syncTargetList();
Composer.updateImage();
},
addWlSelected: function (item, e) {
Ext.Ajax.request({
url: document.body.dataset.baseUrl + "whitelist/add",
method: "POST",
success: function () { Ext.Msg.alert("Result", "Successfully added metrics to whitelist."); },
failure: function () { Ext.Msg.alert("Result", "Failed to add metrics to whitelist."); },
params: {metrics: this.getSelectedTargets().join("\n") }
});
},
removeWlSelected: function (item, e) {
Ext.Ajax.request({
url: document.body.dataset.baseUrl + "whitelist/remove",
method: "POST",
success: function () { Ext.Msg.alert("Result", "Successfully removed metrics from whitelist."); },
failure: function () { Ext.Msg.alert("Result", "Failed to remove metrics from whitelist."); },
params: {metrics: this.getSelectedTargets().join("\n") }
});
},
getSelectedTargets: function () {
var targets = [];
Ext.each(this.targetList.getSelectedRecords(), function (record) {
targets.push( record.get('value') );
});
return targets;
}
};
/* Yet another ghetto api hack */
var applyFuncToAll = GraphDataWindow.applyFuncToAll.createDelegate(GraphDataWindow);
var applyFuncToEach = GraphDataWindow.applyFuncToEach.createDelegate(GraphDataWindow);
var applyFuncToEachWithInput = GraphDataWindow.applyFuncToEachWithInput.createDelegate(GraphDataWindow);
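/* Note: Ext's createDelegate returns a copy of the function with `this`
 * fixed to the given scope -- roughly the standard Function.prototype.bind:
 *
 *   var applyFuncToAll = GraphDataWindow.applyFuncToAll.bind(GraphDataWindow);
 */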
function createFunctionsMenu() {
return [{
text: 'Combine',
menu: [
{text: 'Sum', handler: applyFuncToAll('sumSeries')},
{text: 'Average', handler: applyFuncToAll('averageSeries')},
{text: 'Product', handler: applyFuncToAll('multiplySeries')},
{text: 'Min Values', handler: applyFuncToAll('minSeries')},
{text: 'Max Values', handler: applyFuncToAll('maxSeries')},
{text: 'Group', handler: applyFuncToAll('group')},
{text: 'Range', handler: applyFuncToAll('rangeOfSeries')},
{text: 'Count', handler: applyFuncToEach('countSeries')}
]
}, {
text: 'Transform',
menu: [
{text: 'Scale', handler: applyFuncToEachWithInput('scale', 'Please enter a scale factor')},
{text: 'ScaleToSeconds', handler: applyFuncToEachWithInput('scaleToSeconds', 'Please enter a number of seconds to scale to')},
{text: 'Offset', handler: applyFuncToEachWithInput('offset', 'Please enter the value to offset Y-values by')},
{text: 'OffsetToZero', handler: applyFuncToEach('offsetToZero')},
{text: 'Derivative', handler: applyFuncToEach('derivative')},
{text: 'Power', handler: applyFuncToEachWithInput('pow', 'Please enter a power factor')},
{text: 'Square Root', handler: applyFuncToEach('squareRoot')},
{text: 'Time-adjusted Derivative', handler: applyFuncToEachWithInput('perSecond', "Please enter a maximum value if this metric is a wrapping counter (or just leave this blank)", {allowBlank: true})},
{text: 'Integral', handler: applyFuncToEach('integral')},
{text: 'Percentile Values', handler: applyFuncToEachWithInput('percentileOfSeries', "Please enter the percentile to use")},
{text: 'Non-negative Derivative', handler: applyFuncToEachWithInput('nonNegativeDerivative', "Please enter a maximum value if this metric is a wrapping counter (or just leave this blank)", {allowBlank: true})},
{text: 'Log', handler: applyFuncToEachWithInput('log', 'Please enter a base')},
{text: 'Invert', handler: applyFuncToEach('invert')},
{text: 'Absolute Value', handler: applyFuncToEach('absolute')},
{text: 'timeShift', handler: applyFuncToEachWithInput('timeShift', 'Shift this metric ___ back in time (examples: 10min, 7d, 2w)', {quote: true})},
{text: 'Summarize', handler: applyFuncToEachWithInput('summarize', 'Please enter a summary interval (examples: 10min, 1h, 7d)', {quote: true})},
{text: 'Hit Count', handler: applyFuncToEachWithInput('hitcount', 'Please enter a summary interval (examples: 10min, 1h, 7d)', {quote: true})}
]
}, {
text: 'Calculate',
menu: [
{text: 'Moving Average', handler: applyFuncToEachWithInput('movingAverage', 'Moving average for the last ___ data points')},
{text: 'Moving Median', handler: applyFuncToEachWithInput('movingMedian', 'Moving median for the last ___ data points')},
{text: 'Moving Standard Deviation', handler: applyFuncToEachWithInput('stdev', 'Moving standard deviation for the last ___ data points')},
{text: 'Holt-Winters Forecast', handler: applyFuncToEach('holtWintersForecast')},
{text: 'Holt-Winters Confidence Bands', handler: applyFuncToEach('holtWintersConfidenceBands')},
{text: 'Holt-Winters Aberration', handler: applyFuncToEach('holtWintersAberration')},
{text: 'As Percent', handler: applyFuncToEachWithInput('asPercent', 'Please enter the value that corresponds to 100% or leave blank to use the total', {allowBlank: true})},
{text: 'Difference (of 2 series)', handler: applyFuncToAll('diffSeries')},
{text: 'Ratio (of 2 series)', handler: applyFuncToAll('divideSeries')}
]
}, {
text: 'Filter',
menu: [
{
text: 'Data Filters',
menu: [
{text: 'Remove Above Value', handler: applyFuncToEachWithInput('removeAboveValue', 'Set any values above ___ to None')},
{text: 'Remove Above Percentile', handler: applyFuncToEachWithInput('removeAbovePercentile', 'Set any values above the ___th percentile to None')},
{text: 'Remove Below Value', handler: applyFuncToEachWithInput('removeBelowValue', 'Set any values below ___ to None')},
{text: 'Remove Below Percentile', handler: applyFuncToEachWithInput('removeBelowPercentile', 'Set any values below the ___th percentile to None')}
]
},
{text: 'Most Deviant', handler: applyFuncToEachWithInput('mostDeviant', 'Draw the ___ metrics with the highest standard deviation')},
{text: 'Highest Current Value', handler: applyFuncToEachWithInput('highestCurrent', 'Draw the ___ metrics with the highest current value')},
{text: 'Lowest Current Value', handler: applyFuncToEachWithInput('lowestCurrent', 'Draw the ___ metrics with the lowest current value')},
{text: 'Highest Maximum Value', handler: applyFuncToEachWithInput('highestMax', 'Draw the ___ metrics with the highest maximum value')},
{text: 'Nth Percentile Value', handler: applyFuncToEachWithInput('nPercentile', 'Draw the ___th Percentile for each metric.')},
{text: 'Remove Between Percentile', handler: applyFuncToEachWithInput('removeBetweenPercentile', 'Draw the metrics that have a value outside the ___th percentile of the average at a time')},
{text: 'Current Value Above', handler: applyFuncToEachWithInput('currentAbove', 'Draw all metrics whose current value is above ___')},
{text: 'Current Value Below', handler: applyFuncToEachWithInput('currentBelow', 'Draw all metrics whose current value is below ___')},
{text: 'Highest Average Value', handler: applyFuncToEachWithInput('highestAverage', 'Draw the ___ metrics with the highest average value')},
{text: 'Lowest Average Value', handler: applyFuncToEachWithInput('lowestAverage', 'Draw the ___ metrics with the lowest average value')},
{text: 'Average Value Above', handler: applyFuncToEachWithInput('averageAbove', 'Draw all metrics whose average value is above ___')},
{text: 'Average Value Below', handler: applyFuncToEachWithInput('averageBelow', 'Draw all metrics whose average value is below ___')},
      {text: 'Average Outside Percentile Value', handler: applyFuncToEachWithInput('averageOutsidePercentile', 'Draw the metrics whose average lies outside the ___th percentile of all averages')},
{text: 'Maximum Value Above', handler: applyFuncToEachWithInput('maximumAbove', 'Draw all metrics whose maximum value is above ___')},
{text: 'Maximum Value Below', handler: applyFuncToEachWithInput('maximumBelow', 'Draw all metrics whose maximum value is below ___')},
{text: 'Minimum Value Above', handler: applyFuncToEachWithInput('minimumAbove', 'Draw all metrics whose minimum value is above ___')},
{text: 'Minimum Value Below', handler: applyFuncToEachWithInput('minimumBelow', 'Draw all metrics whose minimum value is below ___')},
{text: 'sortByName', handler: applyFuncToEach('sortByName')},
{text: 'sortByTotal', handler: applyFuncToEach('sortByTotal')},
{text: 'sortByMaxima', handler: applyFuncToEach('sortByMaxima')},
{text: 'sortByMinima', handler: applyFuncToEach('sortByMinima')},
{text: 'limit', handler: applyFuncToEachWithInput('limit', 'Limit to first ___ of a list of metrics')},
{text: 'Exclude', handler: applyFuncToEachWithInput('exclude', 'Exclude metrics that match a regular expression')},
{text: 'Grep', handler: applyFuncToEachWithInput('grep', 'Exclude metrics that don\'t match a regular expression')}
]
}, {
text: 'Special',
menu: [
{text: 'Set Legend Name', handler: applyFuncToEachWithInput('alias', 'Enter a legend label for this graph target', {quote: true})},
{text: 'Set Legend Name By Metric', handler: applyFuncToEach('aliasByMetric')},
{text: 'Set Legend Name By Node', handler: applyFuncToEachWithInput('aliasByNode', 'Enter the 0-indexed node to display')},
{text: 'Add Values to Legend Name',
menu: [
{text: "Cacti Style Legend", handler: applyFuncToEach('cactiStyle')},
{text: "Last Value", handler: applyFuncToEach('legendValue', '"last"')},
{text: "Average Value", handler: applyFuncToEach('legendValue', '"avg"')},
{text: "Total Value", handler: applyFuncToEach('legendValue', '"total"')},
{text: "Min Value", handler: applyFuncToEach('legendValue', '"min"')},
{text: "Max Value", handler: applyFuncToEach('legendValue', '"max"')}
]},
{text: 'Color', handler: applyFuncToEachWithInput('color', 'Set the color for this graph target', {quote: true})},
{text: 'Alpha', handler: applyFuncToEachWithInput('alpha', 'Set the alpha (transparency) for this graph target (between 0.0 and 1.0)')},
{text: 'Consolidate By',
menu: [
{text: "Sum", handler: applyFuncToEach('consolidateBy', '"sum"')},
{text: "Max", handler: applyFuncToEach('consolidateBy', '"max"')},
{text: "Min", handler: applyFuncToEach('consolidateBy', '"min"')}
]},
{text: 'Draw non-zero As Infinite', handler: applyFuncToEach('drawAsInfinite')},
{text: 'Line Width', handler: applyFuncToEachWithInput('lineWidth', 'Please enter a line width for this graph target')},
{text: 'Dashed Line', handler: applyFuncToEach('dashed')},
{text: 'Keep Last Value', handler: applyFuncToEachWithInput('keepLastValue', 'Please enter the maximum number of "None" datapoints to overwrite, or leave empty for no limit. (default: empty)', {allowBlank: true})},
{text: 'Changed', handler: applyFuncToEach('changed')},
{text: 'Transform Nulls', handler: applyFuncToEachWithInput('transformNull', 'Please enter the value to transform null values to')},
{text: 'Count non-nulls', handler: applyFuncToAll('isNonNull')},
{text: 'Substring', handler: applyFuncToEachWithInput('substr', 'Enter a starting position')},
{text: 'Group', handler: applyFuncToAll('group')},
{text: 'Area Between', handler: applyFuncToEach('areaBetween')},
// {text: 'GroupByNode', handler: applyFuncToEachWithInput('group')}, // requires 2 parameters
// {text: 'Add Threshold Line', handler: applyFuncToEachWithInput('threshold', 'Enter a threshold value')},
{text: 'Draw Stacked', handler: applyFuncToEach('stacked')},
{text: 'Draw in Second Y Axis', handler: applyFuncToEach('secondYAxis')},
{text: 'Aggregate Line',
menu: [
{text: "Avg", handler: applyFuncToEach('aggregateLine', '"avg"')},
{text: "Max", handler: applyFuncToEach('aggregateLine', '"max"')},
{text: "Min", handler: applyFuncToEach('aggregateLine', '"min"')}
]
}
]
}
];
}
/* Auto-Refresh feature */
function toggleAutoRefresh(button, pressed) {
//A closure makes this really simple
var doRefresh = function () {
Composer.updateImage();
//var interval = Math.min.apply(null, [context['interval'] for each (context in MetricContexts)] || [0]) || 60;
var interval = 60;
    button.timer = setTimeout(doRefresh, interval * 1000);
  };
if (button.timer) { // AutoRefresh is on
if (!pressed) { // The button was untoggled, turn off AutoRefresh
clearTimeout(button.timer);
button.timer = null;
}
} else { // AutoRefresh is off
if (pressed) { // The button was toggled, turn on AutoRefresh
doRefresh();
}
}
}
/* Display Options Menu */
function createOptionsMenu() {
var yAxisUnitMenu = new Ext.menu.Menu({
items: [
menuRadioItem("yUnit", "Standard", "yUnitSystem", "si"),
menuRadioItem("yUnit", "Binary", "yUnitSystem", "binary"),
menuRadioItem("yUnit", "None", "yUnitSystem", "none")
]
});
var yAxisSideMenu = new Ext.menu.Menu({
items: [
menuRadioItem("yAxis", "Left", "yAxisSide", "left"),
menuRadioItem("yAxis", "Right", "yAxisSide", "right")
]
});
var yAxisLeftMenu = new Ext.menu.Menu({
items: [
menuInputItem("Left Y Label", "vtitle", "Left Y Label", /^$/),
menuInputItem("Left Y Minimum", "yMinLeft"),
menuInputItem("Left Y Maximum", "yMaxLeft"),
menuInputItem("Left Y Limit", "yLimitLeft"),
menuInputItem("Left Y Step", "yStepLeft"),
menuInputItem("Left Line Width", "leftWidth"),
menuInputItem("Left Line Color", "leftColor"),
menuInputItem("Left Line Dashed (length, in px)", "leftDashed")
]
});
var yAxisRightMenu = new Ext.menu.Menu({
items: [
menuInputItem("Right Y Label", "vtitleRight", "Right Y Label", /^$/),
menuInputItem("Right Y Minimum", "yMinRight"),
menuInputItem("Right Y Maximum", "yMaxRight"),
menuInputItem("Right Y Limit", "yLimitRight"),
menuInputItem("Right Y Step", "yStepRight"),
menuInputItem("Right Line Width", "rightWidth"),
menuInputItem("Right Line Color", "rightColor"),
menuInputItem("Right Line Dashed (length, in px)", "rightDashed")
]
});
var SecondYAxisMenu = new Ext.menu.Menu({
items: [
{text: "Left Y-Axis", menu: yAxisLeftMenu},
{text: "Right Y-Axis", menu: yAxisRightMenu}
]
});
var yAxisMenu = new Ext.menu.Menu({
items: [
menuInputItem("Label", "vtitle", "Y-Axis Label", /^$/),
menuInputItem("Minimum", "yMin"),
menuInputItem("Maximum", "yMax"),
menuInputItem("Minor Lines", "minorY", "Enter the number of minor lines to draw", /^[a-zA-Z]/),
menuInputItem("Logarithmic Scale", "logBase", "Enter the logarithmic base to use (ie. 10, e, etc...)"),
menuInputItem("Step", "yStep", "Enter the Y-axis step to use (e.g. 0.2)"),
menuInputItem("Divisors", "yDivisors", "Enter the target number of intermediate Y-axis values (e.g. 4,5,6)", /^[a-zA-Z]/),
{text: "Unit", menu: yAxisUnitMenu},
{text: "Side", menu: yAxisSideMenu},
{text: "Dual Y-Axis Options", menu: SecondYAxisMenu},
menuHelpItem("Dual Y-Axis Help", "To select metrics to associate with the second (right-side) y-axis, go into the Graph Data dialog box, highlight a metric, click Apply Functions, Special, Second Y Axis.")
]
});
var xAxisMenu = new Ext.menu.Menu({
items: [
menuInputItem("Time Format", "xFormat", "Enter the time format (see Python's datetime.strftime())", /^$/),
menuInputItem("Timezone", "tz", "Enter the timezone to display (e.g. UTC or America/Chicago)", /^$/),
menuInputItem("Point-width Consolidation Threshold", "minXStep", "Enter the closest number of pixels between points before consolidation")
]
});
var areaMenu = new Ext.menu.Menu({
items: [
menuRadioItem("area", "None", "areaMode", ""),
menuRadioItem("area", "First Only", "areaMode", "first"),
menuRadioItem("area", "Stacked", "areaMode", "stacked"),
menuRadioItem("area", "All", "areaMode", "all")
]
});
var lineMenu = new Ext.menu.Menu({
items: [
menuRadioItem("line", "Slope Line (default)", "lineMode", ""),
menuRadioItem("line", "Staircase Line", "lineMode", "staircase"),
menuRadioItem("line", "Connected Line", "lineMode", "connected"),
menuInputItem("Connected Line Limit", "connectedLimit", "The number of consecutive None values to jump over when in connected line mode. (default: no limit, leave empty)"),
menuCheckItem("Draw Null as Zero", "drawNullAsZero")
]
});
var fontFacesMenu = new Ext.menu.Menu({
items: [
menuRadioItem("fontFace", "Sans", "fontName", "Sans"),
menuRadioItem("fontFace", "Times", "fontName", "Times"),
menuRadioItem("fontFace", "Courier", "fontName", "Courier"),
menuRadioItem("fontFace", "Helvetica", "fontName", "Helvetica")
]
});
var fontMenu = new Ext.menu.Menu({
items: [
{text: "Face", menu: fontFacesMenu},
{
text: "Style",
menu: {
items: [
menuCheckItem("Italics", "fontItalic"),
menuCheckItem("Bold", "fontBold")
]
}
},
menuInputItem("Size", "fontSize", "Enter the font size in pt"),
{text: "Color", menu: createColorMenu('fgcolor')}
]
});
var displayMenu = new Ext.menu.Menu({
items: [
{text: "Font", menu: fontMenu},
{
text: "Color",
menu: {
items: [
menuInputItem("Line Colors", "colorList", "Enter an ordered list of comma-separated colors (name or hex values)", /^$/),
{text: "Background", menu: createColorMenu('bgcolor')},
{text: "Major Grid Line", menu: createColorMenu('majorGridLineColor')},
{text: "Minor Grid Line", menu: createColorMenu('minorGridLineColor')},
menuInputItem("Filled Area Alpha Value", "areaAlpha", "Enter the alpha value (between 0.0 and 1.0)")
]
}
},
{
text: "Graph Legend",
menu: {
items: [
menuRadioItem("legend", "Hide If Too Many", "hideLegend"),
menuRadioItem("legend", "Always Hide", "hideLegend", "true"),
menuRadioItem("legend", "Never Hide", "hideLegend", "false"),
menuCheckItem("Hide Duplicate Items", "uniqueLegend")
]
}
},
menuInputItem("Line Thickness", "lineWidth", "Enter the line thickness in pixels"),
menuInputItem("Margin", "margin", "Enter the margin width in pixels"),
menuCheckItem("Graph Only", "graphOnly"),
menuCheckItem("Hide Axes", "hideAxes"),
menuCheckItem("Hide Y-Axis", "hideYAxis"),
menuCheckItem("Hide Grid", "hideGrid"),
menuInputItem("Apply Template", "template", "Enter the name of a template defined in graphTemplates.conf", /^$/)
]
});
return {
xtype: 'menu',
items: [
menuInputItem("Graph Title", "title", "Graph Title", /^$/),
{text: "Display", menu: displayMenu},
{text: "Line Mode", menu: lineMenu},
{text: "Area Mode", menu: areaMenu},
{text: "X-Axis", menu: xAxisMenu},
{text: "Y-Axis", menu: yAxisMenu}
]
};
}
/* Graph Options API */
function updateGraph() {
return Composer.updateImage();
}
function getParam(param) {
return Composer.url.getParam(param);
}
function setParam(param, value) {
return Composer.url.setParam(param, value);
}
function removeParam(param) {
return Composer.url.removeParam(param);
}
/* End of Graph Options API */
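/* Example use of the Graph Options API above (illustrative parameter values):
 *
 *   setParam('title', 'CPU usage');
 *   removeParam('hideLegend');
 *   updateGraph();
 */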
function createColorMenu(param) {
var colorPicker = new Ext.menu.ColorMenu({hideOnClick: false});
colorPicker.on('select',
function (palette, color) {
setParam(param, color);
updateGraph();
}
);
return colorPicker;
}
function menuInputItem(name, param, question, regexp) {
return new Ext.menu.Item({text: name, handler: paramPrompt(question || name, param, regexp)});
}
function menuHelpItem(name, message) {
return new Ext.menu.Item({text: name, handler: helpMessage(name, message)});
}
function paramPrompt(question, param, regexp) {
  if (regexp == null) {
regexp = /[^A-Za-z0-9_.\-]/;
}
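  // Callers pass /^$/ to accept any non-empty input: that pattern matches
  // only the empty string, so empty input is rejected below while anything
  // else passes. The default pattern above rejects any character outside
  // [A-Za-z0-9_.-].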
return function (menuItem, e) {
Ext.MessageBox.prompt(
"Input Required",
question,
function (button, value) {
if (value.search(regexp) != -1) {
Ext.Msg.alert("Input can only contain letters, numbers, underscores, or periods.");
return;
}
if (value.charAt(value.length - 1) == '.') {
Ext.Msg.alert("Input cannot end in a period.");
return;
}
setParam(param, value);
updateGraph();
},
this, //scope
false, //multiline
getParam(param) || "" //default value
);
};
}
function helpMessage(myTitle, myMessage) {
return function (menuItem, e) {
Ext.MessageBox.show(
{title: myTitle,
msg: myMessage,
       buttons: Ext.MessageBox.OK
}
);
};
}
var checkItems = [];
function menuCheckItem(name, param, paramValue) {
var checkItem = new Ext.menu.CheckItem({text: name, param: param, hideOnClick: false});
checkItems.push(checkItem); //keep a list of each check item we create so we can update them later
checkItem.on('checkchange',
function (item, checked) {
if (paramValue) { // Set param to a specific value
if (checked) {
setParam(param, paramValue);
} else { // Remove the param if we're being unchecked
removeParam(param);
}
} else { // Set the param to true/false
setParam(param, checked.toString());
}
updateGraph();
}
);
return checkItem;
}
function menuRadioItem(groupName, name, param, paramValue ) {
var selectItem = new Ext.menu.CheckItem({text: name, param: param, hideOnClick: false, group: groupName, checked: (paramValue ? false : true)});
selectItem.on('checkchange',
function( item, clicked ) {
if( paramValue ) {
setParam(param, paramValue);
} else {
removeParam(param);
}
updateGraph();
}
);
return selectItem;
}
function updateCheckItems() {
Ext.each(checkItems,
function (item) {
var param = item.initialConfig.param;
item.setChecked(getParam(param) ? true : false, true);
}
);
}
| 1 | 9,412 | Note this should be `delay` instead of `scale` here. I'll fix this in my branch. | graphite-project-graphite-web | py |
@@ -87,7 +87,6 @@ class Uppy {
this.upload = this.upload.bind(this)
this.emitter = ee()
- this.on = this.emitter.on.bind(this.emitter)
this.off = this.emitter.off.bind(this.emitter)
this.once = this.emitter.once.bind(this.emitter)
this.emit = this.emitter.emit.bind(this.emitter) | 1 | const Utils = require('../core/Utils')
const Translator = require('../core/Translator')
const ee = require('namespace-emitter')
const cuid = require('cuid')
const throttle = require('lodash.throttle')
const prettyBytes = require('prettier-bytes')
const match = require('mime-match')
const DefaultStore = require('../store/DefaultStore')
/**
* Uppy Core module.
* Manages plugins, state updates, acts as an event bus,
* adds/removes files and metadata.
*
* @param {object} opts — Uppy options
*/
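// A minimal usage sketch (hypothetical option values, based on the API
// defined below):
//
//   const uppy = new Uppy({ autoProceed: false, debug: true })
//   uppy.run()
//   uppy.on('complete', (result) => console.log(result.successful))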
class Uppy {
constructor (opts) {
const defaultLocale = {
strings: {
youCanOnlyUploadX: {
0: 'You can only upload %{smart_count} file',
1: 'You can only upload %{smart_count} files'
},
youHaveToAtLeastSelectX: {
0: 'You have to select at least %{smart_count} file',
1: 'You have to select at least %{smart_count} files'
},
exceedsSize: 'This file exceeds maximum allowed size of',
youCanOnlyUploadFileTypes: 'You can only upload:',
uppyServerError: 'Connection with Uppy Server failed'
}
}
// set default options
const defaultOptions = {
id: 'uppy',
autoProceed: true,
debug: false,
restrictions: {
maxFileSize: false,
maxNumberOfFiles: false,
minNumberOfFiles: false,
allowedFileTypes: false
},
meta: {},
onBeforeFileAdded: (currentFile, files) => Promise.resolve(),
onBeforeUpload: (files, done) => Promise.resolve(),
locale: defaultLocale,
store: new DefaultStore()
}
// Merge default options with the ones set by user
this.opts = Object.assign({}, defaultOptions, opts)
this.locale = Object.assign({}, defaultLocale, this.opts.locale)
this.locale.strings = Object.assign({}, defaultLocale.strings, this.opts.locale.strings)
// i18n
this.translator = new Translator({locale: this.locale})
this.i18n = this.translator.translate.bind(this.translator)
// Container for different types of plugins
this.plugins = {}
this.getState = this.getState.bind(this)
this.getPlugin = this.getPlugin.bind(this)
this.setFileMeta = this.setFileMeta.bind(this)
this.setFileState = this.setFileState.bind(this)
this.log = this.log.bind(this)
this.info = this.info.bind(this)
this.hideInfo = this.hideInfo.bind(this)
this.addFile = this.addFile.bind(this)
this.removeFile = this.removeFile.bind(this)
this.pauseResume = this.pauseResume.bind(this)
this._calculateProgress = this._calculateProgress.bind(this)
this.updateOnlineStatus = this.updateOnlineStatus.bind(this)
this.resetProgress = this.resetProgress.bind(this)
this.pauseAll = this.pauseAll.bind(this)
this.resumeAll = this.resumeAll.bind(this)
this.retryAll = this.retryAll.bind(this)
this.cancelAll = this.cancelAll.bind(this)
this.retryUpload = this.retryUpload.bind(this)
this.upload = this.upload.bind(this)
this.emitter = ee()
this.on = this.emitter.on.bind(this.emitter)
this.off = this.emitter.off.bind(this.emitter)
this.once = this.emitter.once.bind(this.emitter)
this.emit = this.emitter.emit.bind(this.emitter)
this.preProcessors = []
this.uploaders = []
this.postProcessors = []
this.store = this.opts.store
this.setState({
plugins: {},
files: {},
currentUploads: {},
capabilities: {
resumableUploads: false
},
totalProgress: 0,
meta: Object.assign({}, this.opts.meta),
info: {
isHidden: true,
type: 'info',
message: ''
}
})
this._storeUnsubscribe = this.store.subscribe((prevState, nextState, patch) => {
this.emit('state-update', prevState, nextState, patch)
this.updateAll(nextState)
})
// for debugging and testing
// this.updateNum = 0
if (this.opts.debug) {
global.uppyLog = ''
global[this.opts.id] = this
}
}
/**
* Iterate on all plugins and run `update` on them.
* Called each time state changes.
*
*/
updateAll (state) {
this.iteratePlugins(plugin => {
plugin.update(state)
})
}
/**
* Updates state
*
   * @param {object} patch
*/
setState (patch) {
this.store.setState(patch)
}
/**
* Returns current state.
*/
getState () {
return this.store.getState()
}
/**
* Back compat for when this.state is used instead of this.getState().
*/
get state () {
return this.getState()
}
/**
* Shorthand to set state for a specific file.
*/
setFileState (fileID, state) {
this.setState({
files: Object.assign({}, this.getState().files, {
[fileID]: Object.assign({}, this.getState().files[fileID], state)
})
})
}
resetProgress () {
const defaultProgress = {
percentage: 0,
bytesUploaded: 0,
uploadComplete: false,
uploadStarted: false
}
const files = Object.assign({}, this.getState().files)
const updatedFiles = {}
Object.keys(files).forEach(fileID => {
const updatedFile = Object.assign({}, files[fileID])
updatedFile.progress = Object.assign({}, updatedFile.progress, defaultProgress)
updatedFiles[fileID] = updatedFile
})
this.setState({
files: updatedFiles,
totalProgress: 0
})
// TODO Document on the website
this.emit('reset-progress')
}
addPreProcessor (fn) {
this.preProcessors.push(fn)
}
removePreProcessor (fn) {
const i = this.preProcessors.indexOf(fn)
if (i !== -1) {
this.preProcessors.splice(i, 1)
}
}
addPostProcessor (fn) {
this.postProcessors.push(fn)
}
removePostProcessor (fn) {
const i = this.postProcessors.indexOf(fn)
if (i !== -1) {
this.postProcessors.splice(i, 1)
}
}
addUploader (fn) {
this.uploaders.push(fn)
}
removeUploader (fn) {
const i = this.uploaders.indexOf(fn)
if (i !== -1) {
this.uploaders.splice(i, 1)
}
}
setMeta (data) {
const updatedMeta = Object.assign({}, this.getState().meta, data)
const updatedFiles = Object.assign({}, this.getState().files)
Object.keys(updatedFiles).forEach((fileID) => {
updatedFiles[fileID] = Object.assign({}, updatedFiles[fileID], {
meta: Object.assign({}, updatedFiles[fileID].meta, data)
})
})
this.log('Adding metadata:')
this.log(data)
this.setState({
meta: updatedMeta,
files: updatedFiles
})
}
setFileMeta (fileID, data) {
const updatedFiles = Object.assign({}, this.getState().files)
if (!updatedFiles[fileID]) {
this.log('Was trying to set metadata for a file that’s not with us anymore: ', fileID)
return
}
const newMeta = Object.assign({}, updatedFiles[fileID].meta, data)
updatedFiles[fileID] = Object.assign({}, updatedFiles[fileID], {
meta: newMeta
})
this.setState({files: updatedFiles})
}
/**
* Get a file object.
*
* @param {string} fileID The ID of the file object to return.
*/
getFile (fileID) {
return this.getState().files[fileID]
}
/**
* Check if minNumberOfFiles restriction is reached before uploading.
*
* @return {boolean}
* @private
*/
_checkMinNumberOfFiles () {
const {minNumberOfFiles} = this.opts.restrictions
if (Object.keys(this.getState().files).length < minNumberOfFiles) {
this.info(`${this.i18n('youHaveToAtLeastSelectX', { smart_count: minNumberOfFiles })}`, 'error', 5000)
return false
}
return true
}
/**
* Check if file passes a set of restrictions set in options: maxFileSize,
* maxNumberOfFiles and allowedFileTypes.
*
* @param {object} file object to check
* @return {boolean}
* @private
*/
_checkRestrictions (file) {
const {maxFileSize, maxNumberOfFiles, allowedFileTypes} = this.opts.restrictions
if (maxNumberOfFiles) {
if (Object.keys(this.getState().files).length + 1 > maxNumberOfFiles) {
this.info(`${this.i18n('youCanOnlyUploadX', { smart_count: maxNumberOfFiles })}`, 'error', 5000)
return false
}
}
if (allowedFileTypes) {
const isCorrectFileType = allowedFileTypes.filter((type) => {
if (!file.type) return false
return match(file.type, type)
}).length > 0
if (!isCorrectFileType) {
const allowedFileTypesString = allowedFileTypes.join(', ')
this.info(`${this.i18n('youCanOnlyUploadFileTypes')} ${allowedFileTypesString}`, 'error', 5000)
return false
}
}
if (maxFileSize) {
if (file.data.size > maxFileSize) {
this.info(`${this.i18n('exceedsSize')} ${prettyBytes(maxFileSize)}`, 'error', 5000)
return false
}
}
return true
}
/**
* Add a new file to `state.files`. This will run `onBeforeFileAdded`,
* try to guess file type in a clever way, check file against restrictions,
* and start an upload if `autoProceed === true`.
*
* @param {object} file object to add
*/
addFile (file) {
// Wrap this in a Promise `.then()` handler so errors will reject the Promise
// instead of throwing.
const beforeFileAdded = Promise.resolve()
.then(() => this.opts.onBeforeFileAdded(file, this.getState().files))
return beforeFileAdded.catch((err) => {
const message = typeof err === 'object' ? err.message : err
this.info(message, 'error', 5000)
return Promise.reject(new Error(`onBeforeFileAdded: ${message}`))
}).then(() => {
return Utils.getFileType(file).then((fileType) => {
const updatedFiles = Object.assign({}, this.getState().files)
let fileName
if (file.name) {
fileName = file.name
} else if (fileType.split('/')[0] === 'image') {
fileName = fileType.split('/')[0] + '.' + fileType.split('/')[1]
} else {
fileName = 'noname'
}
const fileExtension = Utils.getFileNameAndExtension(fileName).extension
const isRemote = file.isRemote || false
const fileID = Utils.generateFileID(file)
const newFile = {
source: file.source || '',
id: fileID,
name: fileName,
extension: fileExtension || '',
meta: Object.assign({}, this.getState().meta, {
name: fileName,
type: fileType
}),
type: fileType,
data: file.data,
progress: {
percentage: 0,
bytesUploaded: 0,
bytesTotal: file.data.size || 0,
uploadComplete: false,
uploadStarted: false
},
size: file.data.size || 0,
isRemote: isRemote,
remote: file.remote || '',
preview: file.preview
}
const isFileAllowed = this._checkRestrictions(newFile)
if (!isFileAllowed) {
return Promise.reject(new Error('File not allowed'))
}
updatedFiles[fileID] = newFile
this.setState({files: updatedFiles})
this.emit('file-added', newFile)
this.log(`Added file: ${fileName}, ${fileID}, mime type: ${fileType}`)
if (this.opts.autoProceed && !this.scheduledAutoProceed) {
this.scheduledAutoProceed = setTimeout(() => {
this.scheduledAutoProceed = null
this.upload().catch((err) => {
console.error(err.stack || err.message || err)
})
}, 4)
}
})
})
}
removeFile (fileID) {
const { files, currentUploads } = this.state
const updatedFiles = Object.assign({}, files)
const removedFile = updatedFiles[fileID]
delete updatedFiles[fileID]
// Remove this file from its `currentUpload`.
const updatedUploads = Object.assign({}, currentUploads)
const removeUploads = []
Object.keys(updatedUploads).forEach((uploadID) => {
const newFileIDs = currentUploads[uploadID].fileIDs.filter((uploadFileID) => uploadFileID !== fileID)
// Remove the upload if no files are associated with it anymore.
if (newFileIDs.length === 0) {
removeUploads.push(uploadID)
return
}
updatedUploads[uploadID] = Object.assign({}, currentUploads[uploadID], {
fileIDs: newFileIDs
})
})
this.setState({
currentUploads: updatedUploads,
files: updatedFiles
})
removeUploads.forEach((uploadID) => {
this._removeUpload(uploadID)
})
this._calculateTotalProgress()
this.emit('file-removed', fileID)
// Clean up object URLs.
if (removedFile.preview && Utils.isObjectURL(removedFile.preview)) {
URL.revokeObjectURL(removedFile.preview)
}
this.log(`Removed file: ${fileID}`)
}
pauseResume (fileID) {
const updatedFiles = Object.assign({}, this.getState().files)
if (updatedFiles[fileID].uploadComplete) return
const wasPaused = updatedFiles[fileID].isPaused || false
const isPaused = !wasPaused
const updatedFile = Object.assign({}, updatedFiles[fileID], {
isPaused: isPaused
})
updatedFiles[fileID] = updatedFile
this.setState({files: updatedFiles})
this.emit('upload-pause', fileID, isPaused)
return isPaused
}
pauseAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => {
return !updatedFiles[file].progress.uploadComplete &&
updatedFiles[file].progress.uploadStarted
})
inProgressUpdatedFiles.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: true
})
updatedFiles[file] = updatedFile
})
this.setState({files: updatedFiles})
this.emit('pause-all')
}
resumeAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => {
return !updatedFiles[file].progress.uploadComplete &&
updatedFiles[file].progress.uploadStarted
})
inProgressUpdatedFiles.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: false,
error: null
})
updatedFiles[file] = updatedFile
})
this.setState({files: updatedFiles})
this.emit('resume-all')
}
retryAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const filesToRetry = Object.keys(updatedFiles).filter(file => {
return updatedFiles[file].error
})
filesToRetry.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: false,
error: null
})
updatedFiles[file] = updatedFile
})
this.setState({
files: updatedFiles,
error: null
})
this.emit('retry-all', filesToRetry)
const uploadID = this._createUpload(filesToRetry)
return this._runUpload(uploadID)
}
cancelAll () {
this.emit('cancel-all')
this.setState({ files: {}, totalProgress: 0 })
}
retryUpload (fileID) {
const updatedFiles = Object.assign({}, this.getState().files)
const updatedFile = Object.assign({}, updatedFiles[fileID],
{ error: null, isPaused: false }
)
updatedFiles[fileID] = updatedFile
this.setState({
files: updatedFiles
})
this.emit('upload-retry', fileID)
const uploadID = this._createUpload([ fileID ])
return this._runUpload(uploadID)
}
reset () {
this.cancelAll()
}
_calculateProgress (data) {
const fileID = data.id
// skip progress event for a file that’s been removed
if (!this.getFile(fileID)) {
this.log('Trying to set progress for a file that’s been removed: ', fileID)
return
}
this.setFileState(fileID, {
progress: Object.assign({}, this.getState().files[fileID].progress, {
bytesUploaded: data.bytesUploaded,
bytesTotal: data.bytesTotal,
percentage: Math.floor((data.bytesUploaded / data.bytesTotal * 100).toFixed(2))
})
})
this._calculateTotalProgress()
}
_calculateTotalProgress () {
    // Calculate total progress: the number of files currently uploading,
    // multiplied by 100, is the maximum, compared against the sum of each
    // file's individual progress.
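    // e.g. two in-progress files at 50% and 100%: progressMax = 200,
    // progressAll = 150, totalProgress = floor(150 * 100 / 200) = 75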
const files = Object.assign({}, this.getState().files)
const inProgress = Object.keys(files).filter((file) => {
return files[file].progress.uploadStarted
})
const progressMax = inProgress.length * 100
let progressAll = 0
inProgress.forEach((file) => {
progressAll = progressAll + files[file].progress.percentage
})
const totalProgress = progressMax === 0 ? 0 : Math.floor((progressAll * 100 / progressMax).toFixed(2))
this.setState({
totalProgress: totalProgress
})
}
/**
* Registers listeners for all global actions, like:
* `error`, `file-removed`, `upload-progress`
*
*/
actions () {
// const log = this.log
// this.on('*', function (payload) {
// log(`[Core] Event: ${this.event}`)
// log(payload)
// })
// stress-test re-rendering
// setInterval(() => {
// this.setState({bla: 'bla'})
// }, 20)
this.on('error', (error) => {
this.setState({ error: error.message })
})
this.on('upload-error', (fileID, error) => {
this.setFileState(fileID, { error: error.message })
this.setState({ error: error.message })
const fileName = this.getState().files[fileID].name
let message = `Failed to upload ${fileName}`
if (typeof error === 'object' && error.message) {
message = { message: message, details: error.message }
}
this.info(message, 'error', 5000)
})
this.on('upload', () => {
this.setState({ error: null })
})
// this.on('file-add', (data) => {
// this.addFile(data)
// })
this.on('file-remove', (fileID) => {
this.removeFile(fileID)
})
this.on('upload-started', (fileID, upload) => {
const file = this.getFile(fileID)
this.setFileState(fileID, {
progress: Object.assign({}, file.progress, {
uploadStarted: Date.now(),
uploadComplete: false,
percentage: 0,
bytesUploaded: 0,
bytesTotal: file.size
})
})
})
// upload progress events can occur frequently, especially when you have a good
    // connection to the remote server. Therefore, we are throttling them to
    // prevent excessive function calls.
// see also: https://github.com/tus/tus-js-client/commit/9940f27b2361fd7e10ba58b09b60d82422183bbb
const _throttledCalculateProgress = throttle(this._calculateProgress, 100, { leading: true, trailing: false })
this.on('upload-progress', _throttledCalculateProgress)
this.on('upload-success', (fileID, uploadResp, uploadURL) => {
this.setFileState(fileID, {
progress: Object.assign({}, this.getState().files[fileID].progress, {
uploadComplete: true,
percentage: 100
}),
uploadURL: uploadURL,
isPaused: false
})
this._calculateTotalProgress()
})
this.on('preprocess-progress', (fileID, progress) => {
this.setFileState(fileID, {
progress: Object.assign({}, this.getState().files[fileID].progress, {
preprocess: progress
})
})
})
this.on('preprocess-complete', (fileID) => {
const files = Object.assign({}, this.getState().files)
files[fileID] = Object.assign({}, files[fileID], {
progress: Object.assign({}, files[fileID].progress)
})
delete files[fileID].progress.preprocess
this.setState({ files: files })
})
this.on('postprocess-progress', (fileID, progress) => {
this.setFileState(fileID, {
progress: Object.assign({}, this.getState().files[fileID].progress, {
postprocess: progress
})
})
})
this.on('postprocess-complete', (fileID) => {
const files = Object.assign({}, this.getState().files)
files[fileID] = Object.assign({}, files[fileID], {
progress: Object.assign({}, files[fileID].progress)
})
delete files[fileID].progress.postprocess
// TODO should we set some kind of `fullyComplete` property on the file object
// so it's easier to see that the file is upload…fully complete…rather than
// what we have to do now (`uploadComplete && !postprocess`)
this.setState({ files: files })
})
this.on('restored', () => {
// Files may have changed--ensure progress is still accurate.
this._calculateTotalProgress()
})
// show informer if offline
if (typeof window !== 'undefined') {
window.addEventListener('online', () => this.updateOnlineStatus())
window.addEventListener('offline', () => this.updateOnlineStatus())
setTimeout(() => this.updateOnlineStatus(), 3000)
}
}
updateOnlineStatus () {
const online =
typeof window.navigator.onLine !== 'undefined'
? window.navigator.onLine
: true
if (!online) {
this.emit('is-offline')
this.info('No internet connection', 'error', 0)
this.wasOffline = true
} else {
this.emit('is-online')
if (this.wasOffline) {
this.emit('back-online')
this.info('Connected!', 'success', 3000)
this.wasOffline = false
}
}
}
getID () {
return this.opts.id
}
/**
* Registers a plugin with Core.
*
* @param {Class} Plugin object
* @param {Object} options object that will be passed to Plugin later
* @return {Object} self for chaining
*/
use (Plugin, opts) {
if (typeof Plugin !== 'function') {
let msg = `Expected a plugin class, but got ${Plugin === null ? 'null' : typeof Plugin}.` +
' Please verify that the plugin was imported and spelled correctly.'
throw new TypeError(msg)
}
// Instantiate
const plugin = new Plugin(this, opts)
const pluginId = plugin.id
this.plugins[plugin.type] = this.plugins[plugin.type] || []
if (!pluginId) {
throw new Error('Your plugin must have an id')
}
if (!plugin.type) {
throw new Error('Your plugin must have a type')
}
let existsPluginAlready = this.getPlugin(pluginId)
if (existsPluginAlready) {
let msg = `Already found a plugin named '${existsPluginAlready.id}'.
Tried to use: '${pluginId}'.
Uppy is currently limited to running one of every plugin.
Share your use case with us over at
https://github.com/transloadit/uppy/issues/
if you want us to reconsider.`
throw new Error(msg)
}
this.plugins[plugin.type].push(plugin)
plugin.install()
return this
}
/**
* Find one Plugin by name.
*
   * @param {string} name The id of the plugin to find
*/
getPlugin (name) {
let foundPlugin = false
this.iteratePlugins((plugin) => {
const pluginName = plugin.id
if (pluginName === name) {
foundPlugin = plugin
return false
}
})
return foundPlugin
}
/**
* Iterate through all `use`d plugins.
*
   * @param {function} method Function to call on each plugin
*/
iteratePlugins (method) {
Object.keys(this.plugins).forEach(pluginType => {
this.plugins[pluginType].forEach(method)
})
}
/**
* Uninstall and remove a plugin.
*
* @param {Plugin} instance The plugin instance to remove.
*/
removePlugin (instance) {
const list = this.plugins[instance.type]
if (instance.uninstall) {
instance.uninstall()
}
const index = list.indexOf(instance)
if (index !== -1) {
list.splice(index, 1)
}
}
/**
* Uninstall all plugins and close down this Uppy instance.
*/
close () {
this.reset()
this._storeUnsubscribe()
this.iteratePlugins((plugin) => {
plugin.uninstall()
})
}
/**
* Set info message in `state.info`, so that UI plugins like `Informer`
* can display the message.
*
* @param {string} msg Message to be displayed by the informer
*/
info (message, type = 'info', duration = 3000) {
const isComplexMessage = typeof message === 'object'
this.setState({
info: {
isHidden: false,
type: type,
message: isComplexMessage ? message.message : message,
details: isComplexMessage ? message.details : null
}
})
this.emit('info-visible')
window.clearTimeout(this.infoTimeoutID)
if (duration === 0) {
this.infoTimeoutID = undefined
return
}
// hide the informer after `duration` milliseconds
this.infoTimeoutID = setTimeout(this.hideInfo, duration)
}
hideInfo () {
const newInfo = Object.assign({}, this.getState().info, {
isHidden: true
})
this.setState({
info: newInfo
})
this.emit('info-hidden')
}
/**
* Logs stuff to console, only if `debug` is set to true. Silent in production.
*
* @param {String|Object} msg to log
* @param {String} type optional `error` or `warning`
*/
log (msg, type) {
if (!this.opts.debug) {
return
}
let message = `[Uppy] [${Utils.getTimeStamp()}] ${msg}`
global.uppyLog = global.uppyLog + '\n' + 'DEBUG LOG: ' + msg
if (type === 'error') {
console.error(message)
return
}
if (type === 'warning') {
console.warn(message)
return
}
if (msg === `${msg}`) {
console.log(message)
} else {
message = `[Uppy] [${Utils.getTimeStamp()}]`
console.log(message)
console.dir(msg)
}
}
/**
* Initializes actions.
*
*/
run () {
this.log('Core is run, initializing actions...')
this.actions()
return this
}
/**
* Restore an upload by its ID.
*/
restore (uploadID) {
this.log(`Core: attempting to restore upload "${uploadID}"`)
if (!this.getState().currentUploads[uploadID]) {
this._removeUpload(uploadID)
return Promise.reject(new Error('Nonexistent upload'))
}
return this._runUpload(uploadID)
}
/**
* Create an upload for a bunch of files.
*
* @param {Array<string>} fileIDs File IDs to include in this upload.
* @return {string} ID of this upload.
*/
_createUpload (fileIDs) {
const uploadID = cuid()
this.emit('upload', {
id: uploadID,
fileIDs: fileIDs
})
this.setState({
currentUploads: Object.assign({}, this.getState().currentUploads, {
[uploadID]: {
fileIDs: fileIDs,
step: 0,
result: {}
}
})
})
return uploadID
}
_getUpload (uploadID) {
return this.getState().currentUploads[uploadID]
}
/**
* Add data to an upload's result object.
*
* @param {string} uploadID The ID of the upload.
* @param {object} data Data properties to add to the result object.
*/
addResultData (uploadID, data) {
const currentUploads = this.getState().currentUploads
const currentUpload = Object.assign({}, currentUploads[uploadID], {
result: Object.assign({}, currentUploads[uploadID].result, data)
})
this.setState({
currentUploads: Object.assign({}, currentUploads, {
[uploadID]: currentUpload
})
})
}
/**
* Remove an upload, eg. if it has been canceled or completed.
*
* @param {string} uploadID The ID of the upload.
*/
_removeUpload (uploadID) {
const currentUploads = Object.assign({}, this.getState().currentUploads)
delete currentUploads[uploadID]
this.setState({
currentUploads: currentUploads
})
}
/**
* Run an upload. This picks up where it left off in case the upload is being restored.
*
* @private
*/
_runUpload (uploadID) {
const uploadData = this.getState().currentUploads[uploadID]
const fileIDs = uploadData.fileIDs
const restoreStep = uploadData.step
const steps = [
...this.preProcessors,
...this.uploaders,
...this.postProcessors
]
let lastStep = Promise.resolve()
steps.forEach((fn, step) => {
// Skip this step if we are restoring and have already completed this step before.
if (step < restoreStep) {
return
}
lastStep = lastStep.then(() => {
const { currentUploads } = this.getState()
const currentUpload = Object.assign({}, currentUploads[uploadID], {
step: step
})
this.setState({
currentUploads: Object.assign({}, currentUploads, {
[uploadID]: currentUpload
})
})
// TODO give this the `currentUpload` object as its only parameter maybe?
// Otherwise when more metadata may be added to the upload this would keep getting more parameters
return fn(fileIDs, uploadID)
}).then((result) => {
return null
})
})
// Not returning the `catch`ed promise, because we still want to return a rejected
// promise from this method if the upload failed.
lastStep.catch((err) => {
this.emit('error', err)
this._removeUpload(uploadID)
})
return lastStep.then(() => {
const files = fileIDs.map((fileID) => this.getFile(fileID))
const successful = files.filter((file) => file && !file.error)
const failed = files.filter((file) => file && file.error)
this.addResultData(uploadID, { successful, failed, uploadID })
const { currentUploads } = this.getState()
const result = currentUploads[uploadID].result
this.emit('complete', result)
// Compatibility with pre-0.21
this.emit('success', fileIDs)
this._removeUpload(uploadID)
return result
})
}
/**
* Start an upload for all the files that are not currently being uploaded.
*
* @return {Promise}
*/
upload () {
if (!this.plugins.uploader) {
this.log('No uploader type plugins are used', 'warning')
}
const isMinNumberOfFilesReached = this._checkMinNumberOfFiles()
if (!isMinNumberOfFilesReached) {
return Promise.reject(new Error('Minimum number of files has not been reached'))
}
const beforeUpload = Promise.resolve()
.then(() => this.opts.onBeforeUpload(this.getState().files))
return beforeUpload.catch((err) => {
const message = typeof err === 'object' ? err.message : err
this.info(message, 'error', 5000)
return Promise.reject(new Error(`onBeforeUpload: ${message}`))
}).then(() => {
const { currentUploads } = this.getState()
// get a list of files that are currently assigned to uploads
const currentlyUploadingFiles = Object.keys(currentUploads).reduce((prev, curr) => prev.concat(currentUploads[curr].fileIDs), [])
const waitingFileIDs = []
Object.keys(this.getState().files).forEach((fileID) => {
const file = this.getFile(fileID)
// if the file hasn't started uploading and hasn't already been assigned to an upload...
if ((!file.progress.uploadStarted) && (currentlyUploadingFiles.indexOf(fileID) === -1)) {
waitingFileIDs.push(file.id)
}
})
const uploadID = this._createUpload(waitingFileIDs)
return this._runUpload(uploadID)
})
}
}
module.exports = function (opts) {
return new Uppy(opts)
}
// Expose class constructor.
module.exports.Uppy = Uppy
| 1 | 10,523 | `on` likely still needs a bind, similar to `this.resetProgress = this.resetProgress.bind(this)` correct? | transloadit-uppy | js |
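For context, the constructor-binding pattern the reviewer refers to looks roughly like this (a sketch; apart from resetProgress, which the comment itself names, the other bound methods are assumptions, not taken from the file):

class Uppy {
  constructor (opts) {
    // Bind methods that get handed around as callbacks so they keep `this`.
    this.resetProgress = this.resetProgress.bind(this)
    this.log = this.log.bind(this) // assumed to need the same treatment
    this.on = this.on.bind(this)   // the binding the reviewer says is missing
  }
}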
@@ -1,4 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
+import random
+
+import torch
+import torch.distributed as dist
+import torch.nn.functional as F
+from mmcv.runner import get_dist_info
+
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
| 1 | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOX(SingleStageDetector):
r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021
<https://arxiv.org/abs/2107.08430>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
| 1 | 26,397 | Considering the trade-off between training speed and accuracy, multi-scale training is temporarily kept. More elegant implementation will be adopted in the future. | open-mmlab-mmdetection | py |
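For context, in YOLOX-style training the imports added by this patch are typically used to synchronise a randomly chosen input scale across distributed workers. A hedged sketch of that mechanism follows; the function name, the multiscale range and the size divisor are illustrative assumptions, not taken from the patch:

def _random_multiscale_resize(img, multiscale_range=(10, 20), size_divisor=32):
    # Rank 0 samples a scale index and broadcasts it so that every worker
    # resizes its batch to the same input size this iteration.
    rank, _ = get_dist_info()
    scale = torch.LongTensor(1).to(img.device)
    if rank == 0:
        scale[0] = random.randint(*multiscale_range)
    if dist.is_available() and dist.is_initialized():
        dist.broadcast(scale, 0)
    size = int(scale.item()) * size_divisor
    return F.interpolate(img, size=(size, size), mode='bilinear',
                         align_corners=False)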
@@ -38,6 +38,7 @@ import (
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"context"
+
"github.com/cihub/seelog"
"github.com/pkg/errors"
) | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package engine contains the core logic for managing tasks
package engine
import (
"regexp"
"strconv"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/containermetadata"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/engine/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/resources"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
"github.com/aws/amazon-ecs-agent/agent/utils"
utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"context"
"github.com/cihub/seelog"
"github.com/pkg/errors"
)
const (
// DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint
DockerEndpointEnvVariable = "DOCKER_HOST"
// DockerDefaultEndpoint is the default value for the Docker endpoint
DockerDefaultEndpoint = "unix:///var/run/docker.sock"
capabilityPrefix = "com.amazonaws.ecs.capability."
capabilityTaskIAMRole = "task-iam-role"
capabilityTaskIAMRoleNetHost = "task-iam-role-network-host"
capabilityTaskCPUMemLimit = "task-cpu-mem-limit"
attributePrefix = "ecs.capability."
labelPrefix = "com.amazonaws.ecs."
labelTaskARN = labelPrefix + "task-arn"
labelContainerName = labelPrefix + "container-name"
labelTaskDefinitionFamily = labelPrefix + "task-definition-family"
labelTaskDefinitionVersion = labelPrefix + "task-definition-version"
labelCluster = labelPrefix + "cluster"
)
// DockerTaskEngine is a state machine for managing a task and its containers
// in ECS.
//
// DockerTaskEngine implements an abstraction over the DockerGoClient so that
// it does not have to know about tasks, only containers
// The DockerTaskEngine interacts with Docker to implement a TaskEngine
type DockerTaskEngine struct {
// implements TaskEngine
cfg *config.Config
ctx context.Context
initialized bool
mustInitLock sync.Mutex
// state stores all tasks this task engine is aware of, including their
// current state and mappings to/from dockerId and name.
// This is used to checkpoint state to disk so tasks may survive agent
// failures or updates
state dockerstate.TaskEngineState
managedTasks map[string]*managedTask
taskStopGroup *utilsync.SequentialWaitGroup
events <-chan DockerContainerChangeEvent
stateChangeEvents chan statechange.Event
saver statemanager.Saver
client DockerClient
clientLock sync.Mutex
cniClient ecscni.CNIClient
containerChangeEventStream *eventstream.EventStream
stopEngine context.CancelFunc
// processTasks is a mutex that the task engine must acquire before changing
// any task's state which it manages. Since this is a lock that encompasses
// all tasks, it must not acquire it for any significant duration
// The write mutex should be taken when adding and removing tasks from managedTasks.
processTasks sync.RWMutex
enableConcurrentPull bool
credentialsManager credentials.Manager
_time ttime.Time
_timeOnce sync.Once
imageManager ImageManager
containerStatusToTransitionFunction map[api.ContainerStatus]transitionApplyFunc
metadataManager containermetadata.Manager
resource resources.Resource
}
// NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine.
// The distinction between created and initialized is that when created it may
// be serialized/deserialized, but it will not communicate with docker until it
// is also initialized.
func NewDockerTaskEngine(cfg *config.Config, client DockerClient,
credentialsManager credentials.Manager, containerChangeEventStream *eventstream.EventStream,
imageManager ImageManager, state dockerstate.TaskEngineState,
metadataManager containermetadata.Manager,
resource resources.Resource) *DockerTaskEngine {
dockerTaskEngine := &DockerTaskEngine{
cfg: cfg,
client: client,
saver: statemanager.NewNoopStateManager(),
state: state,
managedTasks: make(map[string]*managedTask),
taskStopGroup: utilsync.NewSequentialWaitGroup(),
stateChangeEvents: make(chan statechange.Event),
enableConcurrentPull: false,
credentialsManager: credentialsManager,
containerChangeEventStream: containerChangeEventStream,
imageManager: imageManager,
cniClient: ecscni.NewClient(&ecscni.Config{
PluginsPath: cfg.CNIPluginsPath,
MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion,
}),
metadataManager: metadataManager,
resource: resource,
}
dockerTaskEngine.initializeContainerStatusToTransitionFunction()
return dockerTaskEngine
}
func (engine *DockerTaskEngine) initializeContainerStatusToTransitionFunction() {
containerStatusToTransitionFunction := map[api.ContainerStatus]transitionApplyFunc{
api.ContainerPulled: engine.pullContainer,
api.ContainerCreated: engine.createContainer,
api.ContainerRunning: engine.startContainer,
api.ContainerResourcesProvisioned: engine.provisionContainerResources,
api.ContainerStopped: engine.stopContainer,
}
engine.containerStatusToTransitionFunction = containerStatusToTransitionFunction
}
// ImagePullDeleteLock ensures that pulls and deletes do not run at the same time; multiple pulls can run concurrently for docker >= 1.11.1
// Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718)
// Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks.
var ImagePullDeleteLock sync.RWMutex
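// Illustrative usage sketch (not part of this file): image pulls take the read
// lock so that, on compatible docker versions, several pulls may proceed at
// once, while image deletion takes the write lock and excludes all pulls:
//
//	ImagePullDeleteLock.RLock()           // concurrent pull
//	defer ImagePullDeleteLock.RUnlock()
//
//	ImagePullDeleteLock.Lock()            // image deletion
//	defer ImagePullDeleteLock.Unlock()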
// UnmarshalJSON restores a previously marshaled task-engine state from json
func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error {
return engine.state.UnmarshalJSON(data)
}
// MarshalJSON marshals into state directly
func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) {
return engine.state.MarshalJSON()
}
// Init initializes a DockerTaskEngine such that it may communicate with docker
// and operate normally.
// This function must be called before any other function, except serializing and deserializing, can succeed without error.
func (engine *DockerTaskEngine) Init(ctx context.Context) error {
// TODO, pass in a context from main from background so that other things can stop us, not just the tests
derivedCtx, cancel := context.WithCancel(ctx)
engine.stopEngine = cancel
engine.ctx = derivedCtx
// Determine whether the engine can perform concurrent "docker pull" based on docker version
engine.enableConcurrentPull = engine.isParallelPullCompatible()
// Open the event stream before we sync state so that e.g. if a container
// goes from running to stopped after we sync with it as "running" we still
// have the "went to stopped" event pending so we can be up to date.
err := engine.openEventstream(derivedCtx)
if err != nil {
return err
}
engine.synchronizeState()
// Now catch up and start processing new events per normal
go engine.handleDockerEvents(derivedCtx)
engine.initialized = true
return nil
}
// SetDockerClient provides a way to override the client used for communication with docker as a testing hook.
func (engine *DockerTaskEngine) SetDockerClient(client DockerClient) {
engine.clientLock.Lock()
	defer engine.clientLock.Unlock()
engine.client = client
}
// MustInit blocks and retries until an engine can be initialized.
func (engine *DockerTaskEngine) MustInit(ctx context.Context) {
if engine.initialized {
return
}
engine.mustInitLock.Lock()
defer engine.mustInitLock.Unlock()
errorOnce := sync.Once{}
taskEngineConnectBackoff := utils.NewSimpleBackoff(200*time.Millisecond, 2*time.Second, 0.20, 1.5)
utils.RetryWithBackoff(taskEngineConnectBackoff, func() error {
if engine.initialized {
return nil
}
err := engine.Init(ctx)
if err != nil {
errorOnce.Do(func() {
seelog.Errorf("Task engine: could not connect to docker daemon: %v", err)
})
}
return err
})
}
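// For reference, the backoff above starts at 200ms with a 1.5x multiplier and
// +/-20% jitter, capped at 2s; ignoring jitter, successive retry waits are
// roughly 200ms, 300ms, 450ms, 675ms, ~1.01s, ~1.52s, then 2s from there on.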
// SetSaver sets the saver that is used by the DockerTaskEngine
func (engine *DockerTaskEngine) SetSaver(saver statemanager.Saver) {
engine.saver = saver
}
// Shutdown makes a best-effort attempt to cleanup after the task engine.
// This should not be relied on for anything more complicated than testing.
func (engine *DockerTaskEngine) Shutdown() {
engine.stopEngine()
engine.Disable()
}
// Disable prevents this engine from managing any additional tasks.
func (engine *DockerTaskEngine) Disable() {
engine.processTasks.Lock()
}
// synchronizeState explicitly goes through each docker container stored in
// "state" and updates its KnownStatus appropriately, as well as queueing up
// events to push upstream.
func (engine *DockerTaskEngine) synchronizeState() {
engine.processTasks.Lock()
defer engine.processTasks.Unlock()
imageStates := engine.state.AllImageStates()
if len(imageStates) != 0 {
engine.imageManager.AddAllImageStates(imageStates)
}
tasks := engine.state.AllTasks()
var tasksToStart []*api.Task
for _, task := range tasks {
conts, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
// task hasn't started processing, no need to check container status
tasksToStart = append(tasksToStart, task)
continue
}
for _, cont := range conts {
engine.synchronizeContainerStatus(cont, task)
}
tasksToStart = append(tasksToStart, task)
// Put tasks that have been stopped by ACS but haven't finished stopping into the wait group
if task.GetDesiredStatus().Terminal() && task.GetStopSequenceNumber() != 0 {
engine.taskStopGroup.Add(task.GetStopSequenceNumber(), 1)
}
}
for _, task := range tasksToStart {
engine.startTask(task)
}
engine.saver.Save()
}
// updateContainerMetadata sets the container metadata from the docker inspect
func updateContainerMetadata(metadata *DockerContainerMetadata, container *api.Container, task *api.Task) {
container.SetCreatedAt(metadata.CreatedAt)
container.SetStartedAt(metadata.StartedAt)
container.SetFinishedAt(metadata.FinishedAt)
// Set the labels if it's not set
if len(metadata.Labels) != 0 && len(container.GetLabels()) == 0 {
container.SetLabels(metadata.Labels)
}
// Update Volume
if metadata.Volumes != nil {
task.UpdateMountPoints(container, metadata.Volumes)
}
// Set Exitcode if it's not set
if metadata.ExitCode != nil {
container.SetKnownExitCode(metadata.ExitCode)
}
// Set port mappings
if len(metadata.PortBindings) != 0 && len(container.KnownPortBindings) == 0 {
container.KnownPortBindings = metadata.PortBindings
}
// update the container health information
if container.HealthStatusShouldBeReported() {
container.SetHealthStatus(metadata.Health)
}
}
// synchronizeContainerStatus checks and updates the container status with docker
func (engine *DockerTaskEngine) synchronizeContainerStatus(container *api.DockerContainer, task *api.Task) {
if container.DockerID == "" {
seelog.Debugf("Task engine [%s]: found container potentially created while we were down: %s",
task.Arn, container.DockerName)
// Figure out the dockerid
describedContainer, err := engine.client.InspectContainer(container.DockerName, inspectContainerTimeout)
if err != nil {
seelog.Warnf("Task engine [%s]: could not find matching container for expected name [%s]: %v",
task.Arn, container.DockerName, err)
} else {
// update the container metadata in case the container was created during agent restart
metadata := metadataFromContainer(describedContainer)
updateContainerMetadata(&metadata, container.Container, task)
container.DockerID = describedContainer.ID
container.Container.SetKnownStatus(dockerStateToState(describedContainer.State))
// update mappings that need dockerid
engine.state.AddContainer(container, task)
engine.imageManager.RecordContainerReference(container.Container)
}
return
}
currentState, metadata := engine.client.DescribeContainer(container.DockerID)
if metadata.Error != nil {
currentState = api.ContainerStopped
// If this is a Docker API error
if metadata.Error.ErrorName() == cannotDescribeContainerError {
seelog.Warnf("Task engine [%s]: could not describe previously known container [id=%s; name=%s]; assuming dead: %v",
task.Arn, container.DockerID, container.DockerName, metadata.Error)
if !container.Container.KnownTerminal() {
container.Container.ApplyingError = api.NewNamedError(&ContainerVanishedError{})
engine.imageManager.RemoveContainerReferenceFromImageState(container.Container)
}
} else {
// If this is a container state error
updateContainerMetadata(&metadata, container.Container, task)
container.Container.ApplyingError = api.NewNamedError(metadata.Error)
}
} else {
// update the container metadata in case the container status/metadata changed during agent restart
updateContainerMetadata(&metadata, container.Container, task)
engine.imageManager.RecordContainerReference(container.Container)
if engine.cfg.ContainerMetadataEnabled && !container.Container.IsMetadataFileUpdated() {
go engine.updateMetadataFile(task, container)
}
}
if currentState > container.Container.GetKnownStatus() {
// update the container known status
container.Container.SetKnownStatus(currentState)
}
// Update task ExecutionStoppedAt timestamp
task.RecordExecutionStoppedAt(container.Container)
}
// CheckTaskState inspects the state of all containers within a task and writes
// their state to the managed task's container channel.
func (engine *DockerTaskEngine) CheckTaskState(task *api.Task) {
taskContainers, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
seelog.Warnf("Task engine [%s]: could not check task state; no task in state", task.Arn)
return
}
for _, container := range task.Containers {
dockerContainer, ok := taskContainers[container.Name]
if !ok {
continue
}
status, metadata := engine.client.DescribeContainer(dockerContainer.DockerID)
engine.processTasks.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.processTasks.RUnlock()
if ok {
managedTask.dockerMessages <- dockerContainerChange{
container: container,
event: DockerContainerChangeEvent{
Status: status,
DockerContainerMetadata: metadata,
},
}
}
}
}
// sweepTask deletes all the containers associated with a task
func (engine *DockerTaskEngine) sweepTask(task *api.Task) {
for _, cont := range task.Containers {
err := engine.removeContainer(task, cont)
if err != nil {
seelog.Debugf("Task engine [%s]: unable to remove old container [%s]: %v",
task.Arn, cont.Name, err)
}
// Internal container (created by ecs-agent) state isn't recorded
if cont.IsInternal() {
continue
}
err = engine.imageManager.RemoveContainerReferenceFromImageState(cont)
if err != nil {
seelog.Errorf("Task engine [%s]: Unable to remove container [%s] reference from image state: %v",
task.Arn, cont.Name, err)
}
}
// Clean metadata directory for task
if engine.cfg.ContainerMetadataEnabled {
err := engine.metadataManager.Clean(task.Arn)
if err != nil {
seelog.Warnf("Task engine [%s]: clean task metadata failed: %v", task.Arn, err)
}
}
engine.saver.Save()
}
func (engine *DockerTaskEngine) deleteTask(task *api.Task, handleCleanupDone chan<- struct{}) {
if engine.cfg.TaskCPUMemLimit.Enabled() {
err := engine.resource.Cleanup(task)
if err != nil {
seelog.Warnf("Task engine [%s]: unable to cleanup platform resources: %v",
task.Arn, err)
}
}
// Now remove ourselves from the global state and cleanup channels
engine.processTasks.Lock()
engine.state.RemoveTask(task)
eni := task.GetTaskENI()
if eni == nil {
seelog.Debugf("Task engine [%s]: no eni associated with task", task.Arn)
} else {
seelog.Debugf("Task engine [%s]: removing the eni from agent state", task.Arn)
engine.state.RemoveENIAttachment(eni.MacAddress)
}
seelog.Debugf("Task engine [%s]: finished removing task data, removing task from managed tasks", task.Arn)
delete(engine.managedTasks, task.Arn)
handleCleanupDone <- struct{}{}
engine.processTasks.Unlock()
engine.saver.Save()
}
func (engine *DockerTaskEngine) emitTaskEvent(task *api.Task, reason string) {
event, err := api.NewTaskStateChangeEvent(task, reason)
if err != nil {
seelog.Debugf("Task engine [%s]: unable to create task state change event: %v", task.Arn, err)
return
}
seelog.Infof("Task engine [%s]: Task engine: sending change event [%s]", task.Arn, event.String())
engine.stateChangeEvents <- event
}
// startTask creates a managedTask construct to track the task and then begins
// pushing it towards its desired state when allowed. startTask is protected by
// the processTasks lock of 'AddTask'. It should not be called from anywhere
// else and should exit quickly to allow AddTask to do more work.
func (engine *DockerTaskEngine) startTask(task *api.Task) {
// Create a channel that may be used to communicate with this task, survey
// what tasks need to be waited for for this one to start, and then spin off
// a goroutine to oversee this task
thisTask := engine.newManagedTask(task)
thisTask._time = engine.time()
go thisTask.overseeTask()
}
func (engine *DockerTaskEngine) time() ttime.Time {
engine._timeOnce.Do(func() {
if engine._time == nil {
engine._time = &ttime.DefaultTime{}
}
})
return engine._time
}
// openEventstream opens, but does not consume, the docker event stream
func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error {
events, err := engine.client.ContainerEvents(ctx)
if err != nil {
return err
}
engine.events = events
return nil
}
// handleDockerEvents must be called after openEventstream; it processes each
// event that it reads from the docker eventstream
func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case event := <-engine.events:
engine.handleDockerEvent(event)
}
}
}
// handleDockerEvent is the entrypoint for task modifications originating with
// events occurring through Docker, outside the task engine itself.
// handleDockerEvent is responsible for taking an event that correlates to a
// container and placing it in the context of the task to which that container
// belongs.
func (engine *DockerTaskEngine) handleDockerEvent(event DockerContainerChangeEvent) {
seelog.Debugf("Task engine: handling a docker event: %s", event.String())
task, ok := engine.state.TaskByID(event.DockerID)
if !ok {
seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to task",
event.DockerID)
return
}
cont, ok := engine.state.ContainerByID(event.DockerID)
if !ok {
seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to container",
event.DockerID)
return
}
	// A container health status change does not affect the container status,
	// so there is no need to process it in the task manager
if event.Type == api.ContainerHealthEvent {
if cont.Container.HealthStatusShouldBeReported() {
seelog.Debugf("Task engine: updating container [%s(%s)] health status: %v",
cont.Container.Name, cont.DockerID, event.DockerContainerMetadata.Health)
cont.Container.SetHealthStatus(event.DockerContainerMetadata.Health)
}
return
}
engine.processTasks.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
// hold the lock until the message is sent so we don't send on a closed channel
defer engine.processTasks.RUnlock()
if !ok {
seelog.Criticalf("Task engine: could not find managed task [%s] corresponding to a docker event: %s",
task.Arn, event.String())
return
}
seelog.Debugf("Task engine [%s]: writing docker event to the task: %s",
task.Arn, event.String())
managedTask.dockerMessages <- dockerContainerChange{container: cont.Container, event: event}
seelog.Debugf("Task engine [%s]: wrote docker event to the task: %s",
task.Arn, event.String())
}
// StateChangeEvents returns a channel to read task and container state changes. These
// changes should be read as soon as possible, as leaving them unread will block
// processing the task referenced by the event.
func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event {
return engine.stateChangeEvents
}
// AddTask starts tracking a task
func (engine *DockerTaskEngine) AddTask(task *api.Task) error {
task.PostUnmarshalTask(engine.cfg, engine.credentialsManager)
engine.processTasks.Lock()
defer engine.processTasks.Unlock()
existingTask, exists := engine.state.TaskByArn(task.Arn)
if !exists {
// This will update the container desired status
task.UpdateDesiredStatus()
engine.state.AddTask(task)
if dependencygraph.ValidDependencies(task) {
engine.startTask(task)
} else {
seelog.Errorf("Task engine [%s]: unable to progress task with circular dependencies", task.Arn)
task.SetKnownStatus(api.TaskStopped)
task.SetDesiredStatus(api.TaskStopped)
err := TaskDependencyError{task.Arn}
engine.emitTaskEvent(task, err.Error())
}
return nil
}
// Update task
engine.updateTaskUnsafe(existingTask, task)
return nil
}
// ListTasks returns the tasks currently managed by the DockerTaskEngine
func (engine *DockerTaskEngine) ListTasks() ([]*api.Task, error) {
return engine.state.AllTasks(), nil
}
// GetTaskByArn returns the task identified by that ARN
func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*api.Task, bool) {
return engine.state.TaskByArn(arn)
}
func (engine *DockerTaskEngine) pullContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
switch container.Type {
case api.ContainerCNIPause:
// ContainerCNIPause image are managed at startup
return DockerContainerMetadata{}
case api.ContainerEmptyHostVolume:
// ContainerEmptyHostVolume image is either local (must be imported) or remote (must be pulled)
if emptyvolume.LocalImage {
return engine.client.ImportLocalEmptyVolumeImage()
}
}
// Record the pullStoppedAt timestamp
defer func() {
timestamp := engine.time().Now()
task.SetPullStoppedAt(timestamp)
}()
if engine.enableConcurrentPull {
seelog.Infof("Task engine [%s]: pulling container %s concurrently", task.Arn, container.Name)
return engine.concurrentPull(task, container)
}
seelog.Infof("Task engine [%s]: pulling container %s serially", task.Arn, container.Name)
return engine.serialPull(task, container)
}
func (engine *DockerTaskEngine) concurrentPull(task *api.Task, container *api.Container) DockerContainerMetadata {
seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image - %s",
task.Arn, container.Image)
ImagePullDeleteLock.RLock()
seelog.Debugf("Task engine [%s]: Acquired ImagePullDeleteLock, start pulling image - %s",
task.Arn, container.Image)
defer seelog.Debugf("Task engine [%s]: Released ImagePullDeleteLock after pulling image - %s",
task.Arn, container.Image)
defer ImagePullDeleteLock.RUnlock()
// Record the task pull_started_at timestamp
pullStart := engine.time().Now()
defer func(startTime time.Time) {
seelog.Infof("Task engine [%s]: Finished pulling container %s in %s",
task.Arn, container.Image, time.Since(startTime).String())
}(pullStart)
ok := task.SetPullStartedAt(pullStart)
if ok {
seelog.Infof("Task engine [%s]: Recording timestamp for starting image pulltime: %s",
task.Arn, pullStart)
}
return engine.pullAndUpdateContainerReference(task, container)
}
func (engine *DockerTaskEngine) serialPull(task *api.Task, container *api.Container) DockerContainerMetadata {
seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image - %s",
task.Arn, container.Image)
ImagePullDeleteLock.Lock()
seelog.Debugf("Task engine [%s]: acquired ImagePullDeleteLock, start pulling image - %s",
task.Arn, container.Image)
defer seelog.Debugf("Task engine [%s]: released ImagePullDeleteLock after pulling image - %s",
task.Arn, container.Image)
defer ImagePullDeleteLock.Unlock()
pullStart := engine.time().Now()
defer func(startTime time.Time) {
seelog.Infof("Task engine [%s]: finished pulling image [%s] in %s",
task.Arn, container.Image, time.Since(startTime).String())
}(pullStart)
ok := task.SetPullStartedAt(pullStart)
if ok {
seelog.Infof("Task engine [%s]: recording timestamp for starting image pull: %s",
task.Arn, pullStart.String())
}
return engine.pullAndUpdateContainerReference(task, container)
}
func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *api.Task, container *api.Container) DockerContainerMetadata {
// If a task is blocked here for some time, and before it starts pulling image,
// the task's desired status is set to stopped, then don't pull the image
if task.GetDesiredStatus() == api.TaskStopped {
seelog.Infof("Task engine [%s]: task's desired status is stopped, skipping container [%s] pull",
task.Arn, container.Name)
container.SetDesiredStatus(api.ContainerStopped)
return DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
}
// Set the credentials for pull from ECR if necessary
if container.ShouldPullWithExecutionRole() {
executionCredentials, ok := engine.credentialsManager.GetTaskCredentials(task.GetExecutionCredentialsID())
if !ok {
seelog.Infof("Task engine [%s]: unable to acquire ECR credentials for container [%s]",
task.Arn, container.Name)
return DockerContainerMetadata{
Error: CannotPullECRContainerError{
fromError: errors.New("engine ecr credentials: not found"),
},
}
}
iamCredentials := executionCredentials.GetIAMRoleCredentials()
container.SetRegistryAuthCredentials(iamCredentials)
// Clean up the ECR pull credentials after pulling
defer container.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{})
}
metadata := engine.client.PullImage(container.Image, container.RegistryAuthentication)
	// Don't add internal images (created by ecs-agent) into image manager state
if container.IsInternal() {
return metadata
}
err := engine.imageManager.RecordContainerReference(container)
if err != nil {
seelog.Errorf("Task engine [%s]: Unable to add container reference to image state: %v",
task.Arn, err)
}
imageState := engine.imageManager.GetImageStateFromImageName(container.Image)
engine.state.AddImageState(imageState)
engine.saver.Save()
return metadata
}
func (engine *DockerTaskEngine) createContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
seelog.Infof("Task engine [%s]: creating container: %s", task.Arn, container.Name)
client := engine.client
if container.DockerConfig.Version != nil {
client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
}
dockerContainerName := ""
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
containerMap = make(map[string]*api.DockerContainer)
} else {
		// look for a container that already has a docker name but has not been created yet
for _, v := range containerMap {
if v.Container.Name == container.Name {
dockerContainerName = v.DockerName
break
}
}
}
// Resolve HostConfig
// we have to do this in create, not start, because docker no longer handles
	// merging create config with start hostconfig the same way; e.g. memory limits
// get lost
dockerClientVersion, versionErr := client.APIVersion()
if versionErr != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}}
}
hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion)
if hcerr != nil {
return DockerContainerMetadata{Error: api.NamedError(hcerr)}
}
if container.AWSLogAuthExecutionRole() {
err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager)
if err != nil {
return DockerContainerMetadata{Error: api.NamedError(err)}
}
}
config, err := task.DockerConfig(container, dockerClientVersion)
if err != nil {
return DockerContainerMetadata{Error: api.NamedError(err)}
}
// Augment labels with some metadata from the agent. Explicitly do this last
// such that it will always override duplicates in the provided raw config
// data.
config.Labels[labelTaskARN] = task.Arn
config.Labels[labelContainerName] = container.Name
config.Labels[labelTaskDefinitionFamily] = task.Family
config.Labels[labelTaskDefinitionVersion] = task.Version
config.Labels[labelCluster] = engine.cfg.Cluster
if dockerContainerName == "" {
// only alphanumeric and hyphen characters are allowed
reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+")
name := reInvalidChars.ReplaceAllString(container.Name, "")
dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()
// Pre-add the container in case we stop before the next, more useful,
// AddContainer call. This ensures we have a way to get the container if
// we die before 'createContainer' returns because we can inspect by
// name
engine.state.AddContainer(&api.DockerContainer{
DockerName: dockerContainerName,
Container: container,
}, task)
seelog.Infof("Task engine [%s]: created container name mapping for task: %s -> %s",
task.Arn, container.Name, dockerContainerName)
engine.saver.ForceSave()
}
// Create metadata directory and file then populate it with common metadata of all containers of this task
// Afterwards add this directory to the container's mounts if file creation was successful
if engine.cfg.ContainerMetadataEnabled && !container.IsInternal() {
mderr := engine.metadataManager.Create(config, hostConfig, task.Arn, container.Name)
if mderr != nil {
seelog.Warnf("Task engine [%s]: unable to create metadata for container %s: %v",
task.Arn, container.Name, mderr)
}
}
metadata := client.CreateContainer(config, hostConfig, dockerContainerName, createContainerTimeout)
if metadata.DockerID != "" {
engine.state.AddContainer(&api.DockerContainer{DockerID: metadata.DockerID,
DockerName: dockerContainerName,
Container: container}, task)
}
container.SetLabels(config.Labels)
seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s",
task.Arn, container.Name, metadata.DockerID)
return metadata
}
func (engine *DockerTaskEngine) startContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
seelog.Infof("Task engine [%s]: starting container: %s", task.Arn, container.Name)
client := engine.client
if container.DockerConfig.Version != nil {
client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
}
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return DockerContainerMetadata{
Error: CannotStartContainerError{
fromError: errors.Errorf("Container belongs to unrecognized task %s", task.Arn),
},
}
}
dockerContainer, ok := containerMap[container.Name]
if !ok {
return DockerContainerMetadata{
Error: CannotStartContainerError{
fromError: errors.Errorf("Container not recorded as created"),
},
}
}
dockerContainerMD := client.StartContainer(dockerContainer.DockerID, startContainerTimeout)
// Get metadata through container inspection and available task information then write this to the metadata file
// Performs this in the background to avoid delaying container start
// TODO: Add a state to the api.Container for the status of the metadata file (Whether it needs update) and
// add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted
if dockerContainerMD.Error == nil &&
engine.cfg.ContainerMetadataEnabled &&
!container.IsInternal() {
go func() {
err := engine.metadataManager.Update(dockerContainer.DockerID, task.Arn, container.Name)
if err != nil {
seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v",
task.Arn, container.Name, err)
return
}
container.SetMetadataFileUpdated()
seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
task.Arn, container.Name)
}()
}
return dockerContainerMD
}
func (engine *DockerTaskEngine) provisionContainerResources(task *api.Task, container *api.Container) DockerContainerMetadata {
seelog.Infof("Task engine [%s]: setting up container resources for container [%s]",
task.Arn, container.Name)
cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, container)
if err != nil {
return DockerContainerMetadata{
Error: ContainerNetworkingError{
fromError: errors.Wrap(err,
"container resource provisioning: unable to build cni configuration"),
},
}
}
// Invoke the libcni to config the network namespace for the container
result, err := engine.cniClient.SetupNS(cniConfig)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v",
task.Arn, err)
return DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
Error: ContainerNetworkingError{errors.Wrap(err,
"container resource provisioning: failed to setup network namespace")},
}
}
taskIP := result.IPs[0].Address.IP.String()
seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP)
engine.state.AddTaskIPAddress(taskIP, task.Arn)
return DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
}
}
// cleanupPauseContainerNetwork will clean up the network namespace of pause container
func (engine *DockerTaskEngine) cleanupPauseContainerNetwork(task *api.Task, container *api.Container) error {
seelog.Infof("Task engine [%s]: cleaning up the network namespace", task.Arn)
cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, container)
if err != nil {
return errors.Wrapf(err,
"engine: failed cleanup task network namespace, task: %s", task.String())
}
return engine.cniClient.CleanupNS(cniConfig)
}
func (engine *DockerTaskEngine) buildCNIConfigFromTaskContainer(task *api.Task, container *api.Container) (*ecscni.Config, error) {
cfg, err := task.BuildCNIConfig()
if err != nil {
return nil, errors.Wrapf(err, "engine: build cni configuration from task failed")
}
if engine.cfg.OverrideAWSVPCLocalIPv4Address != nil &&
len(engine.cfg.OverrideAWSVPCLocalIPv4Address.IP) != 0 &&
len(engine.cfg.OverrideAWSVPCLocalIPv4Address.Mask) != 0 {
cfg.IPAMV4Address = engine.cfg.OverrideAWSVPCLocalIPv4Address
}
if len(engine.cfg.AWSVPCAdditionalLocalRoutes) != 0 {
cfg.AdditionalLocalRoutes = engine.cfg.AWSVPCAdditionalLocalRoutes
}
// Get the pid of container
containers, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return nil, errors.New("engine: failed to find the pause container, no containers in the task")
}
pauseContainer, ok := containers[container.Name]
if !ok {
return nil, errors.New("engine: failed to find the pause container")
}
containerInspectOutput, err := engine.client.InspectContainer(pauseContainer.DockerName, inspectContainerTimeout)
if err != nil {
return nil, err
}
cfg.ContainerPID = strconv.Itoa(containerInspectOutput.State.Pid)
cfg.ContainerID = containerInspectOutput.ID
cfg.BlockInstanceMetdata = engine.cfg.AWSVPCBlockInstanceMetdata
return cfg, nil
}
func (engine *DockerTaskEngine) stopContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
seelog.Infof("Task engine [%s]: stopping container [%s]", task.Arn, container.Name)
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return DockerContainerMetadata{
Error: CannotStopContainerError{
fromError: errors.Errorf("Container belongs to unrecognized task %s", task.Arn),
},
}
}
dockerContainer, ok := containerMap[container.Name]
if !ok {
return DockerContainerMetadata{
Error: CannotStopContainerError{errors.Errorf("Container not recorded as created")},
}
}
	// Clean up the pause container network namespace before stopping the container
if container.Type == api.ContainerCNIPause {
err := engine.cleanupPauseContainerNetwork(task, container)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v",
task.Arn, err)
}
seelog.Infof("Task engine [%s]: cleaned pause container network namespace", task.Arn)
}
return engine.client.StopContainer(dockerContainer.DockerID, stopContainerTimeout)
}
func (engine *DockerTaskEngine) removeContainer(task *api.Task, container *api.Container) error {
seelog.Infof("Task engine [%s]: removing container: %s", task.Arn, container.Name)
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return errors.New("No such task: " + task.Arn)
}
dockerContainer, ok := containerMap[container.Name]
if !ok {
return errors.New("No container named '" + container.Name + "' created in " + task.Arn)
}
return engine.client.RemoveContainer(dockerContainer.DockerName, removeContainerTimeout)
}
// updateTaskUnsafe determines if a new transition needs to be applied to the
// referenced task, and if needed applies it. It should not be called anywhere
// but from 'AddTask' and is protected by the processTasks lock there.
func (engine *DockerTaskEngine) updateTaskUnsafe(task *api.Task, update *api.Task) {
managedTask, ok := engine.managedTasks[task.Arn]
if !ok {
seelog.Criticalf("Task engine [%s]: ACS message for a task we thought we managed, but don't! Aborting.",
task.Arn)
return
}
// Keep the lock because sequence numbers cannot be correct unless they are
	// also read in the order AddTask was called
// This does block the engine's ability to ingest any new events (including
// stops for past tasks, ack!), but this is necessary for correctness
updateDesiredStatus := update.GetDesiredStatus()
seelog.Debugf("Task engine [%s]: putting update on the acs channel: [%s] with seqnum [%d]",
task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber)
transition := acsTransition{desiredStatus: updateDesiredStatus}
transition.seqnum = update.StopSequenceNumber
managedTask.acsMessages <- transition
seelog.Debugf("Task engine [%s]: update taken off the acs channel: [%s] with seqnum [%d]",
task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber)
}
// transitionContainer calls applyContainerState, and then notifies the managed
// task of the change. transitionContainer is called by progressContainers and
// by handleStoppedToRunningContainerTransition.
func (engine *DockerTaskEngine) transitionContainer(task *api.Task, container *api.Container, to api.ContainerStatus) {
// Let docker events operate async so that we can continue to handle ACS / other requests
// This is safe because 'applyContainerState' will not mutate the task
metadata := engine.applyContainerState(task, container, to)
engine.processTasks.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
if ok {
managedTask.dockerMessages <- dockerContainerChange{
container: container,
event: DockerContainerChangeEvent{
Status: to,
DockerContainerMetadata: metadata,
},
}
}
engine.processTasks.RUnlock()
}
// applyContainerState moves the container to the given state by calling the
// function defined in the transitionFunctionMap for the state
func (engine *DockerTaskEngine) applyContainerState(task *api.Task, container *api.Container, nextState api.ContainerStatus) DockerContainerMetadata {
transitionFunction, ok := engine.transitionFunctionMap()[nextState]
if !ok {
seelog.Criticalf("Task engine [%s]: unsupported desired state transition for container [%s]: %s",
task.Arn, container.Name, nextState.String())
return DockerContainerMetadata{Error: &impossibleTransitionError{nextState}}
}
metadata := transitionFunction(task, container)
if metadata.Error != nil {
seelog.Infof("Task engine [%s]: error transitioning container [%s] to [%s]: %v",
task.Arn, container.Name, nextState.String(), metadata.Error)
} else {
seelog.Debugf("Task engine [%s]: transitioned container [%s] to [%s]",
task.Arn, container.Name, nextState.String())
engine.saver.Save()
}
return metadata
}
// transitionFunctionMap provides the logic for the simple state machine of the
// DockerTaskEngine. Each desired state maps to a function that can be called
// to try and move the task to that desired state.
func (engine *DockerTaskEngine) transitionFunctionMap() map[api.ContainerStatus]transitionApplyFunc {
return engine.containerStatusToTransitionFunction
}
type transitionApplyFunc (func(*api.Task, *api.Container) DockerContainerMetadata)
// State is a function primarily meant for testing usage; it is explicitly not
// part of the TaskEngine interface and should not be relied upon.
// It returns an internal representation of the state of this DockerTaskEngine.
func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState {
return engine.state
}
// Version returns the underlying docker version.
func (engine *DockerTaskEngine) Version() (string, error) {
return engine.client.Version()
}
// isParallelPullCompatible checks the docker version and returns true if docker version >= 1.11.1
func (engine *DockerTaskEngine) isParallelPullCompatible() bool {
version, err := engine.Version()
if err != nil {
seelog.Warnf("Task engine: failed to get docker version: %v", err)
return false
}
match, err := utils.Version(version).Matches(">=1.11.1")
if err != nil {
seelog.Warnf("Task engine: Could not compare docker version: %v", err)
return false
}
if match {
seelog.Debugf("Task engine: Found Docker version [%s]. Enabling concurrent pull", version)
return true
}
return false
}
func (engine *DockerTaskEngine) updateMetadataFile(task *api.Task, cont *api.DockerContainer) {
err := engine.metadataManager.Update(cont.DockerID, task.Arn, cont.Container.Name)
if err != nil {
seelog.Errorf("Task engine [%s]: failed to update metadata file for container %s: %v",
task.Arn, cont.Container.Name, err)
} else {
cont.Container.SetMetadataFileUpdated()
seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
task.Arn, cont.Container.Name)
}
}
| 1 | 19,051 | Can you move this up along with the other builtin libraries? | aws-amazon-ecs-agent | go |
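The grouping the reviewer is asking for would move "context" up with the other standard-library imports, roughly like this (a sketch; the elided project imports are unchanged):

import (
	"context"
	"regexp"
	"strconv"
	"sync"
	"time"

	"github.com/aws/amazon-ecs-agent/agent/api"
	// ... remaining github.com/aws/amazon-ecs-agent imports unchanged ...

	"github.com/cihub/seelog"
	"github.com/pkg/errors"
)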
@@ -633,6 +633,12 @@ class GeneralSettingsPanel(SettingsPanel):
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
+ # Translators: The label of a checkbox in general settings to toggle allowing of usage stats gathering
+ item=self.allowUsageStatsCheckBox=wx.CheckBox(self,label=_("Allow the NVDA project to gather NVDA usage statistics"))
+ item.Value=config.conf["update"]["allowUsageStats"]
+ if globalVars.appArgs.secure:
+ item.Disable()
+ settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle startup notifications
# for a pending NVDA update.
item=self.notifyForPendingUpdateCheckBox=wx.CheckBox(self,label=_("Notify for &pending update on startup")) | 1 | # -*- coding: UTF-8 -*-
#settingsDialogs.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Rui Batista, Joseph Lee, Heiko Folkerts, Zahari Yurukov, Leonard de Ruijter, Derek Riemer, Babbage B.V., Davy Kager, Ethan Holliger
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import glob
import os
import copy
import re
import wx
from wx.lib import scrolledpanel
from wx.lib.expando import ExpandoTextCtrl
import wx.lib.newevent
import winUser
import logHandler
import installer
from synthDriverHandler import *
import config
import languageHandler
import speech
import gui
from gui import nvdaControls
import globalVars
from logHandler import log
import nvwave
import audioDucking
import speechDictHandler
import appModuleHandler
import queueHandler
import braille
import brailleTables
import brailleInput
import core
import keyboardHandler
import characterProcessing
import guiHelper
try:
import updateCheck
except RuntimeError:
updateCheck = None
import inputCore
import nvdaControls
import touchHandler
import winVersion
import weakref
import time
class SettingsDialog(wx.Dialog):
"""A settings dialog.
A settings dialog consists of one or more settings controls and OK and Cancel buttons and an optional Apply button.
Action may be taken in response to the OK, Cancel or Apply buttons.
To use this dialog:
* Set L{title} to the title of the dialog.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, override L{postInit} to perform actions after the dialog is created, such as setting the focus. Be
aware that L{postInit} is also called by L{onApply}.
* Optionally, extend one or more of L{onOk}, L{onCancel} or L{onApply} to perform actions in response to the
OK, Cancel or Apply buttons, respectively.
@ivar title: The title of the dialog.
@type title: str
"""
class MultiInstanceError(RuntimeError): pass
_instances=weakref.WeakSet()
title = ""
shouldSuspendConfigProfileTriggers = True
def __new__(cls, *args, **kwargs):
if next((dlg for dlg in SettingsDialog._instances if isinstance(dlg,cls)),None) or (
SettingsDialog._instances and not kwargs.get('multiInstanceAllowed',False)
):
raise SettingsDialog.MultiInstanceError("Only one instance of SettingsDialog can exist at a time")
obj = super(SettingsDialog, cls).__new__(cls, *args, **kwargs)
SettingsDialog._instances.add(obj)
return obj
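	# Note: _instances is a WeakSet, so destroyed dialogs fall out of it
	# automatically; the check above therefore only rejects dialogs that are
	# still alive (same subclass, or any subclass unless multiInstanceAllowed).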
def __init__(self, parent,
resizeable=False,
hasApplyButton=False,
settingsSizerOrientation=wx.VERTICAL,
multiInstanceAllowed=False):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param resizeable: True if the settings dialog should be resizable by the user, only set this if
you have tested that the components resize correctly.
@type resizeable: bool
@param hasApplyButton: C{True} to add an apply button to the dialog; defaults to C{False} for backwards compatibility.
@type hasApplyButton: bool
@param settingsSizerOrientation: Either wx.VERTICAL or wx.HORIZONTAL. This controls the orientation of the
sizer that is passed into L{makeSettings}. The default is wx.VERTICAL.
@type settingsSizerOrientation: wx.Orientation
@param multiInstanceAllowed: Whether multiple instances of SettingsDialog may exist.
Note that still only one instance of a particular SettingsDialog subclass may exist at one time.
@type multiInstanceAllowed: bool
"""
if gui._isDebug():
startTime = time.time()
windowStyle = wx.DEFAULT_DIALOG_STYLE | (wx.RESIZE_BORDER if resizeable else 0)
super(SettingsDialog, self).__init__(parent, title=self.title, style=windowStyle)
# the wx.Window must be constructed before we can get the handle.
import windowUtils
self.scaleFactor = windowUtils.getWindowScalingFactor(self.GetHandle())
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(settingsSizerOrientation)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL | wx.EXPAND, proportion=1)
self.mainSizer.Add(wx.StaticLine(self), flag=wx.EXPAND)
buttonFlags = wx.OK|wx.CANCEL|(wx.APPLY if hasApplyButton else 0)
self.mainSizer.Add(
self.CreateButtonSizer(flags=buttonFlags),
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.ALL|wx.ALIGN_RIGHT
)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
self.Bind(wx.EVT_BUTTON,self.onOk,id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON,self.onCancel,id=wx.ID_CANCEL)
self.Bind(wx.EVT_BUTTON,self.onApply,id=wx.ID_APPLY)
self.postInit()
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
if gui._isDebug():
log.debug("Loading %s took %.2f seconds"%(self.__class__.__name__, time.time() - startTime))
def makeSettings(self, sizer):
"""Populate the dialog with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
@type sizer: wx.Sizer
"""
raise NotImplementedError
def postInit(self):
"""Called after the dialog has been created.
For example, this might be used to set focus to the desired control.
Sub-classes may override this method.
"""
def onOk(self, evt):
"""Take action in response to the OK button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyChildren()
self.Destroy()
self.SetReturnCode(wx.ID_OK)
def onCancel(self, evt):
"""Take action in response to the Cancel button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyChildren()
self.Destroy()
self.SetReturnCode(wx.ID_CANCEL)
def onApply(self, evt):
"""Take action in response to the Apply button being pressed.
Sub-classes may extend or override this method.
This base method should be called to run the postInit method.
"""
self.postInit()
self.SetReturnCode(wx.ID_APPLY)
def scaleSize(self, size):
"""Helper method to scale a size using the logical DPI
@param size: The size (x,y) as a tuple or a single numerical type to scale
@returns: The scaled size, returned as the same type"""
if isinstance(size, tuple):
return (self.scaleFactor * size[0], self.scaleFactor * size[1])
return self.scaleFactor * size
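	# Illustrative example (factor assumed): with self.scaleFactor == 1.5,
	# scaleSize((800, 480)) returns (1200.0, 720.0) and scaleSize(20) returns 30.0.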
# An event and event binder that will notify the containers that they should
# redo the layout in whatever way makes sense for their particular content.
_RWLayoutNeededEvent, EVT_RW_LAYOUT_NEEDED = wx.lib.newevent.NewCommandEvent()
class SettingsPanel(wx.Panel):
"""A settings panel, to be used in a multi category settings dialog.
A settings panel consists of one or more settings controls.
Action may be taken in response to the parent dialog's OK or Cancel buttons.
To use this panel:
* Set L{title} to the title of the category.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, extend L{onPanelActivated} to perform actions after the category has been selected in the list of categories, such as synthesizer or braille display list population.
* Optionally, extend L{onPanelDeactivated} to perform actions after the category has been deselected (i.e. another category is selected) in the list of categories.
* Optionally, extend one or both of L{onSave} or L{onDiscard} to perform actions in response to the parent dialog's OK or Cancel buttons, respectively.
@ivar title: The title of the settings panel, also listed in the list of settings categories.
@type title: str
"""
title=""
def __init__(self, parent):
"""
@param parent: The parent for this panel; C{None} for no parent.
@type parent: wx.Window
"""
if gui._isDebug():
startTime = time.time()
super(SettingsPanel, self).__init__(parent, wx.ID_ANY)
# the wx.Window must be constructed before we can get the handle.
import windowUtils
self.scaleFactor = windowUtils.getWindowScalingFactor(self.GetHandle())
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(wx.VERTICAL)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, flag=wx.ALL)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
if gui._isDebug():
log.debug("Loading %s took %.2f seconds"%(self.__class__.__name__, time.time() - startTime))
def makeSettings(self, sizer):
"""Populate the panel with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
@type sizer: wx.Sizer
"""
raise NotImplementedError
def onPanelActivated(self):
"""Called after the panel has been activated (i.e. de corresponding category is selected in the list of categories).
For example, this might be used for resource intensive tasks.
		Sub-classes should extend this method.
"""
self.Show()
def onPanelDeactivated(self):
"""Called after the panel has been deactivated (i.e. another category has been selected in the list of categories).
		Sub-classes should extend this method.
"""
self.Hide()
def onSave(self):
"""Take action in response to the parent's dialog OK or apply button being pressed.
Sub-classes should override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when OK is pressed.
"""
raise NotImplementedError
def onDiscard(self):
"""Take action in response to the parent's dialog Cancel button being pressed.
Sub-classes should override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when Cancel is pressed.
"""
def _sendLayoutUpdatedEvent(self):
"""Notify any wx parents that may be listening that they should redo their layout in whatever way
makes sense for them. It is expected that sub-classes call this method in response to changes in
the number of GUI items in their panel.
"""
event = _RWLayoutNeededEvent(self.GetId())
event.SetEventObject(self)
self.GetEventHandler().ProcessEvent(event)
def scaleSize(self, size):
"""Helper method to scale a size using the logical DPI
@param size: The size (x,y) as a tuple or a single numerical type to scale
@returns: The scaled size, returned as the same type"""
if isinstance(size, tuple):
return (self.scaleFactor * size[0], self.scaleFactor * size[1])
return self.scaleFactor * size
class MultiCategorySettingsDialog(SettingsDialog):
"""A settings dialog with multiple settings categories.
A multi category settings dialog consists of a list view with settings categories on the left side,
and a settings panel on the right side of the dialog.
Furthermore, in addition to Ok and Cancel buttons, it has an Apply button by default,
which is different from the default behavior of L{SettingsDialog}.
To use this dialog: set title and populate L{categoryClasses} with subclasses of SettingsPanel.
Make sure that L{categoryClasses} only contains panels that are available on a particular system.
For example, if a certain category of settings is only supported on Windows 10 and higher,
that category should be left out of L{categoryClasses}
"""
title=""
categoryClasses=[]
class CategoryUnavailableError(RuntimeError): pass
def __init__(self, parent, initialCategory=None):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param initialCategory: The initial category to select when opening this dialog
@type parent: SettingsPanel
"""
if initialCategory and not issubclass(initialCategory,SettingsPanel):
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise TypeError("initialCategory should be an instance of SettingsPanel")
if initialCategory and initialCategory not in self.categoryClasses:
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise MultiCategorySettingsDialog.CategoryUnavailableError(
"The provided initial category is not a part of this dialog"
)
self.initialCategory = initialCategory
self.currentCategory = None
self.setPostInitFocus = None
		# dictionary key is index of category in self.catListCtrl, value is the instance. Partially filled, check for KeyError
self.catIdToInstanceMap = {}
super(MultiCategorySettingsDialog, self).__init__(
parent,
resizeable=True,
hasApplyButton=True,
settingsSizerOrientation=wx.HORIZONTAL
)
# setting the size must be done after the parent is constructed.
self.SetMinSize(self.scaleSize(self.MIN_SIZE))
self.SetSize(self.scaleSize(self.INITIAL_SIZE))
# the size has changed, so recenter on the screen
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
# Initial / min size for the dialog. This size was chosen as a medium fit, so the
# smaller settings panels are not surrounded by too much space but most of
# the panels fit. Vertical scrolling is acceptable. Horizontal scrolling less
# so, the width was chosen to eliminate horizontal scroll bars. If a panel
	# exceeds the initial width a debugWarning will be added to the log.
INITIAL_SIZE = (800, 480)
MIN_SIZE = (470, 240) # Min height required to show the OK, Cancel, Apply buttons
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.sHelper = sHelper
# Translators: The label for the list of categories in a multi category settings dialog.
categoriesLabelText=_("&Categories:")
categoriesLabel = wx.StaticText(self, label=categoriesLabelText)
# since the categories list and the container both expand in height, the y
# portion is essentially a "min" height.
# These sizes are set manually so that the initial proportions within the dialog look correct. If these sizes are
# not given, then I believe the proportion arguments (as given to the gridBagSizer.AddGrowableColumn) are used
# to set their relative sizes. We want the proportion argument to be used for resizing, but not the initial size.
catListDim = (150, 10)
catListDim = self.scaleSize(catListDim)
initialScaledWidth = self.scaleSize(self.INITIAL_SIZE[0])
spaceForBorderWidth = self.scaleSize(20)
catListWidth = catListDim[0]
containerDim = (initialScaledWidth - catListWidth - spaceForBorderWidth, self.scaleSize(10))
self.catListCtrl = nvdaControls.AutoWidthColumnListCtrl(
self,
autoSizeColumnIndex=0,
size=catListDim,
style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_NO_HEADER
)
# This list consists of only one column.
# The provided column header is just a placeholder, as it is hidden due to the wx.LC_NO_HEADER style flag.
self.catListCtrl.InsertColumn(0,categoriesLabelText)
		# Put the settings panel in a scrolledPanel, since we don't know how large the settings panels might grow. If they
		# exceed the maximum size, it's important that all items can still be accessed visually.
# Save the ID for the panel, this panel will have its name changed when the categories are changed. This name is
# exposed via the IAccessibleName property.
global NvdaSettingsCategoryPanelId
NvdaSettingsCategoryPanelId = wx.NewId()
self.container = scrolledpanel.ScrolledPanel(
parent = self,
id = NvdaSettingsCategoryPanelId,
style = wx.TAB_TRAVERSAL | wx.BORDER_THEME,
size=containerDim
)
		# The min sizes are reset so that the controls can be reduced below their "size" constraint.
self.container.SetMinSize((1,1))
self.catListCtrl.SetMinSize((1,1))
self.containerSizer = wx.BoxSizer(wx.VERTICAL)
self.container.SetSizer(self.containerSizer)
for cls in self.categoryClasses:
if not issubclass(cls,SettingsPanel):
raise RuntimeError("Invalid category class %s provided in %s.categoryClasses"%(cls.__name__,self.__class__.__name__))
# It's important here that the listItems are added to catListCtrl in the same order that they exist in categoryClasses.
# the ListItem index / Id is used to index categoryClasses, and used as the key in catIdToInstanceMap
self.catListCtrl.Append((cls.title,))
# populate the GUI with the initial category
initialCatIndex = 0 if not self.initialCategory else self.categoryClasses.index(self.initialCategory)
self._doCategoryChange(initialCatIndex)
self.catListCtrl.Select(initialCatIndex)
# we must focus the initial category in the category list.
self.catListCtrl.Focus(initialCatIndex)
self.setPostInitFocus = self.container.SetFocus if self.initialCategory else self.catListCtrl.SetFocus
self.gridBagSizer=gridBagSizer=wx.GridBagSizer(
hgap=guiHelper.SPACE_BETWEEN_BUTTONS_HORIZONTAL,
vgap=guiHelper.SPACE_BETWEEN_BUTTONS_VERTICAL
)
# add the label, the categories list, and the settings panel to a 2 by 2 grid.
# The label should span two columns, so that the start of the categories list
# and the start of the settings panel are at the same vertical position.
gridBagSizer.Add(categoriesLabel, pos=(0,0), span=(1,2))
gridBagSizer.Add(self.catListCtrl, pos=(1,0), flag=wx.EXPAND)
gridBagSizer.Add(self.container, pos=(1,1), flag=wx.EXPAND)
# Make the row with the listCtrl and settings panel grow vertically.
gridBagSizer.AddGrowableRow(1)
# Make the columns with the listCtrl and settings panel grow horizontally if the dialog is resized.
# They should grow 1:3, since the settings panel is much more important, and already wider
# than the listCtrl.
gridBagSizer.AddGrowableCol(0, proportion=1)
gridBagSizer.AddGrowableCol(1, proportion=3)
sHelper.sizer.Add(gridBagSizer, flag=wx.EXPAND, proportion=1)
self.container.Layout()
self.catListCtrl.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onCategoryChange)
self.Bind(wx.EVT_CHAR_HOOK, self.onCharHook)
self.Bind(EVT_RW_LAYOUT_NEEDED, self._onPanelLayoutChanged)
def _getCategoryPanel(self, catId):
panel = self.catIdToInstanceMap.get(catId, None)
if not panel:
try:
cls = self.categoryClasses[catId]
except IndexError:
raise ValueError("Unable to create panel for unknown category ID: {}".format(catId))
panel = cls(parent=self.container)
panel.Hide()
self.containerSizer.Add(panel, flag=wx.ALL, border=guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_HORIZONTAL)
self.catIdToInstanceMap[catId] = panel
panelWidth = panel.Size[0]
availableWidth = self.containerSizer.GetSize()[0]
if panelWidth > availableWidth and gui._isDebug():
log.debugWarning(
("Panel width ({1}) too large for: {0} Try to reduce the width of this panel, or increase width of " +
"MultiCategorySettingsDialog.MIN_SIZE"
).format(cls, panel.Size[0])
)
return panel
def postInit(self):
# By default after the dialog is created, focus lands on the button group for wx.Dialogs. However this is not where
# we want focus. We only want to modify focus after creation (makeSettings), but postInit is also called after
# onApply, so we reset the setPostInitFocus function.
if self.setPostInitFocus:
self.setPostInitFocus()
self.setPostInitFocus = None
else:
			# when postInit is called without a setPostInitFocus, i.e. because onApply was called,
# then set the focus to the listCtrl. This is a good starting point for a "fresh state"
self.catListCtrl.SetFocus()
def onCharHook(self,evt):
"""Listens for keyboard input and switches panels for control+tab"""
if not self.catListCtrl:
# Dialog has not yet been constructed.
# Allow another handler to take the event, and return early.
evt.Skip()
return
key = evt.GetKeyCode()
listHadFocus = self.catListCtrl.HasFocus()
if evt.ControlDown() and key==wx.WXK_TAB:
# Focus the categories list. If we don't, the panel won't hide correctly
if not listHadFocus:
self.catListCtrl.SetFocus()
index = self.catListCtrl.GetFirstSelected()
newIndex=index-1 if evt.ShiftDown() else index+1
# Less than first wraps to the last index, greater than last wraps to first index.
newIndex=newIndex % self.catListCtrl.ItemCount
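			# For example (illustrative), with ItemCount == 4: index 0 with shift gives newIndex == -1,
			# which wraps to 3; index 3 without shift gives newIndex == 4, which wraps to 0.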
self.catListCtrl.Select(newIndex)
# we must focus the new selection in the category list to trigger the change of category.
self.catListCtrl.Focus(newIndex)
if not listHadFocus and self.currentCategory:
self.currentCategory.SetFocus()
elif listHadFocus and key == wx.WXK_RETURN:
# The list control captures the return key, but we want it to save the settings.
self.onOk(evt)
else:
evt.Skip()
def _onPanelLayoutChanged(self,evt):
		# call Layout and SetupScrolling on the container so that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
# when child elements get smaller the scrolledPanel does not
# erase the old contents and must be redrawn
self.container.Refresh()
def _doCategoryChange(self, newCatId):
oldCat = self.currentCategory
		# Freeze and Thaw are called to stop visual artifacts while the GUI
		# is being rebuilt. Without this, the controls can sometimes be seen being
		# added.
self.container.Freeze()
try:
newCat = self._getCategoryPanel(newCatId)
except ValueError as e:
newCatTitle = self.catListCtrl.GetItemText(newCatId)
log.error("Unable to change to category: {}".format(newCatTitle), exc_info=e)
return
if oldCat:
oldCat.onPanelDeactivated()
self.currentCategory = newCat
newCat.onPanelActivated()
		# call Layout and SetupScrolling on the container to make sure that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
# Set the label for the container, this is exposed via the Name property on an NVDAObject.
		# For some reason, doing this before SetupScrolling causes this to be ignored by NVDA in some cases.
		# Translators: This is the label for a category within the settings dialog. It is announced when the user presses `ctrl+tab` or `ctrl+shift+tab` while focus is on a control within the NVDA settings dialog. The %s will be replaced with the name of the panel (eg: General, Speech, Braille, etc)
self.container.SetLabel(_("%s Settings Category")%newCat.title)
self.container.Thaw()
def onCategoryChange(self, evt):
currentCat = self.currentCategory
newIndex = evt.GetIndex()
if not currentCat or newIndex != self.categoryClasses.index(currentCat.__class__):
self._doCategoryChange(newIndex)
else:
evt.Skip()
def onOk(self,evt):
for panel in self.catIdToInstanceMap.itervalues():
panel.onSave()
panel.Destroy()
super(MultiCategorySettingsDialog,self).onOk(evt)
def onCancel(self,evt):
for panel in self.catIdToInstanceMap.itervalues():
panel.onDiscard()
panel.Destroy()
super(MultiCategorySettingsDialog,self).onCancel(evt)
def onApply(self,evt):
for panel in self.catIdToInstanceMap.itervalues():
panel.onSave()
super(MultiCategorySettingsDialog,self).onApply(evt)
class GeneralSettingsPanel(SettingsPanel):
# Translators: This is the label for the general settings panel.
title = _("General")
LOG_LEVELS = (
# Translators: One of the log levels of NVDA (the info mode shows info as NVDA runs).
(log.INFO, _("info")),
# Translators: One of the log levels of NVDA (the debug warning shows debugging messages and warnings as NVDA runs).
(log.DEBUGWARNING, _("debug warning")),
# Translators: One of the log levels of NVDA (the input/output shows keyboard commands and/or braille commands as well as speech and/or braille output of NVDA).
(log.IO, _("input/output")),
# Translators: One of the log levels of NVDA (the debug mode shows debug messages as NVDA runs).
(log.DEBUG, _("debug"))
)
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.languageNames = languageHandler.getAvailableLanguages()
languageChoices = [x[1] for x in self.languageNames]
# Translators: The label for a setting in general settings to select NVDA's interface language (once selected, NVDA must be restarted; the option user default means the user's Windows language will be used).
languageLabelText = _("&Language (requires restart to fully take effect):")
self.languageList=settingsSizerHelper.addLabeledControl(languageLabelText, wx.Choice, choices=languageChoices)
self.languageList.SetToolTip(wx.ToolTip("Choose the language NVDA's messages and user interface should be presented in."))
try:
self.oldLanguage=config.conf["general"]["language"]
index=[x[0] for x in self.languageNames].index(self.oldLanguage)
self.languageList.SetSelection(index)
except:
pass
if globalVars.appArgs.secure:
self.languageList.Disable()
# Translators: The label for a setting in general settings to save current configuration when NVDA exits (if it is not checked, user needs to save configuration before quitting NVDA).
self.saveOnExitCheckBox=wx.CheckBox(self,label=_("&Save configuration on exit"))
self.saveOnExitCheckBox.SetValue(config.conf["general"]["saveConfigurationOnExit"])
if globalVars.appArgs.secure:
self.saveOnExitCheckBox.Disable()
settingsSizerHelper.addItem(self.saveOnExitCheckBox)
# Translators: The label for a setting in general settings to ask before quitting NVDA (if not checked, NVDA will exit without asking the user for action).
self.askToExitCheckBox=wx.CheckBox(self,label=_("Sho&w exit options when exiting NVDA"))
self.askToExitCheckBox.SetValue(config.conf["general"]["askToExit"])
settingsSizerHelper.addItem(self.askToExitCheckBox)
# Translators: The label for a setting in general settings to play sounds when NVDA starts or exits.
self.playStartAndExitSoundsCheckBox=wx.CheckBox(self,label=_("&Play sounds when starting or exiting NVDA"))
self.playStartAndExitSoundsCheckBox.SetValue(config.conf["general"]["playStartAndExitSounds"])
settingsSizerHelper.addItem(self.playStartAndExitSoundsCheckBox)
# Translators: The label for a setting in general settings to select logging level of NVDA as it runs (available options and what they are logged are found under comments for the logging level messages themselves).
logLevelLabelText=_("L&ogging level:")
logLevelChoices = [name for level, name in self.LOG_LEVELS]
self.logLevelList = settingsSizerHelper.addLabeledControl(logLevelLabelText, wx.Choice, choices=logLevelChoices)
curLevel = log.getEffectiveLevel()
for index, (level, name) in enumerate(self.LOG_LEVELS):
if level == curLevel:
self.logLevelList.SetSelection(index)
break
else:
log.debugWarning("Could not set log level list to current log level")
		# Translators: The label for a setting in general settings to allow NVDA to start after logging onto Windows (if checked, NVDA will start automatically after logging into Windows; if not, user must start NVDA by pressing the shortcut key (CTRL+Alt+N by default)).
self.startAfterLogonCheckBox = wx.CheckBox(self, label=_("&Automatically start NVDA after I log on to Windows"))
self.startAfterLogonCheckBox.SetValue(config.getStartAfterLogon())
if globalVars.appArgs.secure or not config.isInstalledCopy():
self.startAfterLogonCheckBox.Disable()
settingsSizerHelper.addItem(self.startAfterLogonCheckBox)
# Translators: The label for a setting in general settings to allow NVDA to come up in Windows login screen (useful if user needs to enter passwords or if multiple user accounts are present to allow user to choose the correct account).
self.startOnLogonScreenCheckBox = wx.CheckBox(self, label=_("Use NVDA on the Windows logon screen (requires administrator privileges)"))
self.startOnLogonScreenCheckBox.SetValue(config.getStartOnLogonScreen())
if globalVars.appArgs.secure or not config.canStartOnSecureScreens():
self.startOnLogonScreenCheckBox.Disable()
settingsSizerHelper.addItem(self.startOnLogonScreenCheckBox)
# Translators: The label for a button in general settings to copy current user settings to system settings (to allow current settings to be used in secure screens such as User Account Control (UAC) dialog).
self.copySettingsButton= wx.Button(self, label=_("Use currently saved settings on the logon and other secure screens (requires administrator privileges)"))
self.copySettingsButton.Bind(wx.EVT_BUTTON,self.onCopySettings)
if globalVars.appArgs.secure or not config.canStartOnSecureScreens():
self.copySettingsButton.Disable()
settingsSizerHelper.addItem(self.copySettingsButton)
if updateCheck:
# Translators: The label of a checkbox in general settings to toggle automatic checking for updated versions of NVDA (if not checked, user must check for updates manually).
item=self.autoCheckForUpdatesCheckBox=wx.CheckBox(self,label=_("Automatically check for &updates to NVDA"))
item.Value=config.conf["update"]["autoCheck"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle startup notifications
# for a pending NVDA update.
item=self.notifyForPendingUpdateCheckBox=wx.CheckBox(self,label=_("Notify for &pending update on startup"))
item.Value=config.conf["update"]["startupNotification"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
def onCopySettings(self,evt):
for packageType in ('addons','appModules','globalPlugins','brailleDisplayDrivers','synthDrivers'):
if len(os.listdir(os.path.join(globalVars.appArgs.configPath,packageType)))>0:
if gui.messageBox(
# Translators: A message to warn the user when attempting to copy current settings to system settings.
_("Add-ons were detected in your user settings directory. Copying these to the system profile could be a security risk. Do you still wish to copy your settings?"),
# Translators: The title of the warning dialog displayed when trying to copy settings for use in secure screens.
_("Warning"),wx.YES|wx.NO|wx.ICON_WARNING,self
)==wx.NO:
return
break
progressDialog = gui.IndeterminateProgressDialog(gui.mainFrame,
# Translators: The title of the dialog presented while settings are being copied
_("Copying Settings"),
# Translators: The message displayed while settings are being copied to the system configuration (for use on Windows logon etc)
_("Please wait while settings are copied to the system configuration."))
while True:
try:
gui.ExecAndPump(config.setSystemConfigToCurrentConfig)
res=True
break
except installer.RetriableFailure:
log.debugWarning("Error when copying settings to system config",exc_info=True)
# Translators: a message dialog asking to retry or cancel when copying settings fails
message=_("Unable to copy a file. Perhaps it is currently being used by another process or you have run out of disc space on the drive you are copying to.")
# Translators: the title of a retry cancel dialog when copying settings fails
title=_("Error Copying")
if winUser.MessageBox(None,message,title,winUser.MB_RETRYCANCEL)==winUser.IDRETRY:
continue
res=False
break
except:
log.debugWarning("Error when copying settings to system config",exc_info=True)
res=False
break
progressDialog.done()
del progressDialog
if not res:
# Translators: The message displayed when errors were found while trying to copy current configuration to system settings.
gui.messageBox(_("Error copying NVDA user settings"),_("Error"),wx.OK|wx.ICON_ERROR,self)
else:
# Translators: The message displayed when copying configuration to system settings was successful.
gui.messageBox(_("Successfully copied NVDA user settings"),_("Success"),wx.OK|wx.ICON_INFORMATION,self)
def onSave(self):
newLanguage=[x[0] for x in self.languageNames][self.languageList.GetSelection()]
config.conf["general"]["language"]=newLanguage
config.conf["general"]["saveConfigurationOnExit"]=self.saveOnExitCheckBox.IsChecked()
config.conf["general"]["askToExit"]=self.askToExitCheckBox.IsChecked()
config.conf["general"]["playStartAndExitSounds"]=self.playStartAndExitSoundsCheckBox.IsChecked()
logLevel=self.LOG_LEVELS[self.logLevelList.GetSelection()][0]
config.conf["general"]["loggingLevel"]=logHandler.levelNames[logLevel]
logHandler.setLogLevelFromConfig()
if self.startAfterLogonCheckBox.IsEnabled():
config.setStartAfterLogon(self.startAfterLogonCheckBox.GetValue())
if self.startOnLogonScreenCheckBox.IsEnabled():
try:
config.setStartOnLogonScreen(self.startOnLogonScreenCheckBox.GetValue())
except (WindowsError, RuntimeError):
gui.messageBox(_("This change requires administrator privileges."), _("Insufficient Privileges"), style=wx.OK | wx.ICON_ERROR, parent=self)
if updateCheck:
config.conf["update"]["autoCheck"]=self.autoCheckForUpdatesCheckBox.IsChecked()
config.conf["update"]["startupNotification"]=self.notifyForPendingUpdateCheckBox.IsChecked()
updateCheck.terminate()
updateCheck.initialize()
if self.oldLanguage!=newLanguage:
if gui.messageBox(
# Translators: The message displayed after NVDA interface language has been changed.
_("For the new language to take effect, the configuration must be saved and NVDA must be restarted. Press enter to save and restart NVDA, or cancel to manually save and exit at a later time."),
# Translators: The title of the dialog which appears when the user changed NVDA's interface language.
_("Language Configuration Change"),wx.OK|wx.CANCEL|wx.ICON_WARNING,self
)==wx.OK:
config.conf.save()
queueHandler.queueFunction(queueHandler.eventQueue,core.restart)
class SpeechSettingsPanel(SettingsPanel):
# Translators: This is the label for the speech panel
title = _("Speech")
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the synthesizer on the speech panel.
synthLabel = _("&Synthesizer")
synthGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=synthLabel), wx.HORIZONTAL))
settingsSizerHelper.addItem(synthGroup)
# Use a ExpandoTextCtrl because even when readonly it accepts focus from keyboard, which
# standard readonly TextCtrl does not. ExpandoTextCtrl is a TE_MULTILINE control, however
# by default it renders as a single line. Standard TextCtrl with TE_MULTILINE has two lines,
		# and a vertical scroll bar. This is not necessary for the single line of text we wish to
# display here.
synthDesc = getSynth().description
self.synthNameCtrl = ExpandoTextCtrl(self, size=(self.scaleSize(250), -1), value=synthDesc, style=wx.TE_READONLY)
# Translators: This is the label for the button used to change synthesizer,
# it appears in the context of a synthesizer group on the speech settings panel.
changeSynthBtn = wx.Button(self, label=_("C&hange..."))
synthGroup.addItem(
guiHelper.associateElements(
self.synthNameCtrl,
changeSynthBtn
)
)
changeSynthBtn.Bind(wx.EVT_BUTTON,self.onChangeSynth)
self.voicePanel = VoiceSettingsPanel(self)
settingsSizerHelper.addItem(self.voicePanel)
def onChangeSynth(self, evt):
changeSynth = SynthesizerSelectionDialog(self, multiInstanceAllowed=True)
ret = changeSynth.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentSynth(self):
synthDesc = getSynth().description
self.synthNameCtrl.SetValue(synthDesc)
def onPanelActivated(self):
		# call super after all panel updates have been completed; we don't want the panel to show until this is complete.
self.voicePanel.onPanelActivated()
super(SpeechSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.voicePanel.onPanelDeactivated()
super(SpeechSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.voicePanel.onDiscard()
def onSave(self):
self.voicePanel.onSave()
class SynthesizerSelectionDialog(SettingsDialog):
# Translators: This is the label for the synthesizer selection dialog
title = _("Select Synthesizer")
synthNames = []
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is a label for the select
# synthesizer combobox in the synthesizer dialog.
synthListLabelText=_("&Synthesizer:")
self.synthList = settingsSizerHelper.addLabeledControl(synthListLabelText, wx.Choice, choices=[])
self.updateSynthesizerList()
# Translators: This is the label for the select output
		# device combo in the synthesizer dialog. Examples of
		# an output device are default soundcard, usb
		# headphones, etc.
deviceListLabelText = _("Output &device:")
deviceNames=nvwave.getOutputDeviceNames()
self.deviceList = settingsSizerHelper.addLabeledControl(deviceListLabelText, wx.Choice, choices=deviceNames)
try:
selection = deviceNames.index(config.conf["speech"]["outputDevice"])
except ValueError:
selection = 0
self.deviceList.SetSelection(selection)
# Translators: This is a label for the audio ducking combo box in the Synthesizer Settings dialog.
duckingListLabelText=_("Audio &ducking mode:")
self.duckingList=settingsSizerHelper.addLabeledControl(duckingListLabelText, wx.Choice, choices=audioDucking.audioDuckingModes)
index=config.conf['audio']['audioDuckingMode']
self.duckingList.SetSelection(index)
if not audioDucking.isAudioDuckingSupported():
self.duckingList.Disable()
def postInit(self):
# Finally, ensure that focus is on the synthlist
self.synthList.SetFocus()
def updateSynthesizerList(self):
driverList=getSynthList()
self.synthNames=[x[0] for x in driverList]
options=[x[1] for x in driverList]
self.synthList.Clear()
self.synthList.AppendItems(options)
try:
index=self.synthNames.index(getSynth().name)
self.synthList.SetSelection(index)
except:
pass
def onOk(self, evt):
if not self.synthNames:
# The list of synths has not been populated yet, so we didn't change anything in this panel
return
config.conf["speech"]["outputDevice"]=self.deviceList.GetStringSelection()
newSynth=self.synthNames[self.synthList.GetSelection()]
if not setSynth(newSynth):
# Translators: This message is presented when
# NVDA is unable to load the selected
# synthesizer.
gui.messageBox(_("Could not load the %s synthesizer.")%newSynth,_("Synthesizer Error"),wx.OK|wx.ICON_WARNING,self)
return
if audioDucking.isAudioDuckingSupported():
index=self.duckingList.GetSelection()
config.conf['audio']['audioDuckingMode']=index
audioDucking.setAudioDuckingMode(index)
if self.IsModal():
# Hack: we need to update the synth in our parent window before closing.
# Otherwise, NVDA will report the old synth even though the new synth is reflected visually.
self.Parent.updateCurrentSynth()
super(SynthesizerSelectionDialog, self).onOk(evt)
class SynthSettingChanger(object):
"""Functor which acts as calback for GUI events."""
def __init__(self,setting):
self.setting=setting
def __call__(self,evt):
val=evt.GetSelection()
setattr(getSynth(),self.setting.name,val)
class StringSynthSettingChanger(SynthSettingChanger):
"""Same as L{SynthSettingChanger} but handles combobox events."""
def __init__(self,setting,panel):
self.panel=panel
super(StringSynthSettingChanger,self).__init__(setting)
def __call__(self,evt):
if self.setting.name=="voice":
# Cancel speech first so that the voice will change immediately instead of the change being queued.
speech.cancelSpeech()
changeVoice(getSynth(),getattr(self.panel,"_%ss"%self.setting.name)[evt.GetSelection()].ID)
self.panel.updateVoiceSettings(changedSetting=self.setting.name)
else:
setattr(getSynth(),self.setting.name,getattr(self.panel,"_%ss"%self.setting.name)[evt.GetSelection()].ID)
class VoiceSettingsSlider(wx.Slider):
def __init__(self,*args, **kwargs):
super(VoiceSettingsSlider,self).__init__(*args,**kwargs)
self.Bind(wx.EVT_CHAR, self.onSliderChar)
def SetValue(self,i):
super(VoiceSettingsSlider, self).SetValue(i)
evt = wx.CommandEvent(wx.wxEVT_COMMAND_SLIDER_UPDATED,self.GetId())
evt.SetInt(i)
self.ProcessEvent(evt)
# HACK: Win events don't seem to be sent for certain explicitly set values,
# so send our own win event.
# This will cause duplicates in some cases, but NVDA will filter them out.
winUser.user32.NotifyWinEvent(winUser.EVENT_OBJECT_VALUECHANGE,self.Handle,winUser.OBJID_CLIENT,winUser.CHILDID_SELF)
def onSliderChar(self, evt):
key = evt.KeyCode
if key == wx.WXK_UP:
newValue = min(self.Value + self.LineSize, self.Max)
elif key == wx.WXK_DOWN:
newValue = max(self.Value - self.LineSize, self.Min)
elif key == wx.WXK_PRIOR:
newValue = min(self.Value + self.PageSize, self.Max)
elif key == wx.WXK_NEXT:
newValue = max(self.Value - self.PageSize, self.Min)
elif key == wx.WXK_HOME:
newValue = self.Max
elif key == wx.WXK_END:
newValue = self.Min
else:
evt.Skip()
return
self.SetValue(newValue)
class VoiceSettingsPanel(SettingsPanel):
# Translators: This is the label for the voice settings panel.
title = _("Voice")
@classmethod
def _setSliderStepSizes(cls, slider, setting):
slider.SetLineSize(setting.minStep)
slider.SetPageSize(setting.largeStep)
def makeSettingControl(self,setting):
"""Constructs appropriate GUI controls for given L{SynthSetting} such as label and slider.
@param setting: Setting to construct controls for
@type setting: L{SynthSetting}
@returns: WXSizer containing newly created controls.
@rtype: L{wx.BoxSizer}
"""
sizer=wx.BoxSizer(wx.HORIZONTAL)
label=wx.StaticText(self,wx.ID_ANY,label="%s:"%setting.displayNameWithAccelerator)
slider=VoiceSettingsSlider(self,wx.ID_ANY,minValue=0,maxValue=100)
setattr(self,"%sSlider"%setting.name,slider)
slider.Bind(wx.EVT_SLIDER,SynthSettingChanger(setting))
self._setSliderStepSizes(slider,setting)
slider.SetValue(getattr(getSynth(),setting.name))
sizer.Add(label)
sizer.Add(slider)
if self.lastControl:
slider.MoveAfterInTabOrder(self.lastControl)
self.lastControl=slider
return sizer
def makeStringSettingControl(self,setting):
"""Same as L{makeSettingControl} but for string settings. Returns sizer with label and combobox."""
labelText="%s:"%setting.displayNameWithAccelerator
synth=getSynth()
setattr(self,"_%ss"%setting.name,getattr(synth,"available%ss"%setting.name.capitalize()).values())
l=getattr(self,"_%ss"%setting.name)###
labeledControl=guiHelper.LabeledControlHelper(self, labelText, wx.Choice, choices=[x.name for x in l])
lCombo = labeledControl.control
setattr(self,"%sList"%setting.name,lCombo)
try:
cur=getattr(synth,setting.name)
i=[x.ID for x in l].index(cur)
lCombo.SetSelection(i)
except ValueError:
pass
lCombo.Bind(wx.EVT_CHOICE,StringSynthSettingChanger(setting,self))
if self.lastControl:
lCombo.MoveAfterInTabOrder(self.lastControl)
self.lastControl=lCombo
return labeledControl.sizer
def makeBooleanSettingControl(self,setting):
"""Same as L{makeSettingControl} but for boolean settings. Returns checkbox."""
checkbox=wx.CheckBox(self,wx.ID_ANY,label=setting.displayNameWithAccelerator)
setattr(self,"%sCheckbox"%setting.name,checkbox)
checkbox.Bind(wx.EVT_CHECKBOX,
lambda evt: setattr(getSynth(),setting.name,evt.IsChecked()))
checkbox.SetValue(getattr(getSynth(),setting.name))
if self.lastControl:
checkbox.MoveAfterInTabOrder(self.lastControl)
self.lastControl=checkbox
return checkbox
def onPanelActivated(self):
		if getSynth().name != self._synth.name:
if gui._isDebug():
log.debug("refreshing voice panel")
self.sizerDict.clear()
self.settingsSizer.Clear(deleteWindows=True)
self.makeSettings(self.settingsSizer)
super(VoiceSettingsPanel,self).onPanelActivated()
def makeSettings(self, settingsSizer):
self.sizerDict={}
self.lastControl=None
#Create controls for Synth Settings
self.updateVoiceSettings()
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, text will be read using the voice for the language of the text).
autoLanguageSwitchingText = _("Automatic language switching (when supported)")
self.autoLanguageSwitchingCheckbox = settingsSizerHelper.addItem(wx.CheckBox(self,label=autoLanguageSwitchingText))
self.autoLanguageSwitchingCheckbox.SetValue(config.conf["speech"]["autoLanguageSwitching"])
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, different voices for dialects will be used to read text in that dialect).
autoDialectSwitchingText =_("Automatic dialect switching (when supported)")
self.autoDialectSwitchingCheckbox=settingsSizerHelper.addItem(wx.CheckBox(self,label=autoDialectSwitchingText))
self.autoDialectSwitchingCheckbox.SetValue(config.conf["speech"]["autoDialectSwitching"])
# Translators: This is the label for a combobox in the
# voice settings panel (possible choices are none, some, most and all).
punctuationLabelText = _("Punctuation/symbol &level:")
symbolLevelLabels=characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
symbolLevelChoices =[symbolLevelLabels[level] for level in characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS]
self.symbolLevelList = settingsSizerHelper.addLabeledControl(punctuationLabelText, wx.Choice, choices=symbolLevelChoices)
curLevel = config.conf["speech"]["symbolLevel"]
self.symbolLevelList.SetSelection(characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS.index(curLevel))
		# Translators: This is the label for a checkbox in the
		# voice settings panel (if checked, the voice's language will be trusted when processing characters and symbols).
trustVoiceLanguageText = _("Trust voice's language when processing characters and symbols")
self.trustVoiceLanguageCheckbox = settingsSizerHelper.addItem(wx.CheckBox(self,label=trustVoiceLanguageText))
self.trustVoiceLanguageCheckbox.SetValue(config.conf["speech"]["trustVoiceLanguage"])
# Translators: This is a label for a setting in voice settings (an edit box to change voice pitch for capital letters; the higher the value, the pitch will be higher).
capPitchChangeLabelText=_("Capital pitch change percentage")
self.capPitchChangeEdit=settingsSizerHelper.addLabeledControl(capPitchChangeLabelText, nvdaControls.SelectOnFocusSpinCtrl,
min=int(config.conf.getConfigValidationParameter(["speech", getSynth().name, "capPitchChange"], "min")),
max=int(config.conf.getConfigValidationParameter(["speech", getSynth().name, "capPitchChange"], "max")),
initial=config.conf["speech"][getSynth().name]["capPitchChange"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
sayCapForCapsText = _("Say &cap before capitals")
self.sayCapForCapsCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self,label=sayCapForCapsText))
self.sayCapForCapsCheckBox.SetValue(config.conf["speech"][getSynth().name]["sayCapForCapitals"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
beepForCapsText =_("&Beep for capitals")
self.beepForCapsCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self, label = beepForCapsText))
self.beepForCapsCheckBox.SetValue(config.conf["speech"][getSynth().name]["beepForCapitals"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
useSpellingFunctionalityText = _("Use &spelling functionality if supported")
self.useSpellingFunctionalityCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self, label = useSpellingFunctionalityText))
self.useSpellingFunctionalityCheckBox.SetValue(config.conf["speech"][getSynth().name]["useSpellingFunctionality"])
def updateVoiceSettings(self, changedSetting=None):
"""Creates, hides or updates existing GUI controls for all of supported settings."""
synth=self._synth=getSynth()
#firstly check already created options
for name,sizer in self.sizerDict.iteritems():
if name == changedSetting:
# Changing a setting shouldn't cause that setting itself to disappear.
continue
if not synth.isSupported(name):
self.settingsSizer.Hide(sizer)
#Create new controls, update already existing
for setting in synth.supportedSettings:
if setting.name == changedSetting:
# Changing a setting shouldn't cause that setting's own values to change.
continue
if setting.name in self.sizerDict: #update a value
self.settingsSizer.Show(self.sizerDict[setting.name])
if isinstance(setting,NumericSynthSetting):
getattr(self,"%sSlider"%setting.name).SetValue(getattr(synth,setting.name))
elif isinstance(setting,BooleanSynthSetting):
getattr(self,"%sCheckbox"%setting.name).SetValue(getattr(synth,setting.name))
else:
l=getattr(self,"_%ss"%setting.name)
lCombo=getattr(self,"%sList"%setting.name)
try:
cur=getattr(synth,setting.name)
i=[x.ID for x in l].index(cur)
lCombo.SetSelection(i)
except ValueError:
pass
else: #create a new control
if isinstance(setting,NumericSynthSetting):
settingMaker=self.makeSettingControl
elif isinstance(setting,BooleanSynthSetting):
settingMaker=self.makeBooleanSettingControl
else:
settingMaker=self.makeStringSettingControl
s=settingMaker(setting)
self.sizerDict[setting.name]=s
self.settingsSizer.Insert(len(self.sizerDict)-1,s,border=10,flag=wx.BOTTOM)
#Update graphical layout of the dialog
self.settingsSizer.Layout()
def onDiscard(self):
#unbind change events for string settings as wx closes combo boxes on cancel
for setting in getSynth().supportedSettings:
if isinstance(setting,(NumericSynthSetting,BooleanSynthSetting)): continue
getattr(self,"%sList"%setting.name).Unbind(wx.EVT_CHOICE)
#restore settings
getSynth().loadSettings()
super(VoiceSettingsPanel,self).onDiscard()
def onSave(self):
synth = getSynth()
synth.saveSettings()
config.conf["speech"]["autoLanguageSwitching"]=self.autoLanguageSwitchingCheckbox.IsChecked()
config.conf["speech"]["autoDialectSwitching"]=self.autoDialectSwitchingCheckbox.IsChecked()
config.conf["speech"]["symbolLevel"]=characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS[self.symbolLevelList.GetSelection()]
config.conf["speech"]["trustVoiceLanguage"]=self.trustVoiceLanguageCheckbox.IsChecked()
config.conf["speech"][synth.name]["capPitchChange"]=self.capPitchChangeEdit.Value
config.conf["speech"][synth.name]["sayCapForCapitals"]=self.sayCapForCapsCheckBox.IsChecked()
config.conf["speech"][synth.name]["beepForCapitals"]=self.beepForCapsCheckBox.IsChecked()
config.conf["speech"][synth.name]["useSpellingFunctionality"]=self.useSpellingFunctionalityCheckBox.IsChecked()
class KeyboardSettingsPanel(SettingsPanel):
# Translators: This is the label for the keyboard settings panel.
title = _("Keyboard")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a combobox in the
# keyboard settings panel.
kbdLabelText = _("&Keyboard layout:")
layouts=keyboardHandler.KeyboardInputGesture.LAYOUTS
self.kbdNames=sorted(layouts)
kbdChoices = [layouts[layout] for layout in self.kbdNames]
self.kbdList=sHelper.addLabeledControl(kbdLabelText, wx.Choice, choices=kbdChoices)
try:
index=self.kbdNames.index(config.conf['keyboard']['keyboardLayout'])
self.kbdList.SetSelection(index)
except:
log.debugWarning("Could not set Keyboard layout list to current layout",exc_info=True)
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
capsAsNVDAText = _("Use CapsLock as an NVDA modifier key")
self.capsAsNVDAModifierCheckBox=sHelper.addItem(wx.CheckBox(self,label=capsAsNVDAText))
self.capsAsNVDAModifierCheckBox.SetValue(config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
numpadInsertAsModText = _("Use numpad Insert as an NVDA modifier key")
self.numpadInsertAsNVDAModifierCheckBox=sHelper.addItem(wx.CheckBox(self,label=numpadInsertAsModText))
self.numpadInsertAsNVDAModifierCheckBox.SetValue(config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
extendedInsertAsModText = _("Use extended Insert as an NVDA modifier key")
self.extendedInsertAsNVDAModifierCheckBox=sHelper.addItem(wx.CheckBox(self,label=extendedInsertAsModText))
self.extendedInsertAsNVDAModifierCheckBox.SetValue(config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
charsText = _("Speak typed &characters")
self.charsCheckBox=sHelper.addItem(wx.CheckBox(self,label=charsText))
self.charsCheckBox.SetValue(config.conf["keyboard"]["speakTypedCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speakTypedWordsText = _("Speak typed &words")
self.wordsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speakTypedWordsText))
self.wordsCheckBox.SetValue(config.conf["keyboard"]["speakTypedWords"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForCharText = _("Speech interrupt for typed characters")
self.speechInterruptForCharsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForCharText))
self.speechInterruptForCharsCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForEnterText = _("Speech interrupt for Enter key")
self.speechInterruptForEnterCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForEnterText))
self.speechInterruptForEnterCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForEnter"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
allowSkimReadingInSayAllText = _("Allow skim &reading in Say All")
self.skimReadingInSayAllCheckBox=sHelper.addItem(wx.CheckBox(self,label=allowSkimReadingInSayAllText))
self.skimReadingInSayAllCheckBox.SetValue(config.conf["keyboard"]["allowSkimReadingInSayAll"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
beepForLowercaseWithCapsLockText = _("Beep if typing lowercase letters when caps lock is on")
self.beepLowercaseCheckBox=sHelper.addItem(wx.CheckBox(self,label=beepForLowercaseWithCapsLockText))
self.beepLowercaseCheckBox.SetValue(config.conf["keyboard"]["beepForLowercaseWithCapslock"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
commandKeysText = _("Speak command &keys")
self.commandKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=commandKeysText))
self.commandKeysCheckBox.SetValue(config.conf["keyboard"]["speakCommandKeys"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
alertForSpellingErrorsText = _("Play sound for &spelling errors while typing")
self.alertForSpellingErrorsCheckBox=sHelper.addItem(wx.CheckBox(self,label=alertForSpellingErrorsText))
self.alertForSpellingErrorsCheckBox.SetValue(config.conf["keyboard"]["alertForSpellingErrors"])
if not config.conf["documentFormatting"]["reportSpellingErrors"]:
self.alertForSpellingErrorsCheckBox.Disable()
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
handleInjectedKeysText = _("Handle keys from other &applications")
self.handleInjectedKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=handleInjectedKeysText))
self.handleInjectedKeysCheckBox.SetValue(config.conf["keyboard"]["handleInjectedKeys"])
def onSave(self):
		# #2871: check whether at least one key is the NVDA key.
if not self.capsAsNVDAModifierCheckBox.IsChecked() and not self.numpadInsertAsNVDAModifierCheckBox.IsChecked() and not self.extendedInsertAsNVDAModifierCheckBox.IsChecked():
log.debugWarning("No NVDA key set")
gui.messageBox(
# Translators: Message to report wrong configuration of the NVDA key
_("At least one key must be used as the NVDA key."),
# Translators: The title of the message box
_("Error"), wx.OK|wx.ICON_ERROR,self)
return
layout=self.kbdNames[self.kbdList.GetSelection()]
config.conf['keyboard']['keyboardLayout']=layout
config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"]=self.capsAsNVDAModifierCheckBox.IsChecked()
config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]=self.numpadInsertAsNVDAModifierCheckBox.IsChecked()
config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"]=self.extendedInsertAsNVDAModifierCheckBox.IsChecked()
config.conf["keyboard"]["speakTypedCharacters"]=self.charsCheckBox.IsChecked()
config.conf["keyboard"]["speakTypedWords"]=self.wordsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForCharacters"]=self.speechInterruptForCharsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForEnter"]=self.speechInterruptForEnterCheckBox.IsChecked()
config.conf["keyboard"]["allowSkimReadingInSayAll"]=self.skimReadingInSayAllCheckBox.IsChecked()
config.conf["keyboard"]["beepForLowercaseWithCapslock"]=self.beepLowercaseCheckBox.IsChecked()
config.conf["keyboard"]["speakCommandKeys"]=self.commandKeysCheckBox.IsChecked()
config.conf["keyboard"]["alertForSpellingErrors"]=self.alertForSpellingErrorsCheckBox.IsChecked()
config.conf["keyboard"]["handleInjectedKeys"]=self.handleInjectedKeysCheckBox.IsChecked()
class MouseSettingsPanel(SettingsPanel):
# Translators: This is the label for the mouse settings panel.
title = _("Mouse")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
shapeChangesText = _("Report mouse &shape changes")
self.shapeCheckBox=sHelper.addItem(wx.CheckBox(self,label=shapeChangesText))
self.shapeCheckBox.SetValue(config.conf["mouse"]["reportMouseShapeChanges"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
mouseTrackingText=_("Enable mouse &tracking")
self.mouseTrackingCheckBox=sHelper.addItem(wx.CheckBox(self,label=mouseTrackingText))
self.mouseTrackingCheckBox.SetValue(config.conf["mouse"]["enableMouseTracking"])
# Translators: This is the label for a combobox in the
# mouse settings panel.
textUnitLabelText=_("Text &unit resolution:")
import textInfos
self.textUnits=[textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD,textInfos.UNIT_LINE,textInfos.UNIT_PARAGRAPH]
textUnitsChoices = [textInfos.unitLabels[x] for x in self.textUnits]
self.textUnitComboBox=sHelper.addLabeledControl(textUnitLabelText, wx.Choice, choices=textUnitsChoices)
try:
index=self.textUnits.index(config.conf["mouse"]["mouseTextUnit"])
except:
index=0
self.textUnitComboBox.SetSelection(index)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
reportObjectRoleText = _("Report &role when mouse enters object")
self.reportObjectRoleCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportObjectRoleText))
self.reportObjectRoleCheckBox.SetValue(config.conf["mouse"]["reportObjectRoleOnMouseEnter"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioText = _("&Play audio coordinates when mouse moves")
self.audioCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioText))
self.audioCheckBox.SetValue(config.conf["mouse"]["audioCoordinatesOnMouseMove"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioDetectBrightnessText = _("&Brightness controls audio coordinates volume")
self.audioDetectBrightnessCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioDetectBrightnessText))
self.audioDetectBrightnessCheckBox.SetValue(config.conf["mouse"]["audioCoordinates_detectBrightness"])
def onSave(self):
config.conf["mouse"]["reportMouseShapeChanges"]=self.shapeCheckBox.IsChecked()
config.conf["mouse"]["enableMouseTracking"]=self.mouseTrackingCheckBox.IsChecked()
config.conf["mouse"]["mouseTextUnit"]=self.textUnits[self.textUnitComboBox.GetSelection()]
config.conf["mouse"]["reportObjectRoleOnMouseEnter"]=self.reportObjectRoleCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinatesOnMouseMove"]=self.audioCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinates_detectBrightness"]=self.audioDetectBrightnessCheckBox.IsChecked()
class ReviewCursorPanel(SettingsPanel):
# Translators: This is the label for the review cursor settings panel.
title = _("Review Cursor")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followFocusCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Follow system &focus"))
self.followFocusCheckBox.SetValue(config.conf["reviewCursor"]["followFocus"])
settingsSizer.Add(self.followFocusCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followCaretCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Follow System &Caret"))
self.followCaretCheckBox.SetValue(config.conf["reviewCursor"]["followCaret"])
settingsSizer.Add(self.followCaretCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followMouseCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Follow &mouse cursor"))
self.followMouseCheckBox.SetValue(config.conf["reviewCursor"]["followMouse"])
settingsSizer.Add(self.followMouseCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.simpleReviewModeCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Simple review mode"))
self.simpleReviewModeCheckBox.SetValue(config.conf["reviewCursor"]["simpleReviewMode"])
settingsSizer.Add(self.simpleReviewModeCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["reviewCursor"]["followFocus"]=self.followFocusCheckBox.IsChecked()
config.conf["reviewCursor"]["followCaret"]=self.followCaretCheckBox.IsChecked()
config.conf["reviewCursor"]["followMouse"]=self.followMouseCheckBox.IsChecked()
config.conf["reviewCursor"]["simpleReviewMode"]=self.simpleReviewModeCheckBox.IsChecked()
class InputCompositionPanel(SettingsPanel):
# Translators: This is the label for the Input Composition settings panel.
title = _("Input Composition")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.autoReportAllCandidatesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Automatically report all available &candidates"))
self.autoReportAllCandidatesCheckBox.SetValue(config.conf["inputComposition"]["autoReportAllCandidates"])
settingsSizer.Add(self.autoReportAllCandidatesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.announceSelectedCandidateCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Announce &selected candidate"))
self.announceSelectedCandidateCheckBox.SetValue(config.conf["inputComposition"]["announceSelectedCandidate"])
settingsSizer.Add(self.announceSelectedCandidateCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.candidateIncludesShortCharacterDescriptionCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Always include short character &description when announcing candidates"))
self.candidateIncludesShortCharacterDescriptionCheckBox.SetValue(config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"])
settingsSizer.Add(self.candidateIncludesShortCharacterDescriptionCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportReadingStringChangesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Report changes to the &reading string"))
self.reportReadingStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportReadingStringChanges"])
settingsSizer.Add(self.reportReadingStringChangesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportCompositionStringChangesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Report changes to the &composition string"))
self.reportCompositionStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportCompositionStringChanges"])
settingsSizer.Add(self.reportCompositionStringChangesCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["inputComposition"]["autoReportAllCandidates"]=self.autoReportAllCandidatesCheckBox.IsChecked()
config.conf["inputComposition"]["announceSelectedCandidate"]=self.announceSelectedCandidateCheckBox.IsChecked()
config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]=self.candidateIncludesShortCharacterDescriptionCheckBox.IsChecked()
config.conf["inputComposition"]["reportReadingStringChanges"]=self.reportReadingStringChangesCheckBox.IsChecked()
config.conf["inputComposition"]["reportCompositionStringChanges"]=self.reportCompositionStringChangesCheckBox.IsChecked()
class ObjectPresentationPanel(SettingsPanel):
# Translators: This is the label for the object presentation panel.
title = _("Object Presentation")
progressLabels = (
# Translators: An option for progress bar output in the Object Presentation dialog
# which disables reporting of progress bars.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("off", _("off")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by speaking.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("speak", _("Speak")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("beep", _("Beep")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by both speaking and beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("both", _("Speak and beep")),
)
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportToolTipsText = _("Report &tooltips")
self.tooltipCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportToolTipsText))
self.tooltipCheckBox.SetValue(config.conf["presentation"]["reportTooltips"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
balloonText = _("Report &help balloons")
self.balloonCheckBox=sHelper.addItem(wx.CheckBox(self,label=balloonText))
self.balloonCheckBox.SetValue(config.conf["presentation"]["reportHelpBalloons"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
shortcutText = _("Report object shortcut &keys")
self.shortcutCheckBox=sHelper.addItem(wx.CheckBox(self,label=shortcutText))
self.shortcutCheckBox.SetValue(config.conf["presentation"]["reportKeyboardShortcuts"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
positionInfoText = _("Report object &position information")
self.positionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=positionInfoText))
self.positionInfoCheckBox.SetValue(config.conf["presentation"]["reportObjectPositionInformation"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
guessPositionInfoText=_("Guess object &position information when unavailable")
self.guessPositionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=guessPositionInfoText))
self.guessPositionInfoCheckBox.SetValue(config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
descriptionText = _("Report object &descriptions")
self.descriptionCheckBox=sHelper.addItem(wx.CheckBox(self,label=descriptionText))
self.descriptionCheckBox.SetValue(config.conf["presentation"]["reportObjectDescriptions"])
# Translators: This is the label for a combobox in the
# object presentation settings panel.
progressLabelText = _("Progress &bar output:")
progressChoices = [name for setting, name in self.progressLabels]
self.progressList=sHelper.addLabeledControl(progressLabelText, wx.Choice, choices=progressChoices)
for index, (setting, name) in enumerate(self.progressLabels):
if setting == config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]:
self.progressList.SetSelection(index)
break
else:
log.debugWarning("Could not set progress list to current report progress bar updates setting")
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportBackgroundProgressBarsText = _("Report background progress bars")
self.reportBackgroundProgressBarsCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportBackgroundProgressBarsText))
self.reportBackgroundProgressBarsCheckBox.SetValue(config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
dynamicContentText = _("Report dynamic &content changes")
self.dynamicContentCheckBox=sHelper.addItem(wx.CheckBox(self,label=dynamicContentText))
self.dynamicContentCheckBox.SetValue(config.conf["presentation"]["reportDynamicContentChanges"])
# Translators: This is the label for a combobox in the
# object presentation settings panel.
autoSuggestionsLabelText = _("Play a sound when &auto-suggestions appear")
self.autoSuggestionSoundsCheckBox=sHelper.addItem(wx.CheckBox(self,label=autoSuggestionsLabelText))
self.autoSuggestionSoundsCheckBox.SetValue(config.conf["presentation"]["reportAutoSuggestionsWithSound"])
def onSave(self):
config.conf["presentation"]["reportTooltips"]=self.tooltipCheckBox.IsChecked()
config.conf["presentation"]["reportHelpBalloons"]=self.balloonCheckBox.IsChecked()
config.conf["presentation"]["reportKeyboardShortcuts"]=self.shortcutCheckBox.IsChecked()
config.conf["presentation"]["reportObjectPositionInformation"]=self.positionInfoCheckBox.IsChecked()
config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"]=self.guessPositionInfoCheckBox.IsChecked()
config.conf["presentation"]["reportObjectDescriptions"]=self.descriptionCheckBox.IsChecked()
config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]=self.progressLabels[self.progressList.GetSelection()][0]
config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"]=self.reportBackgroundProgressBarsCheckBox.IsChecked()
config.conf["presentation"]["reportDynamicContentChanges"]=self.dynamicContentCheckBox.IsChecked()
config.conf["presentation"]["reportAutoSuggestionsWithSound"]=self.autoSuggestionSoundsCheckBox.IsChecked()
class BrowseModePanel(SettingsPanel):
# Translators: This is the label for the browse mode settings panel.
title = _("Browse Mode")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a textfield in the
# browse mode settings panel.
maxLengthLabel=wx.StaticText(self,-1,label=_("&Maximum number of characters on one line"))
settingsSizer.Add(maxLengthLabel)
self.maxLengthEdit=nvdaControls.SelectOnFocusSpinCtrl(self,
min=10, max=250, # min and max are not enforced in the config for virtualBuffers.maxLineLength
initial=config.conf["virtualBuffers"]["maxLineLength"])
settingsSizer.Add(self.maxLengthEdit,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a textfield in the
# browse mode settings panel.
pageLinesLabel=wx.StaticText(self,-1,label=_("&Number of lines per page"))
settingsSizer.Add(pageLinesLabel)
self.pageLinesEdit=nvdaControls.SelectOnFocusSpinCtrl(self,
min=5, max=150, # min and max are not enforced in the config for virtualBuffers.linesPerPage
initial=config.conf["virtualBuffers"]["linesPerPage"])
settingsSizer.Add(self.pageLinesEdit,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.useScreenLayoutCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Use &screen layout (when supported)"))
self.useScreenLayoutCheckBox.SetValue(config.conf["virtualBuffers"]["useScreenLayout"])
settingsSizer.Add(self.useScreenLayoutCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.autoSayAllCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Automatic &Say All on page load"))
self.autoSayAllCheckBox.SetValue(config.conf["virtualBuffers"]["autoSayAllOnPageLoad"])
settingsSizer.Add(self.autoSayAllCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.layoutTablesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("Include l&ayout tables"))
self.layoutTablesCheckBox.SetValue(config.conf["documentFormatting"]["includeLayoutTables"])
settingsSizer.Add(self.layoutTablesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.autoPassThroughOnFocusChangeCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Automatic focus mode for focus changes"))
self.autoPassThroughOnFocusChangeCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
settingsSizer.Add(self.autoPassThroughOnFocusChangeCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.autoPassThroughOnCaretMoveCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Automatic focus mode for caret movement"))
self.autoPassThroughOnCaretMoveCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
settingsSizer.Add(self.autoPassThroughOnCaretMoveCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.passThroughAudioIndicationCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Audio indication of focus and browse modes"))
self.passThroughAudioIndicationCheckBox.SetValue(config.conf["virtualBuffers"]["passThroughAudioIndication"])
settingsSizer.Add(self.passThroughAudioIndicationCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
self.trapNonCommandGesturesCheckBox=wx.CheckBox(self,wx.NewId(),label=_("&Trap all non-command gestures from reaching the document"))
self.trapNonCommandGesturesCheckBox.SetValue(config.conf["virtualBuffers"]["trapNonCommandGestures"])
settingsSizer.Add(self.trapNonCommandGesturesCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["virtualBuffers"]["maxLineLength"]=self.maxLengthEdit.GetValue()
config.conf["virtualBuffers"]["linesPerPage"]=self.pageLinesEdit.GetValue()
config.conf["virtualBuffers"]["useScreenLayout"]=self.useScreenLayoutCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoSayAllOnPageLoad"]=self.autoSayAllCheckBox.IsChecked()
config.conf["documentFormatting"]["includeLayoutTables"]=self.layoutTablesCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"]=self.autoPassThroughOnFocusChangeCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]=self.autoPassThroughOnCaretMoveCheckBox.IsChecked()
config.conf["virtualBuffers"]["passThroughAudioIndication"]=self.passThroughAudioIndicationCheckBox.IsChecked()
config.conf["virtualBuffers"]["trapNonCommandGestures"]=self.trapNonCommandGesturesCheckBox.IsChecked()
class DocumentFormattingPanel(SettingsPanel):
# Translators: This is the label for the document formatting panel.
title = _("Document Formatting")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is a label appearing on the document formatting settings panel.
panelText =_("The following options control the types of document formatting reported by NVDA.")
sHelper.addItem(wx.StaticText(self, label=panelText))
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
fontGroupText = _("Font")
fontGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=fontGroupText), wx.VERTICAL))
sHelper.addItem(fontGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontNameText = _("&Font name")
self.fontNameCheckBox=fontGroup.addItem(wx.CheckBox(self, label=fontNameText))
self.fontNameCheckBox.SetValue(config.conf["documentFormatting"]["reportFontName"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontSizeText = _("Font &size")
self.fontSizeCheckBox=fontGroup.addItem(wx.CheckBox(self,label=fontSizeText))
self.fontSizeCheckBox.SetValue(config.conf["documentFormatting"]["reportFontSize"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontAttributesText = _("Font attri&butes")
self.fontAttrsCheckBox=fontGroup.addItem(wx.CheckBox(self,label=fontAttributesText))
self.fontAttrsCheckBox.SetValue(config.conf["documentFormatting"]["reportFontAttributes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
emphasisText=_("E&mphasis")
self.emphasisCheckBox=fontGroup.addItem(wx.CheckBox(self,label=emphasisText))
self.emphasisCheckBox.SetValue(config.conf["documentFormatting"]["reportEmphasis"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
styleText =_("St&yle")
self.styleCheckBox=fontGroup.addItem(wx.CheckBox(self,label=styleText))
self.styleCheckBox.SetValue(config.conf["documentFormatting"]["reportStyle"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
colorsText = _("&Colors")
self.colorCheckBox=fontGroup.addItem(wx.CheckBox(self,label=colorsText))
self.colorCheckBox.SetValue(config.conf["documentFormatting"]["reportColor"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
documentInfoGroupText = _("Document information")
docInfoGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=documentInfoGroupText), wx.VERTICAL))
sHelper.addItem(docInfoGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
commentsText = _("Co&mments")
self.commentsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=commentsText))
self.commentsCheckBox.SetValue(config.conf["documentFormatting"]["reportComments"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
revisionsText = _("&Editor revisions")
self.revisionsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=revisionsText))
self.revisionsCheckBox.SetValue(config.conf["documentFormatting"]["reportRevisions"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
spellingErrorText = _("Spelling e&rrors")
self.spellingErrorsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=spellingErrorText))
self.spellingErrorsCheckBox.SetValue(config.conf["documentFormatting"]["reportSpellingErrors"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
pageAndSpaceGroupText = _("Pages and spacing")
pageAndSpaceGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=pageAndSpaceGroupText), wx.VERTICAL))
sHelper.addItem(pageAndSpaceGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
pageText = _("&Pages")
self.pageCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=pageText))
self.pageCheckBox.SetValue(config.conf["documentFormatting"]["reportPage"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
lineText = _("Line &numbers")
self.lineNumberCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=lineText))
self.lineNumberCheckBox.SetValue(config.conf["documentFormatting"]["reportLineNumber"])
# Translators: This is the label for a combobox controlling the reporting of line indentation in the
  # Document Formatting dialog (possible choices are Off, Speech, Tones, or Both).
lineIndentationText = _("Line &indentation reporting:")
indentChoices=[
#Translators: A choice in a combo box in the document formatting dialog to report No line Indentation.
_("Off"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with Speech.
pgettext('line indentation setting', "Speech"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with tones.
_("Tones"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with both Speech and tones.
_("Both Speech and Tones")
]
self.lineIndentationCombo = pageAndSpaceGroup.addLabeledControl(lineIndentationText, wx.Choice, choices=indentChoices)
  # We use bitwise operations because they save us a four-way if statement.
curChoice = config.conf["documentFormatting"]["reportLineIndentationWithTones"] << 1 | config.conf["documentFormatting"]["reportLineIndentation"]
self.lineIndentationCombo.SetSelection(curChoice)
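  # Illustration (derived from the encoding above): (tones << 1) | speech packs the two
  # booleans into the combo index, so 0 = Off, 1 = Speech, 2 = Tones,
  # 3 = Both Speech and Tones, matching the order of indentChoices.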
  # Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report paragraph indentation if available.
paragraphIndentationText = _("&Paragraph indentation")
self.paragraphIndentationCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=paragraphIndentationText))
self.paragraphIndentationCheckBox.SetValue(config.conf["documentFormatting"]["reportParagraphIndentation"])
  # Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report line spacing if available.
lineSpacingText=_("&Line spacing")
self.lineSpacingCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=lineSpacingText))
self.lineSpacingCheckBox.SetValue(config.conf["documentFormatting"]["reportLineSpacing"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
alignmentText = _("&Alignment")
self.alignmentCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=alignmentText))
self.alignmentCheckBox.SetValue(config.conf["documentFormatting"]["reportAlignment"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
tablesGroupText = _("Table information")
tablesGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=tablesGroupText), wx.VERTICAL))
sHelper.addItem(tablesGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tablesCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("&Tables")))
self.tablesCheckBox.SetValue(config.conf["documentFormatting"]["reportTables"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tableHeadersCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("Row/column h&eaders")))
self.tableHeadersCheckBox.SetValue(config.conf["documentFormatting"]["reportTableHeaders"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tableCellCoordsCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("Cell c&oordinates")))
self.tableCellCoordsCheckBox.SetValue(config.conf["documentFormatting"]["reportTableCellCoords"])
borderChoices=[
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Off"),
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Styles"),
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Both Colors and Styles"),
]
# Translators: This is the label for a combobox in the
# document formatting settings panel.
self.borderComboBox=tablesGroup.addLabeledControl(_("Cell borders:"), wx.Choice, choices=borderChoices)
curChoice = 0
if config.conf["documentFormatting"]["reportBorderStyle"]:
if config.conf["documentFormatting"]["reportBorderColor"]:
curChoice = 2
else:
curChoice = 1
self.borderComboBox.SetSelection(curChoice)
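  # Illustration (from the mapping above): 0 = Off, 1 = Styles only,
  # 2 = Both Colors and Styles; border colors are only reported when border styles are too.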
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
elementsGroupText = _("Elements")
elementsGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=elementsGroupText), wx.VERTICAL))
sHelper.addItem(elementsGroup, flag=wx.EXPAND, proportion=1)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.headingsCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Headings")))
self.headingsCheckBox.SetValue(config.conf["documentFormatting"]["reportHeadings"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.linksCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Lin&ks")))
self.linksCheckBox.SetValue(config.conf["documentFormatting"]["reportLinks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.listsCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Lists")))
self.listsCheckBox.SetValue(config.conf["documentFormatting"]["reportLists"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
  self.blockQuotesCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Block &quotes")))
self.blockQuotesCheckBox.SetValue(config.conf["documentFormatting"]["reportBlockQuotes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.landmarksCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Lan&dmarks")))
self.landmarksCheckBox.SetValue(config.conf["documentFormatting"]["reportLandmarks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.framesCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Fra&mes")))
self.framesCheckBox.Value=config.conf["documentFormatting"]["reportFrames"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.clickableCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Clickable")))
self.clickableCheckBox.Value=config.conf["documentFormatting"]["reportClickable"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
detectFormatAfterCursorText =_("Report formatting changes after the cursor (can cause a lag)")
self.detectFormatAfterCursorCheckBox=wx.CheckBox(self, label=detectFormatAfterCursorText)
self.detectFormatAfterCursorCheckBox.SetValue(config.conf["documentFormatting"]["detectFormatAfterCursor"])
sHelper.addItem(self.detectFormatAfterCursorCheckBox)
def onSave(self):
config.conf["documentFormatting"]["detectFormatAfterCursor"]=self.detectFormatAfterCursorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontName"]=self.fontNameCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontSize"]=self.fontSizeCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontAttributes"]=self.fontAttrsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportColor"]=self.colorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportComments"]=self.commentsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportRevisions"]=self.revisionsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportEmphasis"]=self.emphasisCheckBox.IsChecked()
config.conf["documentFormatting"]["reportAlignment"]=self.alignmentCheckBox.IsChecked()
config.conf["documentFormatting"]["reportStyle"]=self.styleCheckBox.IsChecked()
config.conf["documentFormatting"]["reportSpellingErrors"]=self.spellingErrorsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportPage"]=self.pageCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineNumber"]=self.lineNumberCheckBox.IsChecked()
choice = self.lineIndentationCombo.GetSelection()
config.conf["documentFormatting"]["reportLineIndentation"] = choice in (1, 3)
config.conf["documentFormatting"]["reportLineIndentationWithTones"] = choice in (2, 3)
config.conf["documentFormatting"]["reportParagraphIndentation"]=self.paragraphIndentationCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineSpacing"]=self.lineSpacingCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTables"]=self.tablesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableHeaders"]=self.tableHeadersCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableCellCoords"]=self.tableCellCoordsCheckBox.IsChecked()
choice = self.borderComboBox.GetSelection()
config.conf["documentFormatting"]["reportBorderStyle"] = choice in (1,2)
config.conf["documentFormatting"]["reportBorderColor"] = (choice == 2)
config.conf["documentFormatting"]["reportLinks"]=self.linksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportHeadings"]=self.headingsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLists"]=self.listsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportBlockQuotes"]=self.blockQuotesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLandmarks"]=self.landmarksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFrames"]=self.framesCheckBox.Value
config.conf["documentFormatting"]["reportClickable"]=self.clickableCheckBox.Value
class TouchInteractionPanel(SettingsPanel):
# Translators: This is the label for the touch interaction settings panel.
title = _("Touch Interaction")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# touch interaction settings panel.
self.touchTypingCheckBox=wx.CheckBox(self,wx.NewId(),label=_("&Touch typing mode"))
self.touchTypingCheckBox.SetValue(config.conf["touch"]["touchTyping"])
settingsSizer.Add(self.touchTypingCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["touch"]["touchTyping"]=self.touchTypingCheckBox.IsChecked()
class UwpOcrPanel(SettingsPanel):
# Translators: The title of the Windows 10 OCR panel.
title = _("Windows 10 OCR")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Lazily import this.
from contentRecog import uwpOcr
self.languageCodes = uwpOcr.getLanguages()
languageChoices = [
languageHandler.getLanguageDescription(languageHandler.normalizeLanguage(lang))
for lang in self.languageCodes]
# Translators: Label for an option in the Windows 10 OCR dialog.
languageLabel = _("Recognition &language:")
self.languageChoice = sHelper.addLabeledControl(languageLabel, wx.Choice, choices=languageChoices)
try:
langIndex = self.languageCodes.index(config.conf["uwpOcr"]["language"])
self.languageChoice.Selection = langIndex
except ValueError:
self.languageChoice.Selection = 0
def onSave(self):
lang = self.languageCodes[self.languageChoice.Selection]
config.conf["uwpOcr"]["language"] = lang
class DictionaryEntryDialog(wx.Dialog):
TYPE_LABELS = {
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_ANYWHERE: _("&Anywhere"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_WORD: _("Whole &word"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_REGEXP: _("Regular &expression")
}
TYPE_LABELS_ORDERING = (speechDictHandler.ENTRY_TYPE_ANYWHERE, speechDictHandler.ENTRY_TYPE_WORD, speechDictHandler.ENTRY_TYPE_REGEXP)
# Translators: This is the label for the edit dictionary entry dialog.
def __init__(self, parent, title=_("Edit Dictionary Entry")):
super(DictionaryEntryDialog,self).__init__(parent,title=title)
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is a label for an edit field in add dictionary entry dialog.
patternLabelText = _("&Pattern")
self.patternTextCtrl=sHelper.addLabeledControl(patternLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog and in punctuation/symbol pronunciation dialog.
replacementLabelText = _("&Replacement")
self.replacementTextCtrl=sHelper.addLabeledControl(replacementLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog.
commentLabelText = _("&Comment")
self.commentTextCtrl=sHelper.addLabeledControl(commentLabelText, wx.TextCtrl)
# Translators: This is a label for a checkbox in add dictionary entry dialog.
caseSensitiveText = _("Case &sensitive")
self.caseSensitiveCheckBox=sHelper.addItem(wx.CheckBox(self,label=caseSensitiveText))
# Translators: This is a label for a set of radio buttons in add dictionary entry dialog.
typeText = _("&Type")
typeChoices = [DictionaryEntryDialog.TYPE_LABELS[i] for i in DictionaryEntryDialog.TYPE_LABELS_ORDERING]
self.typeRadioBox=sHelper.addItem(wx.RadioBox(self,label=typeText, choices=typeChoices))
sHelper.addDialogDismissButtons(self.CreateButtonSizer(wx.OK|wx.CANCEL))
mainSizer.Add(sHelper.sizer,border=20,flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.setType(speechDictHandler.ENTRY_TYPE_ANYWHERE)
self.patternTextCtrl.SetFocus()
self.Bind(wx.EVT_BUTTON,self.onOk,id=wx.ID_OK)
def getType(self):
typeRadioValue = self.typeRadioBox.GetSelection()
if typeRadioValue == wx.NOT_FOUND:
return speechDictHandler.ENTRY_TYPE_ANYWHERE
return DictionaryEntryDialog.TYPE_LABELS_ORDERING[typeRadioValue]
def onOk(self,evt):
if not self.patternTextCtrl.GetValue():
# Translators: This is an error message to let the user know that the pattern field in the dictionary entry is not valid.
gui.messageBox(_("A pattern is required."), _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
self.patternTextCtrl.SetFocus()
return
try:
self.dictEntry=speechDictHandler.SpeechDictEntry(self.patternTextCtrl.GetValue(),self.replacementTextCtrl.GetValue(),self.commentTextCtrl.GetValue(),bool(self.caseSensitiveCheckBox.GetValue()),self.getType())
except Exception as e:
log.debugWarning("Could not add dictionary entry due to (regex error) : %s" % e)
# Translators: This is an error message to let the user know that the dictionary entry is not valid.
gui.messageBox(_("Regular Expression error: \"%s\".")%e, _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
return
evt.Skip()
def setType(self, type):
self.typeRadioBox.SetSelection(DictionaryEntryDialog.TYPE_LABELS_ORDERING.index(type))
class DictionaryDialog(SettingsDialog):
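 # Strip the keyboard accelerator markers ("&") from the entry type labels so they read cleanly in the entries list.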
TYPE_LABELS = {t: l.replace("&", "") for t, l in DictionaryEntryDialog.TYPE_LABELS.iteritems()}
def __init__(self,parent,title,speechDict):
self.title = title
self.speechDict = speechDict
self.tempSpeechDict=speechDictHandler.SpeechDict()
self.tempSpeechDict.extend(self.speechDict)
globalVars.speechDictionaryProcessing=False
super(DictionaryDialog, self).__init__(parent)
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the combo box of dictionary entries in speech dictionary dialog.
entriesLabelText=_("&Dictionary entries")
self.dictList=sHelper.addLabeledControl(entriesLabelText, wx.ListCtrl, style=wx.LC_REPORT|wx.LC_SINGLE_SEL,size=(550,350))
# Translators: The label for a column in dictionary entries list used to identify comments for the entry.
self.dictList.InsertColumn(0,_("Comment"),width=150)
# Translators: The label for a column in dictionary entries list used to identify pattern (original word or a pattern).
self.dictList.InsertColumn(1,_("Pattern"),width=150)
# Translators: The label for a column in dictionary entries list and in a list of symbols from symbol pronunciation dialog used to identify replacement for a pattern or a symbol
self.dictList.InsertColumn(2,_("Replacement"),width=150)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is case sensitive or not.
self.dictList.InsertColumn(3,_("case"),width=50)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is a regular expression, matches whole words, or matches anywhere.
self.dictList.InsertColumn(4,_("Type"),width=50)
self.offOn = (_("off"),_("on"))
for entry in self.tempSpeechDict:
self.dictList.Append((entry.comment,entry.pattern,entry.replacement,self.offOn[int(entry.caseSensitive)],DictionaryDialog.TYPE_LABELS[entry.type]))
self.editingIndex=-1
self.dictList.Bind(wx.EVT_CHAR, self.onListChar)
bHelper = guiHelper.ButtonHelper(orientation=wx.HORIZONTAL)
addButtonID=wx.NewId()
# Translators: The label for a button in speech dictionaries dialog to add new entries.
bHelper.addButton(self, addButtonID,_("&Add"),wx.DefaultPosition)
editButtonID=wx.NewId()
# Translators: The label for a button in speech dictionaries dialog to edit existing entries.
bHelper.addButton(self, editButtonID,_("&Edit"),wx.DefaultPosition)
removeButtonID=wx.NewId()
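  # Translators: The label for a button in speech dictionaries dialog to remove existing entries.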
bHelper.addButton(self, removeButtonID,_("&Remove"),wx.DefaultPosition)
sHelper.addItem(bHelper)
self.Bind(wx.EVT_BUTTON,self.OnAddClick,id=addButtonID)
self.Bind(wx.EVT_BUTTON,self.OnEditClick,id=editButtonID)
self.Bind(wx.EVT_BUTTON,self.OnRemoveClick,id=removeButtonID)
def postInit(self):
self.dictList.SetFocus()
def onListChar(self, evt):
if evt.KeyCode == wx.WXK_RETURN:
# The enter key should be propagated to the dialog and thus activate the default button,
# but this is broken (wx ticket #3725).
# Therefore, we must catch the enter key here.
# Activate the OK button.
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_OK))
else:
evt.Skip()
def onCancel(self,evt):
globalVars.speechDictionaryProcessing=True
super(DictionaryDialog, self).onCancel(evt)
def onOk(self,evt):
globalVars.speechDictionaryProcessing=True
if self.tempSpeechDict!=self.speechDict:
del self.speechDict[:]
self.speechDict.extend(self.tempSpeechDict)
self.speechDict.save()
super(DictionaryDialog, self).onOk(evt)
def OnAddClick(self,evt):
# Translators: This is the label for the add dictionary entry dialog.
entryDialog=DictionaryEntryDialog(self,title=_("Add Dictionary Entry"))
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict.append(entryDialog.dictEntry)
self.dictList.Append((entryDialog.commentTextCtrl.GetValue(),entryDialog.patternTextCtrl.GetValue(),entryDialog.replacementTextCtrl.GetValue(),self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())],DictionaryDialog.TYPE_LABELS[entryDialog.getType()]))
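   # Deselect any previously selected entries so that only the newly added entry ends up selected and focused.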
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.Select(index,on=0)
index=self.dictList.GetNextSelected(index)
addedIndex=self.dictList.GetItemCount()-1
self.dictList.Select(addedIndex)
self.dictList.Focus(addedIndex)
self.dictList.SetFocus()
entryDialog.Destroy()
def OnEditClick(self,evt):
if self.dictList.GetSelectedItemCount()!=1:
return
editIndex=self.dictList.GetFirstSelected()
if editIndex<0:
return
entryDialog=DictionaryEntryDialog(self)
entryDialog.patternTextCtrl.SetValue(self.tempSpeechDict[editIndex].pattern)
entryDialog.replacementTextCtrl.SetValue(self.tempSpeechDict[editIndex].replacement)
entryDialog.commentTextCtrl.SetValue(self.tempSpeechDict[editIndex].comment)
entryDialog.caseSensitiveCheckBox.SetValue(self.tempSpeechDict[editIndex].caseSensitive)
entryDialog.setType(self.tempSpeechDict[editIndex].type)
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict[editIndex]=entryDialog.dictEntry
self.dictList.SetStringItem(editIndex,0,entryDialog.commentTextCtrl.GetValue())
self.dictList.SetStringItem(editIndex,1,entryDialog.patternTextCtrl.GetValue())
self.dictList.SetStringItem(editIndex,2,entryDialog.replacementTextCtrl.GetValue())
self.dictList.SetStringItem(editIndex,3,self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())])
self.dictList.SetStringItem(editIndex,4,DictionaryDialog.TYPE_LABELS[entryDialog.getType()])
self.dictList.SetFocus()
entryDialog.Destroy()
def OnRemoveClick(self,evt):
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.DeleteItem(index)
del self.tempSpeechDict[index]
index=self.dictList.GetNextSelected(index)
self.dictList.SetFocus()
class BrailleSettingsPanel(SettingsPanel):
# Translators: This is the label for the braille panel
title = _("Braille")
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the braille display on the braille panel.
displayLabel = _("Braille &display")
displayGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=displayLabel), wx.HORIZONTAL))
settingsSizerHelper.addItem(displayGroup)
displayDesc = braille.handler.display.description
self.displayNameCtrl = ExpandoTextCtrl(self, size=(self.scaleSize(250), -1), value=displayDesc, style=wx.TE_READONLY)
# Translators: This is the label for the button used to change braille display,
# it appears in the context of a braille display group on the braille settings panel.
changeDisplayBtn = wx.Button(self, label=_("C&hange..."))
displayGroup.addItem(
guiHelper.associateElements(
self.displayNameCtrl,
changeDisplayBtn
)
)
changeDisplayBtn.Bind(wx.EVT_BUTTON,self.onChangeDisplay)
self.brailleSubPanel = BrailleSettingsSubPanel(self)
settingsSizerHelper.addItem(self.brailleSubPanel)
def onChangeDisplay(self, evt):
changeDisplay = BrailleDisplaySelectionDialog(self, multiInstanceAllowed=True)
ret = changeDisplay.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentDisplay(self):
displayDesc = braille.handler.display.description
self.displayNameCtrl.SetValue(displayDesc)
def onPanelActivated(self):
self.brailleSubPanel.onPanelActivated()
super(BrailleSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.brailleSubPanel.onPanelDeactivated()
super(BrailleSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.brailleSubPanel.onDiscard()
def onSave(self):
self.brailleSubPanel.onSave()
class BrailleDisplaySelectionDialog(SettingsDialog):
# Translators: This is the label for the braille display selection dialog.
title = _("Select Braille Display")
displayNames = []
possiblePorts = []
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for a setting in braille settings to choose a braille display.
displayLabelText = _("Braille &display:")
self.displayList = sHelper.addLabeledControl(displayLabelText, wx.Choice, choices=[])
self.Bind(wx.EVT_CHOICE, self.onDisplayNameChanged, self.displayList)
# Translators: The label for a setting in braille settings to choose the connection port (if the selected braille display supports port selection).
portsLabelText = _("&Port:")
self.portsList = sHelper.addLabeledControl(portsLabelText, wx.Choice, choices=[])
self.updateBrailleDisplayLists()
def postInit(self):
# Finally, ensure that focus is on the list of displays.
self.displayList.SetFocus()
def updateBrailleDisplayLists(self):
driverList = braille.getDisplayList()
self.displayNames = [driver[0] for driver in driverList]
displayChoices = [driver[1] for driver in driverList]
self.displayList.Clear()
self.displayList.AppendItems(displayChoices)
try:
selection = self.displayNames.index(braille.handler.display.name)
self.displayList.SetSelection(selection)
  except ValueError:
   pass
self.updatePossiblePorts()
def updatePossiblePorts(self):
displayName = self.displayNames[self.displayList.GetSelection()]
displayCls = braille._getDisplayDriver(displayName)
self.possiblePorts = []
try:
self.possiblePorts.extend(displayCls.getPossiblePorts().iteritems())
except NotImplementedError:
pass
if self.possiblePorts:
self.portsList.SetItems([p[1] for p in self.possiblePorts])
try:
selectedPort = config.conf["braille"][displayName].get("port")
portNames = [p[0] for p in self.possiblePorts]
selection = portNames.index(selectedPort)
except (KeyError, ValueError):
# Display name not in config or port not valid
selection = 0
self.portsList.SetSelection(selection)
# If no port selection is possible or only automatic selection is available, disable the port selection control
enable = len(self.possiblePorts) > 0 and not (len(self.possiblePorts) == 1 and self.possiblePorts[0][0] == "auto")
self.portsList.Enable(enable)
def onDisplayNameChanged(self, evt):
self.updatePossiblePorts()
def onOk(self, evt):
if not self.displayNames:
   # The list of displays has not been populated yet, so we didn't change anything in this dialog
return
display = self.displayNames[self.displayList.GetSelection()]
if display not in config.conf["braille"]:
config.conf["braille"][display] = {}
if self.possiblePorts:
port = self.possiblePorts[self.portsList.GetSelection()][0]
config.conf["braille"][display]["port"] = port
if not braille.handler.setDisplayByName(display):
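   # Translators: The message in a dialog presented when NVDA is unable to load the selected braille display.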
gui.messageBox(_("Could not load the %s display.")%display, _("Braille Display Error"), wx.OK|wx.ICON_WARNING, self)
return
if self.IsModal():
# Hack: we need to update the display in our parent window before closing.
# Otherwise, NVDA will report the old display even though the new display is reflected visually.
self.Parent.updateCurrentDisplay()
super(BrailleDisplaySelectionDialog, self).onOk(evt)
class BrailleSettingsSubPanel(SettingsPanel):
def makeSettings(self, settingsSizer):
if gui._isDebug():
startTime = time.time()
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
tables = brailleTables.listTables()
# Translators: The label for a setting in braille settings to select the output table (the braille table used to read braille text on the braille display).
outputsLabelText = _("&Output table:")
outTables = [table for table in tables if table.output]
self.outTableNames = [table.fileName for table in outTables]
outTableChoices = [table.displayName for table in outTables]
self.outTableList = sHelper.addLabeledControl(outputsLabelText, wx.Choice, choices=outTableChoices)
try:
selection = self.outTableNames.index(config.conf["braille"]["translationTable"])
self.outTableList.SetSelection(selection)
  except ValueError:
   pass
if gui._isDebug():
log.debug("Loading output tables completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to select the input table (the braille table used to type braille characters on a braille keyboard).
inputLabelText = _("&Input table:")
self.inTables = [table for table in tables if table.input]
inTableChoices = [table.displayName for table in self.inTables]
self.inTableList = sHelper.addLabeledControl(inputLabelText, wx.Choice, choices=inTableChoices)
try:
selection = self.inTables.index(brailleInput.handler.table)
self.inTableList.SetSelection(selection)
  except ValueError:
   pass
if gui._isDebug():
log.debug("Loading input tables completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to expand the current word under cursor to computer braille.
expandAtCursorText = _("E&xpand to computer braille for the word at the cursor")
self.expandAtCursorCheckBox = sHelper.addItem(wx.CheckBox(self, wx.ID_ANY, label=expandAtCursorText))
self.expandAtCursorCheckBox.SetValue(config.conf["braille"]["expandAtCursor"])
# Translators: The label for a setting in braille settings to show the cursor.
showCursorLabelText = _("&Show cursor")
self.showCursorCheckBox = sHelper.addItem(wx.CheckBox(self, label=showCursorLabelText))
self.showCursorCheckBox.Bind(wx.EVT_CHECKBOX, self.onShowCursorChange)
self.showCursorCheckBox.SetValue(config.conf["braille"]["showCursor"])
# Translators: The label for a setting in braille settings to enable cursor blinking.
cursorBlinkLabelText = _("Blink cursor")
self.cursorBlinkCheckBox = sHelper.addItem(wx.CheckBox(self, label=cursorBlinkLabelText))
self.cursorBlinkCheckBox.Bind(wx.EVT_CHECKBOX, self.onBlinkCursorChange)
self.cursorBlinkCheckBox.SetValue(config.conf["braille"]["cursorBlink"])
if not self.showCursorCheckBox.GetValue():
self.cursorBlinkCheckBox.Disable()
# Translators: The label for a setting in braille settings to change cursor blink rate in milliseconds (1 second is 1000 milliseconds).
cursorBlinkRateLabelText = _("Cursor blink rate (ms)")
minBlinkRate = int(config.conf.getConfigValidationParameter(["braille", "cursorBlinkRate"], "min"))
maxBlinkRate = int(config.conf.getConfigValidationParameter(["braille", "cursorBlinkRate"], "max"))
self.cursorBlinkRateEdit = sHelper.addLabeledControl(cursorBlinkRateLabelText, nvdaControls.SelectOnFocusSpinCtrl,
min=minBlinkRate, max=maxBlinkRate, initial=config.conf["braille"]["cursorBlinkRate"])
if not self.showCursorCheckBox.GetValue() or not self.cursorBlinkCheckBox.GetValue() :
self.cursorBlinkRateEdit.Disable()
self.cursorShapes = [s[0] for s in braille.CURSOR_SHAPES]
cursorShapeChoices = [s[1] for s in braille.CURSOR_SHAPES]
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to focus.
cursorShapeFocusLabelText = _("Cursor shape for &focus:")
self.cursorShapeFocusList = sHelper.addLabeledControl(cursorShapeFocusLabelText, wx.Choice, choices=cursorShapeChoices)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeFocus"])
self.cursorShapeFocusList.SetSelection(selection)
  except ValueError:
   pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeFocusList.Disable()
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to review.
cursorShapeReviewLabelText = _("Cursor shape for &review:")
self.cursorShapeReviewList = sHelper.addLabeledControl(cursorShapeReviewLabelText, wx.Choice, choices=cursorShapeChoices)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeReview"])
self.cursorShapeReviewList.SetSelection(selection)
  except ValueError:
   pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeReviewList.Disable()
if gui._isDebug():
log.debug("Loading cursor settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to change how long a message stays on the braille display (in seconds).
messageTimeoutText = _("Message &timeout (sec)")
self.messageTimeoutEdit = sHelper.addLabeledControl(messageTimeoutText, nvdaControls.SelectOnFocusSpinCtrl,
min=int(config.conf.getConfigValidationParameter(["braille", "messageTimeout"], "min")),
max=int(config.conf.getConfigValidationParameter(["braille", "messageTimeout"], "max")),
initial=config.conf["braille"]["messageTimeout"])
# Translators: The label for a setting in braille settings to display a message on the braille display indefinitely.
noMessageTimeoutLabelText = _("Show &messages indefinitely")
self.noMessageTimeoutCheckBox = sHelper.addItem(wx.CheckBox(self, label=noMessageTimeoutLabelText))
self.noMessageTimeoutCheckBox.Bind(wx.EVT_CHECKBOX, self.onNoMessageTimeoutChange)
self.noMessageTimeoutCheckBox.SetValue(config.conf["braille"]["noMessageTimeout"])
if self.noMessageTimeoutCheckBox.GetValue():
self.messageTimeoutEdit.Disable()
if gui._isDebug():
log.debug("Loading timeout settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to set whether braille should be tethered to focus or review cursor.
tetherListText = _("Tether B&raille:")
# Translators: The value for a setting in the braille settings, to set whether braille should be tethered to focus or review cursor.
tetherChoices = [x[1] for x in braille.handler.tetherValues]
self.tetherList = sHelper.addLabeledControl(tetherListText, wx.Choice, choices=tetherChoices)
tetherChoice=braille.handler.TETHER_AUTO if config.conf["braille"]["autoTether"] else config.conf["braille"]["tetherTo"]
  # Evaluate the generator inside the try block, so a tether value missing from tetherValues is handled gracefully.
  try:
   selection = next(x for x,y in enumerate(braille.handler.tetherValues) if y[0]==tetherChoice)
   self.tetherList.SetSelection(selection)
  except StopIteration:
   pass
if gui._isDebug():
log.debug("Loading tether settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to read by paragraph (if it is checked, the commands to move the display by lines moves the display by paragraphs instead).
readByParagraphText = _("Read by ¶graph")
self.readByParagraphCheckBox = sHelper.addItem(wx.CheckBox(self, label=readByParagraphText))
self.readByParagraphCheckBox.Value = config.conf["braille"]["readByParagraph"]
  # Translators: The label for a setting in braille settings to enable word wrap (try to avoid splitting words at the end of the braille display).
wordWrapText = _("Avoid splitting &words when possible")
self.wordWrapCheckBox = sHelper.addItem(wx.CheckBox(self, label=wordWrapText))
self.wordWrapCheckBox.Value = config.conf["braille"]["wordWrap"]
# Translators: The label for a setting in braille settings to select how the context for the focus object should be presented on a braille display.
focusContextPresentationLabelText = _("Focus context presentation:")
self.focusContextPresentationValues = [x[0] for x in braille.focusContextPresentations]
focusContextPresentationChoices = [x[1] for x in braille.focusContextPresentations]
self.focusContextPresentationList = sHelper.addLabeledControl(focusContextPresentationLabelText, wx.Choice, choices=focusContextPresentationChoices)
try:
index=self.focusContextPresentationValues.index(config.conf["braille"]["focusContextPresentation"])
  except ValueError:
   index=0
self.focusContextPresentationList.SetSelection(index)
if gui._isDebug():
log.debug("Finished making settings, now at %.2f seconds from start"%(time.time() - startTime))
def onSave(self):
config.conf["braille"]["translationTable"] = self.outTableNames[self.outTableList.GetSelection()]
brailleInput.handler.table = self.inTables[self.inTableList.GetSelection()]
config.conf["braille"]["expandAtCursor"] = self.expandAtCursorCheckBox.GetValue()
config.conf["braille"]["showCursor"] = self.showCursorCheckBox.GetValue()
config.conf["braille"]["cursorBlink"] = self.cursorBlinkCheckBox.GetValue()
config.conf["braille"]["cursorBlinkRate"] = self.cursorBlinkRateEdit.GetValue()
config.conf["braille"]["cursorShapeFocus"] = self.cursorShapes[self.cursorShapeFocusList.GetSelection()]
config.conf["braille"]["cursorShapeReview"] = self.cursorShapes[self.cursorShapeReviewList.GetSelection()]
config.conf["braille"]["noMessageTimeout"] = self.noMessageTimeoutCheckBox.GetValue()
config.conf["braille"]["messageTimeout"] = self.messageTimeoutEdit.GetValue()
tetherChoice = braille.handler.tetherValues[self.tetherList.GetSelection()][0]
if tetherChoice==braille.handler.TETHER_AUTO:
config.conf["braille"]["autoTether"] = True
config.conf["braille"]["tetherTo"] = braille.handler.TETHER_FOCUS
else:
config.conf["braille"]["autoTether"] = False
braille.handler.setTether(tetherChoice, auto=False)
config.conf["braille"]["readByParagraph"] = self.readByParagraphCheckBox.Value
config.conf["braille"]["wordWrap"] = self.wordWrapCheckBox.Value
config.conf["braille"]["focusContextPresentation"] = self.focusContextPresentationValues[self.focusContextPresentationList.GetSelection()]
def onShowCursorChange(self, evt):
self.cursorBlinkCheckBox.Enable(evt.IsChecked())
self.cursorBlinkRateEdit.Enable(evt.IsChecked() and self.cursorBlinkCheckBox.GetValue())
self.cursorShapeFocusList.Enable(evt.IsChecked())
self.cursorShapeReviewList.Enable(evt.IsChecked())
def onBlinkCursorChange(self, evt):
self.cursorBlinkRateEdit.Enable(evt.IsChecked())
def onNoMessageTimeoutChange(self, evt):
self.messageTimeoutEdit.Enable(not evt.IsChecked())
""" The Id of the category panel in the multi category settings dialog, this is set when the dialog is created
and returned to None when the dialog is destroyed. This can be used by an AppModule for NVDA to identify and announce
changes in name for the panel when categories are changed"""
NvdaSettingsCategoryPanelId = None
""" The name of the config profile currently being edited, if any.
This is set when the currently edited configuration profile is determined and returned to None when the dialog is destroyed.
This can be used by an AppModule for NVDA to identify and announce
changes in the name of the edited configuration profile when categories are changed"""
NvdaSettingsDialogActiveConfigProfile = None
class NVDASettingsDialog(MultiCategorySettingsDialog):
# Translators: This is the label for the NVDA settings dialog.
title = _("NVDA")
categoryClasses=[
GeneralSettingsPanel,
SpeechSettingsPanel,
BrailleSettingsPanel,
KeyboardSettingsPanel,
MouseSettingsPanel,
ReviewCursorPanel,
InputCompositionPanel,
ObjectPresentationPanel,
BrowseModePanel,
DocumentFormattingPanel,
]
if touchHandler.touchSupported():
categoryClasses.append(TouchInteractionPanel)
if winVersion.isUwpOcrAvailable():
categoryClasses.append(UwpOcrPanel)
def makeSettings(self, settingsSizer):
# Ensure that after the settings dialog is created the name is set correctly
super(NVDASettingsDialog, self).makeSettings(settingsSizer)
self._doOnCategoryChange()
def _doOnCategoryChange(self):
global NvdaSettingsDialogActiveConfigProfile
NvdaSettingsDialogActiveConfigProfile = config.conf.profiles[-1].name
if not NvdaSettingsDialogActiveConfigProfile or isinstance(self.currentCategory, GeneralSettingsPanel):
# Translators: The profile name for normal configuration
NvdaSettingsDialogActiveConfigProfile = _("normal configuration")
self.SetTitle(self._getDialogTitle())
def _getDialogTitle(self):
return u"{dialogTitle}: {panelTitle} ({configProfile})".format(
dialogTitle=self.title,
panelTitle=self.currentCategory.title,
configProfile=NvdaSettingsDialogActiveConfigProfile
)
def onCategoryChange(self,evt):
super(NVDASettingsDialog,self).onCategoryChange(evt)
if evt.Skipped:
return
self._doOnCategoryChange()
def Destroy(self):
global NvdaSettingsCategoryPanelId, NvdaSettingsDialogActiveConfigProfile
NvdaSettingsCategoryPanelId = None
NvdaSettingsDialogActiveConfigProfile = None
super(NVDASettingsDialog, self).Destroy()
class AddSymbolDialog(wx.Dialog):
def __init__(self, parent):
# Translators: This is the label for the add symbol dialog.
super(AddSymbolDialog,self).__init__(parent, title=_("Add Symbol"))
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is the label for the edit field in the add symbol dialog.
symbolText = _("Symbol:")
self.identifierTextCtrl = sHelper.addLabeledControl(symbolText, wx.TextCtrl)
sHelper.addDialogDismissButtons(self.CreateButtonSizer(wx.OK | wx.CANCEL))
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.identifierTextCtrl.SetFocus()
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
class SpeechSymbolsDialog(SettingsDialog):
def __init__(self,parent):
try:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData(speech.getCurrentLanguage())
except LookupError:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData("en")
self.symbolProcessor = symbolProcessor
# Translators: This is the label for the symbol pronunciation dialog.
# %s is replaced by the language for which symbol pronunciation is being edited.
self.title = _("Symbol Pronunciation (%s)")%languageHandler.getLanguageDescription(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).__init__(parent)
def makeSettings(self, settingsSizer):
symbols = self.symbols = [copy.copy(symbol) for symbol in self.symbolProcessor.computedSymbols.itervalues()]
self.pendingRemovals = {}
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for symbols list in symbol pronunciation dialog.
symbolsText = _("&Symbols")
self.symbolsList = sHelper.addLabeledControl(symbolsText, nvdaControls.AutoWidthColumnListCtrl, autoSizeColumnIndex=0, style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
# Translators: The label for a column in symbols list used to identify a symbol.
self.symbolsList.InsertColumn(0, _("Symbol"))
self.symbolsList.InsertColumn(1, _("Replacement"))
# Translators: The label for a column in symbols list used to identify a symbol's speech level (either none, some, most, all or character).
self.symbolsList.InsertColumn(2, _("Level"))
# Translators: The label for a column in symbols list which specifies when the actual symbol will be sent to the synthesizer (preserved).
# See the "Punctuation/Symbol Pronunciation" section of the User Guide for details.
self.symbolsList.InsertColumn(3, _("Preserve"))
for symbol in symbols:
item = self.symbolsList.Append((symbol.displayName,))
self.updateListItem(item, symbol)
self.symbolsList.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onListItemFocused)
self.symbolsList.Bind(wx.EVT_CHAR, self.onListChar)
# Translators: The label for the group of controls in symbol pronunciation dialog to change the pronunciation of a symbol.
changeSymbolText = _("Change selected symbol")
changeSymbolHelper = sHelper.addItem(guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=changeSymbolText), wx.VERTICAL)))
# Used to ensure that event handlers call Skip(). Not calling skip can cause focus problems for controls. More
  # generally the advice in the wx documentation is: "In general, it is recommended to skip all non-command events
# to allow the default handling to take place. The command events are, however, normally not skipped as usually
# a single command such as a button click or menu item selection must only be processed by one handler."
def skipEventAndCall(handler):
def wrapWithEventSkip(event):
if event:
event.Skip()
return handler()
return wrapWithEventSkip
# Translators: The label for the edit field in symbol pronunciation dialog to change the replacement text of a symbol.
replacementText = _("&Replacement")
self.replacementEdit = changeSymbolHelper.addLabeledControl(replacementText, wx.TextCtrl)
self.replacementEdit.Bind(wx.EVT_TEXT, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change the speech level of a symbol.
levelText = _("&Level")
symbolLevelLabels = characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
levelChoices = [symbolLevelLabels[level] for level in characterProcessing.SPEECH_SYMBOL_LEVELS]
self.levelList = changeSymbolHelper.addLabeledControl(levelText, wx.Choice, choices=levelChoices)
self.levelList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change when a symbol is sent to the synthesizer.
preserveText = _("&Send actual symbol to synthesizer")
symbolPreserveLabels = characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS
preserveChoices = [symbolPreserveLabels[mode] for mode in characterProcessing.SPEECH_SYMBOL_PRESERVES]
self.preserveList = changeSymbolHelper.addLabeledControl(preserveText, wx.Choice, choices=preserveChoices)
self.preserveList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
# disable the "change symbol" controls until a valid item is selected.
self.replacementEdit.Disable()
self.levelList.Disable()
self.preserveList.Disable()
bHelper = sHelper.addItem(guiHelper.ButtonHelper(orientation=wx.HORIZONTAL))
# Translators: The label for a button in the Symbol Pronunciation dialog to add a new symbol.
addButton = bHelper.addButton(self, label=_("&Add"))
# Translators: The label for a button in the Symbol Pronunciation dialog to remove a symbol.
self.removeButton = bHelper.addButton(self, label=_("Re&move"))
self.removeButton.Disable()
addButton.Bind(wx.EVT_BUTTON, self.OnAddClick)
self.removeButton.Bind(wx.EVT_BUTTON, self.OnRemoveClick)
self.editingItem = None
def postInit(self):
self.symbolsList.SetFocus()
def updateListItem(self, item, symbol):
self.symbolsList.SetStringItem(item, 1, symbol.replacement)
self.symbolsList.SetStringItem(item, 2, characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS[symbol.level])
self.symbolsList.SetStringItem(item, 3, characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS[symbol.preserve])
def onSymbolEdited(self):
if self.editingItem is not None:
# Update the symbol the user was just editing.
item = self.editingItem
symbol = self.symbols[item]
symbol.replacement = self.replacementEdit.Value
symbol.level = characterProcessing.SPEECH_SYMBOL_LEVELS[self.levelList.Selection]
symbol.preserve = characterProcessing.SPEECH_SYMBOL_PRESERVES[self.preserveList.Selection]
self.updateListItem(item, symbol)
def onListItemFocused(self, evt):
# Update the editing controls to reflect the newly selected symbol.
item = evt.GetIndex()
symbol = self.symbols[item]
self.editingItem = item
  # ChangeValue and the Selection property are used because they do not cause change events (EVT_TEXT / EVT_CHOICE) to be fired.
self.replacementEdit.ChangeValue(symbol.replacement)
self.levelList.Selection = characterProcessing.SPEECH_SYMBOL_LEVELS.index(symbol.level)
self.preserveList.Selection = characterProcessing.SPEECH_SYMBOL_PRESERVES.index(symbol.preserve)
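  # Built-in symbols cannot be removed, so the Remove button is only enabled for user defined symbols.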
self.removeButton.Enabled = not self.symbolProcessor.isBuiltin(symbol.identifier)
self.replacementEdit.Enable()
self.levelList.Enable()
self.preserveList.Enable()
evt.Skip()
def onListChar(self, evt):
if evt.KeyCode == wx.WXK_RETURN:
# The enter key should be propagated to the dialog and thus activate the default button,
# but this is broken (wx ticket #3725).
# Therefore, we must catch the enter key here.
# Activate the OK button.
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_OK))
else:
evt.Skip()
def OnAddClick(self, evt):
with AddSymbolDialog(self) as entryDialog:
if entryDialog.ShowModal() != wx.ID_OK:
return
identifier = entryDialog.identifierTextCtrl.GetValue()
if not identifier:
return
for index, symbol in enumerate(self.symbols):
if identifier == symbol.identifier:
# Translators: An error reported in the Symbol Pronunciation dialog when adding a symbol that is already present.
gui.messageBox(_('Symbol "%s" is already present.') % identifier,
_("Error"), wx.OK | wx.ICON_ERROR)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
self.symbolsList.SetFocus()
return
addedSymbol = characterProcessing.SpeechSymbol(identifier)
try:
del self.pendingRemovals[identifier]
except KeyError:
pass
addedSymbol.displayName = identifier
addedSymbol.replacement = ""
addedSymbol.level = characterProcessing.SYMLVL_ALL
addedSymbol.preserve = characterProcessing.SYMPRES_NEVER
self.symbols.append(addedSymbol)
item = self.symbolsList.Append((addedSymbol.displayName,))
self.updateListItem(item, addedSymbol)
self.symbolsList.Select(item)
self.symbolsList.Focus(item)
self.symbolsList.SetFocus()
def OnRemoveClick(self, evt):
index = self.symbolsList.GetFirstSelected()
symbol = self.symbols[index]
self.pendingRemovals[symbol.identifier] = symbol
# Deleting from self.symbolsList focuses the next item before deleting,
# so it must be done *before* we delete from self.symbols.
self.symbolsList.DeleteItem(index)
del self.symbols[index]
index = min(index, self.symbolsList.ItemCount - 1)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
# We don't get a new focus event with the new index, so set editingItem.
self.editingItem = index
self.symbolsList.SetFocus()
def onOk(self, evt):
self.onSymbolEdited()
self.editingItem = None
for symbol in self.pendingRemovals.itervalues():
self.symbolProcessor.deleteSymbol(symbol)
for symbol in self.symbols:
if not symbol.replacement:
continue
self.symbolProcessor.updateSymbol(symbol)
try:
self.symbolProcessor.userSymbols.save()
except IOError as e:
log.error("Error saving user symbols info: %s" % e)
characterProcessing._localeSpeechSymbolProcessors.invalidateLocaleData(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).onOk(evt)
class InputGesturesDialog(SettingsDialog):
# Translators: The title of the Input Gestures dialog where the user can remap input gestures for commands.
title = _("Input Gestures")
def makeSettings(self, settingsSizer):
filterSizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of a text field to search for gestures in the Input Gestures dialog.
filterLabel = wx.StaticText(self, label=pgettext("inputGestures", "&Filter by:"))
filter = wx.TextCtrl(self)
filterSizer.Add(filterLabel, flag=wx.ALIGN_CENTER_VERTICAL)
filterSizer.AddSpacer(guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_HORIZONTAL)
filterSizer.Add(filter, proportion=1)
settingsSizer.Add(filterSizer, flag=wx.EXPAND)
settingsSizer.AddSpacer(5)
filter.Bind(wx.EVT_TEXT, self.onFilterChange, filter)
tree = self.tree = wx.TreeCtrl(self, size=wx.Size(600, 400), style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE )
self.treeRoot = tree.AddRoot("root")
tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.onTreeSelect)
settingsSizer.Add(tree, proportion=1, flag=wx.EXPAND)
self.gestures = inputCore.manager.getAllGestureMappings(obj=gui.mainFrame.prevFocus, ancestors=gui.mainFrame.prevFocusAncestors)
self.populateTree()
settingsSizer.AddSpacer(guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_VERTICAL)
bHelper = guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label of a button to add a gesture in the Input Gestures dialog.
self.addButton = bHelper.addButton(self, label=_("&Add"))
self.addButton.Bind(wx.EVT_BUTTON, self.onAdd)
self.addButton.Disable()
# Translators: The label of a button to remove a gesture in the Input Gestures dialog.
self.removeButton = bHelper.addButton(self, label=_("&Remove"))
self.removeButton.Bind(wx.EVT_BUTTON, self.onRemove)
self.removeButton.Disable()
self.pendingAdds = set()
self.pendingRemoves = set()
settingsSizer.Add(bHelper.sizer)
def postInit(self):
self.tree.SetFocus()
def populateTree(self, filter=''):
if filter:
			# This regexp uses a positive lookahead (?=...) for every word in the filter, which just makes sure the word is present in the string to be tested without matching position or order.
# #5060: Escape the filter text to prevent unexpected matches and regexp errors.
# Because we're escaping, words must then be split on "\ ".
filter = re.escape(filter)
filterReg = re.compile(r'(?=.*?' + r')(?=.*?'.join(filter.split('\ ')) + r')', re.U|re.IGNORECASE)
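			# e.g. a filter of "say all" compiles to (?=.*?say)(?=.*?all), matching commands that contain both words in any order.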
for category in sorted(self.gestures):
treeCat = self.tree.AppendItem(self.treeRoot, category)
commands = self.gestures[category]
for command in sorted(commands):
if filter and not filterReg.match(command):
continue
treeCom = self.tree.AppendItem(treeCat, command)
commandInfo = commands[command]
self.tree.SetItemPyData(treeCom, commandInfo)
for gesture in commandInfo.gestures:
treeGes = self.tree.AppendItem(treeCom, self._formatGesture(gesture))
self.tree.SetItemPyData(treeGes, gesture)
if not self.tree.ItemHasChildren(treeCat):
self.tree.Delete(treeCat)
elif filter:
self.tree.Expand(treeCat)
def onFilterChange(self, evt):
filter=evt.GetEventObject().GetValue()
self.tree.DeleteChildren(self.treeRoot)
self.populateTree(filter)
def _formatGesture(self, identifier):
try:
source, main = inputCore.getDisplayTextForGestureIdentifier(identifier)
# Translators: Describes a gesture in the Input Gestures dialog.
# {main} is replaced with the main part of the gesture; e.g. alt+tab.
# {source} is replaced with the gesture's source; e.g. laptop keyboard.
return _("{main} ({source})").format(main=main, source=source)
except LookupError:
return identifier
def onTreeSelect(self, evt):
item = self.tree.Selection
data = self.tree.GetItemPyData(item)
isCommand = isinstance(data, inputCore.AllGesturesScriptInfo)
isGesture = isinstance(data, basestring)
self.addButton.Enabled = isCommand or isGesture
self.removeButton.Enabled = isGesture
def onAdd(self, evt):
if inputCore.manager._captureFunc:
return
treeCom = self.tree.Selection
scriptInfo = self.tree.GetItemPyData(treeCom)
if not isinstance(scriptInfo, inputCore.AllGesturesScriptInfo):
treeCom = self.tree.GetItemParent(treeCom)
scriptInfo = self.tree.GetItemPyData(treeCom)
# Translators: The prompt to enter a gesture in the Input Gestures dialog.
treeGes = self.tree.AppendItem(treeCom, _("Enter input gesture:"))
self.tree.SelectItem(treeGes)
self.tree.SetFocus()
def addGestureCaptor(gesture):
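			# Called for each gesture while capture is active: ignore bare modifiers, then stop capturing and record the first complete gesture.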
if gesture.isModifier:
return False
inputCore.manager._captureFunc = None
wx.CallAfter(self._addCaptured, treeGes, scriptInfo, gesture)
return False
inputCore.manager._captureFunc = addGestureCaptor
def _addCaptured(self, treeGes, scriptInfo, gesture):
gids = gesture.normalizedIdentifiers
if len(gids) > 1:
# Multiple choices. Present them in a pop-up menu.
menu = wx.Menu()
for gid in gids:
disp = self._formatGesture(gid)
item = menu.Append(wx.ID_ANY, disp)
self.Bind(wx.EVT_MENU,
lambda evt, gid=gid, disp=disp: self._addChoice(treeGes, scriptInfo, gid, disp),
item)
self.PopupMenu(menu)
if not self.tree.GetItemPyData(treeGes):
# No item was selected, so use the first.
self._addChoice(treeGes, scriptInfo, gids[0],
self._formatGesture(gids[0]))
menu.Destroy()
else:
self._addChoice(treeGes, scriptInfo, gids[0],
self._formatGesture(gids[0]))
def _addChoice(self, treeGes, scriptInfo, gid, disp):
entry = (gid, scriptInfo.moduleName, scriptInfo.className, scriptInfo.scriptName)
try:
# If this was just removed, just undo it.
self.pendingRemoves.remove(entry)
except KeyError:
self.pendingAdds.add(entry)
self.tree.SetItemText(treeGes, disp)
self.tree.SetItemPyData(treeGes, gid)
scriptInfo.gestures.append(gid)
self.onTreeSelect(None)
def onRemove(self, evt):
treeGes = self.tree.Selection
gesture = self.tree.GetItemPyData(treeGes)
treeCom = self.tree.GetItemParent(treeGes)
scriptInfo = self.tree.GetItemPyData(treeCom)
entry = (gesture, scriptInfo.moduleName, scriptInfo.className, scriptInfo.scriptName)
try:
# If this was just added, just undo it.
self.pendingAdds.remove(entry)
except KeyError:
self.pendingRemoves.add(entry)
self.tree.Delete(treeGes)
scriptInfo.gestures.remove(gesture)
self.tree.SetFocus()
def onOk(self, evt):
for gesture, module, className, scriptName in self.pendingRemoves:
try:
inputCore.manager.userGestureMap.remove(gesture, module, className, scriptName)
except ValueError:
# The user wants to unbind a gesture they didn't define.
inputCore.manager.userGestureMap.add(gesture, module, className, None)
for gesture, module, className, scriptName in self.pendingAdds:
try:
# The user might have unbound this gesture,
# so remove this override first.
inputCore.manager.userGestureMap.remove(gesture, module, className, None)
except ValueError:
pass
inputCore.manager.userGestureMap.add(gesture, module, className, scriptName)
if self.pendingAdds or self.pendingRemoves:
# Only save if there is something to save.
try:
inputCore.manager.userGestureMap.save()
except:
log.debugWarning("", exc_info=True)
# Translators: An error displayed when saving user defined input gestures fails.
gui.messageBox(_("Error saving user defined gestures - probably read only file system."),
_("Error"), wx.OK | wx.ICON_ERROR)
super(InputGesturesDialog, self).onOk(evt)
| 1 | 22,177 | I actually think it makes sense to reposition this checkbox after the notifyForPendingUpdateCheckBox. The current order of check boxes is a bit arbitrary now. | nvaccess-nvda | py |
@@ -309,6 +309,14 @@ var opts struct {
 				Targets []core.BuildLabel `positional-arg-name:"targets" description:"Additional targets to load rules from"`
} `positional-args:"true"`
} `command:"rules" description:"Prints built-in rules to stdout as JSON"`
+ Changes struct {
+ Before string `short:"b" long:"before" description:"Revision to check out for the state before"`
+ After string `short:"a" long:"after" required:"true" description:"Revision to check out for the state after"`
+ CheckoutCommand string `long:"checkout_command" default:"git checkout %s" description:"Command to run to check out the before/after revisions."`
+ Args struct {
+ Files cli.StdinStrings `positional-arg-name:"files" description:"Files to consider changed"`
+ } `positional-args:"true"`
+ } `command:"changes" description:"Calculates the difference between two different states of the build graph"`
} `command:"query" description:"Queries information about the build graph"`
}
| 1 | package main
import (
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"path"
"runtime"
"runtime/pprof"
"strings"
"syscall"
"time"
"github.com/jessevdk/go-flags"
"gopkg.in/op/go-logging.v1"
"build"
"cache"
"clean"
"cli"
"core"
"export"
"follow"
"fs"
"gc"
"hashes"
"help"
"metrics"
"output"
"parse"
"query"
"run"
"sync"
"test"
"tool"
"update"
"utils"
"watch"
)
var log = logging.MustGetLogger("plz")
var config *core.Configuration
var opts struct {
Usage string `usage:"Please is a high-performance multi-language build system.\n\nIt uses BUILD files to describe what to build and how to build it.\nSee https://please.build for more information about how it works and what Please can do for you."`
BuildFlags struct {
Config string `short:"c" long:"config" description:"Build config to use. Defaults to opt."`
Arch cli.Arch `short:"a" long:"arch" description:"Architecture to compile for."`
RepoRoot cli.Filepath `short:"r" long:"repo_root" description:"Root of repository to build."`
KeepGoing bool `short:"k" long:"keep_going" description:"Don't stop on first failed target."`
NumThreads int `short:"n" long:"num_threads" description:"Number of concurrent build operations. Default is number of CPUs + 2."`
Include []string `short:"i" long:"include" description:"Label of targets to include in automatic detection."`
Exclude []string `short:"e" long:"exclude" description:"Label of targets to exclude from automatic detection."`
Option ConfigOverrides `short:"o" long:"override" env:"PLZ_OVERRIDES" env-delim:";" description:"Options to override from .plzconfig (e.g. -o please.selfupdate:false)"`
Profile string `long:"profile" env:"PLZ_CONFIG_PROFILE" description:"Configuration profile to load; e.g. --profile=dev will load .plzconfig.dev if it exists."`
} `group:"Options controlling what to build & how to build it"`
OutputFlags struct {
Verbosity int `short:"v" long:"verbosity" description:"Verbosity of output (higher number = more output, default 1 -> warnings and errors only)" default:"1"`
LogFile cli.Filepath `long:"log_file" description:"File to echo full logging output to" default:"plz-out/log/build.log"`
LogFileLevel int `long:"log_file_level" description:"Log level for file output" default:"4"`
		InteractiveOutput bool `long:"interactive_output" description:"Show interactive output in a terminal"`
PlainOutput bool `short:"p" long:"plain_output" description:"Don't show interactive output."`
Colour bool `long:"colour" description:"Forces coloured output from logging & other shell output."`
NoColour bool `long:"nocolour" description:"Forces colourless output from logging & other shell output."`
TraceFile cli.Filepath `long:"trace_file" description:"File to write Chrome tracing output into"`
ShowAllOutput bool `long:"show_all_output" description:"Show all output live from all commands. Implies --plain_output."`
CompletionScript bool `long:"completion_script" description:"Prints the bash / zsh completion script to stdout"`
Version bool `long:"version" description:"Print the version of the tool"`
} `group:"Options controlling output & logging"`
FeatureFlags struct {
NoUpdate bool `long:"noupdate" description:"Disable Please attempting to auto-update itself."`
NoCache bool `long:"nocache" description:"Disable caches (NB. not incrementality)"`
NoHashVerification bool `long:"nohash_verification" description:"Hash verification errors are nonfatal."`
NoLock bool `long:"nolock" description:"Don't attempt to lock the repo exclusively. Use with care."`
KeepWorkdirs bool `long:"keep_workdirs" description:"Don't clean directories in plz-out/tmp after successfully building targets."`
} `group:"Options that enable / disable certain features"`
Profile string `long:"profile_file" hidden:"true" description:"Write profiling output to this file"`
MemProfile string `long:"mem_profile_file" hidden:"true" description:"Write a memory profile to this file"`
ProfilePort int `long:"profile_port" hidden:"true" description:"Serve profiling info on this port."`
ParsePackageOnly bool `description:"Parses a single package only. All that's necessary for some commands." no-flag:"true"`
Complete string `long:"complete" hidden:"true" env:"PLZ_COMPLETE" description:"Provide completion options for this build target."`
VisibilityParse bool `description:"Parse all targets that the original targets are visible to. Used for some query steps." no-flag:"true"`
Build struct {
Prepare bool `long:"prepare" description:"Prepare build directory for these targets but don't build them."`
Shell bool `long:"shell" description:"Like --prepare, but opens a shell in the build directory with the appropriate environment variables."`
ShowStatus bool `long:"show_status" hidden:"true" description:"Show status of each target in output after build"`
Args struct { // Inner nesting is necessary to make positional-args work :(
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to build"`
} `positional-args:"true" required:"true"`
} `command:"build" description:"Builds one or more targets"`
Rebuild struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" required:"true" description:"Targets to rebuild"`
} `positional-args:"true" required:"true"`
} `command:"rebuild" description:"Forces a rebuild of one or more targets"`
Hash struct {
Detailed bool `long:"detailed" description:"Produces a detailed breakdown of the hash"`
Update bool `short:"u" long:"update" description:"Rewrites the hashes in the BUILD file to the new values"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to build"`
} `positional-args:"true" required:"true"`
} `command:"hash" description:"Calculates hash for one or more targets"`
Test struct {
FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"`
NumRuns int `long:"num_runs" short:"n" description:"Number of times to run each test target."`
TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."`
ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."`
Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."`
Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."`
// Slightly awkward since we can specify a single test with arguments or multiple test targets.
Args struct {
Target core.BuildLabel `positional-arg-name:"target" description:"Target to test"`
Args []string `positional-arg-name:"arguments" description:"Arguments or test selectors"`
} `positional-args:"true"`
} `command:"test" description:"Builds and tests one or more targets"`
Cover struct {
FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"`
NoCoverageReport bool `long:"nocoverage_report" description:"Suppress the per-file coverage report displayed in the shell"`
		LineCoverageReport bool `short:"l" long:"line_coverage_report" description:"Show a line-by-line coverage report for all affected files."`
NumRuns int `short:"n" long:"num_runs" description:"Number of times to run each test target."`
IncludeAllFiles bool `short:"a" long:"include_all_files" description:"Include all dependent files in coverage (default is just those from relevant packages)"`
IncludeFile []string `long:"include_file" description:"Filenames to filter coverage display to"`
TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."`
CoverageResultsFile cli.Filepath `long:"coverage_results_file" default:"plz-out/log/coverage.json" description:"File to write combined coverage results to."`
ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."`
Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."`
Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."`
Args struct {
Target core.BuildLabel `positional-arg-name:"target" description:"Target to test" group:"one test"`
Args []string `positional-arg-name:"arguments" description:"Arguments or test selectors" group:"one test"`
} `positional-args:"true"`
} `command:"cover" description:"Builds and tests one or more targets, and calculates coverage."`
Run struct {
Env bool `long:"env" description:"Overrides environment variables (e.g. PATH) in the new process."`
Parallel struct {
NumTasks int `short:"n" long:"num_tasks" default:"10" description:"Maximum number of subtasks to run in parallel"`
Quiet bool `short:"q" long:"quiet" description:"Suppress output from successful subprocesses."`
PositionalArgs struct {
Targets []core.BuildLabel `positional-arg-name:"target" description:"Targets to run"`
} `positional-args:"true" required:"true"`
Args []string `short:"a" long:"arg" description:"Arguments to pass to the called processes."`
} `command:"parallel" description:"Runs a sequence of targets in parallel"`
Sequential struct {
Quiet bool `short:"q" long:"quiet" description:"Suppress output from successful subprocesses."`
PositionalArgs struct {
Targets []core.BuildLabel `positional-arg-name:"target" description:"Targets to run"`
} `positional-args:"true" required:"true"`
Args []string `short:"a" long:"arg" description:"Arguments to pass to the called processes."`
} `command:"sequential" description:"Runs a sequence of targets sequentially."`
Args struct {
Target core.BuildLabel `positional-arg-name:"target" required:"true" description:"Target to run"`
Args []string `positional-arg-name:"arguments" description:"Arguments to pass to target when running (to pass flags to the target, put -- before them)"`
} `positional-args:"true"`
} `command:"run" subcommands-optional:"true" description:"Builds and runs a single target"`
Clean struct {
NoBackground bool `long:"nobackground" short:"f" description:"Don't fork & detach until clean is finished."`
Remote bool `long:"remote" description:"Clean entire remote cache when no targets are given (default is local only)"`
Args struct { // Inner nesting is necessary to make positional-args work :(
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to clean (default is to clean everything)"`
} `positional-args:"true"`
} `command:"clean" description:"Cleans build artifacts" subcommands-optional:"true"`
Watch struct {
Run bool `short:"r" long:"run" description:"Runs the specified targets when they change (default is to build or test as appropriate)."`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" required:"true" description:"Targets to watch the sources of for changes"`
} `positional-args:"true" required:"true"`
} `command:"watch" description:"Watches sources of targets for changes and rebuilds them"`
Update struct {
Force bool `long:"force" description:"Forces a re-download of the new version."`
NoVerify bool `long:"noverify" description:"Skips signature verification of downloaded version"`
Latest bool `long:"latest" description:"Update to latest available version (overrides config)."`
Version cli.Version `long:"version" description:"Updates to a particular version (overrides config)."`
} `command:"update" description:"Checks for an update and updates if needed."`
Op struct {
} `command:"op" description:"Re-runs previous command."`
Init struct {
Dir cli.Filepath `long:"dir" description:"Directory to create config in" default:"."`
BazelCompatibility bool `long:"bazel_compat" description:"Initialises config for Bazel compatibility mode."`
} `command:"init" description:"Initialises a .plzconfig file in the current directory"`
Gc struct {
Conservative bool `short:"c" long:"conservative" description:"Runs a more conservative / safer GC."`
TargetsOnly bool `short:"t" long:"targets_only" description:"Only print the targets to delete"`
SrcsOnly bool `short:"s" long:"srcs_only" description:"Only print the source files to delete"`
NoPrompt bool `short:"y" long:"no_prompt" description:"Remove targets without prompting"`
DryRun bool `short:"n" long:"dry_run" description:"Don't remove any targets or files, just print what would be done"`
Git bool `short:"g" long:"git" description:"Use 'git rm' to remove unused files instead of just 'rm'."`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to limit gc to."`
} `positional-args:"true"`
} `command:"gc" description:"Analyzes the repo to determine unneeded targets."`
Export struct {
Output string `short:"o" long:"output" required:"true" description:"Directory to export into"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to export."`
} `positional-args:"true"`
Outputs struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to export."`
} `positional-args:"true"`
} `command:"outputs" description:"Exports outputs of a set of targets"`
} `command:"export" subcommands-optional:"true" description:"Exports a set of targets and files from the repo."`
Follow struct {
Retries int `long:"retries" description:"Number of times to retry the connection"`
Delay cli.Duration `long:"delay" default:"1s" description:"Delay between timeouts"`
Args struct {
URL cli.URL `positional-arg-name:"URL" required:"true" description:"URL of remote server to connect to, e.g. 10.23.0.5:7777"`
} `positional-args:"true"`
} `command:"follow" description:"Connects to a remote Please instance to stream build events from."`
Help struct {
Args struct {
Topic help.Topic `positional-arg-name:"topic" description:"Topic to display help on"`
} `positional-args:"true"`
} `command:"help" alias:"halp" description:"Displays help about various parts of plz or its build rules"`
Tool struct {
Args struct {
Tool tool.Tool `positional-arg-name:"tool" description:"Tool to invoke (jarcat, lint, etc)"`
Args []string `positional-arg-name:"arguments" description:"Arguments to pass to the tool"`
} `positional-args:"true"`
} `command:"tool" hidden:"true" description:"Invoke one of Please's sub-tools"`
Query struct {
Deps struct {
Unique bool `long:"unique" short:"u" description:"Only output each dependency once"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"deps" description:"Queries the dependencies of a target."`
ReverseDeps struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"reverseDeps" alias:"revdeps" description:"Queries all the reverse dependencies of a target."`
SomePath struct {
Args struct {
Target1 core.BuildLabel `positional-arg-name:"target1" description:"First build target" required:"true"`
Target2 core.BuildLabel `positional-arg-name:"target2" description:"Second build target" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"somepath" description:"Queries for a path between two targets"`
AllTargets struct {
Hidden bool `long:"hidden" description:"Show hidden targets as well"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query"`
} `positional-args:"true"`
} `command:"alltargets" description:"Lists all targets in the graph"`
Print struct {
Fields []string `short:"f" long:"field" description:"Individual fields to print of the target"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to print" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"print" description:"Prints a representation of a single target"`
Completions struct {
Cmd string `long:"cmd" description:"Command to complete for" default:"build"`
Args struct {
Fragments cli.StdinStrings `positional-arg-name:"fragment" description:"Initial fragment to attempt to complete"`
} `positional-args:"true"`
} `command:"completions" subcommands-optional:"true" description:"Prints possible completions for a string."`
AffectedTargets struct {
Tests bool `long:"tests" description:"Shows only affected tests, no other targets."`
Intransitive bool `long:"intransitive" description:"Shows only immediately affected targets, not transitive dependencies."`
Args struct {
Files cli.StdinStrings `positional-arg-name:"files" required:"true" description:"Files to query affected tests for"`
} `positional-args:"true"`
} `command:"affectedtargets" description:"Prints any targets affected by a set of files."`
Input struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to display inputs for" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"input" alias:"inputs" description:"Prints all transitive inputs of a target."`
Output struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to display outputs for" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"output" alias:"outputs" description:"Prints all outputs of a target."`
Graph struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to render graph for"`
} `positional-args:"true"`
} `command:"graph" description:"Prints a JSON representation of the build graph."`
WhatOutputs struct {
EchoFiles bool `long:"echo_files" description:"Echo the file for which the printed output is responsible."`
Args struct {
Files cli.StdinStrings `positional-arg-name:"files" required:"true" description:"Files to query targets responsible for"`
} `positional-args:"true"`
} `command:"whatoutputs" description:"Prints out target(s) responsible for outputting provided file(s)"`
Rules struct {
Args struct {
				Targets []core.BuildLabel `positional-arg-name:"targets" description:"Additional targets to load rules from"`
} `positional-args:"true"`
} `command:"rules" description:"Prints built-in rules to stdout as JSON"`
} `command:"query" description:"Queries information about the build graph"`
}
// Definitions of what we do for each command.
// Functions are called after args are parsed and return true for success.
var buildFunctions = map[string]func() bool{
"build": func() bool {
success, _ := runBuild(opts.Build.Args.Targets, true, false)
return success
},
"rebuild": func() bool {
// It would be more pure to require --nocache for this, but in basically any context that
// you use 'plz rebuild', you don't want the cache coming in and mucking things up.
// 'plz clean' followed by 'plz build' would still work in those cases, anyway.
opts.FeatureFlags.NoCache = true
success, _ := runBuild(opts.Rebuild.Args.Targets, true, false)
return success
},
"hash": func() bool {
success, state := runBuild(opts.Hash.Args.Targets, true, false)
if opts.Hash.Detailed {
for _, target := range state.ExpandOriginalTargets() {
build.PrintHashes(state, state.Graph.TargetOrDie(target))
}
}
if opts.Hash.Update {
hashes.RewriteHashes(state, state.ExpandOriginalTargets())
}
return success
},
"test": func() bool {
targets := testTargets(opts.Test.Args.Target, opts.Test.Args.Args, opts.Test.Failed, opts.Test.TestResultsFile)
os.RemoveAll(string(opts.Test.TestResultsFile))
success, state := runBuild(targets, true, true)
test.WriteResultsToFileOrDie(state.Graph, string(opts.Test.TestResultsFile))
return success || opts.Test.FailingTestsOk
},
"cover": func() bool {
if opts.BuildFlags.Config != "" {
log.Warning("Build config overridden; coverage may not be available for some languages")
} else {
opts.BuildFlags.Config = "cover"
}
targets := testTargets(opts.Cover.Args.Target, opts.Cover.Args.Args, opts.Cover.Failed, opts.Cover.TestResultsFile)
os.RemoveAll(string(opts.Cover.TestResultsFile))
os.RemoveAll(string(opts.Cover.CoverageResultsFile))
success, state := runBuild(targets, true, true)
test.WriteResultsToFileOrDie(state.Graph, string(opts.Cover.TestResultsFile))
test.AddOriginalTargetsToCoverage(state, opts.Cover.IncludeAllFiles)
test.RemoveFilesFromCoverage(state.Coverage, state.Config.Cover.ExcludeExtension)
test.WriteCoverageToFileOrDie(state.Coverage, string(opts.Cover.CoverageResultsFile))
if opts.Cover.LineCoverageReport {
output.PrintLineCoverageReport(state, opts.Cover.IncludeFile)
} else if !opts.Cover.NoCoverageReport {
output.PrintCoverage(state, opts.Cover.IncludeFile)
}
return success || opts.Cover.FailingTestsOk
},
"run": func() bool {
if success, state := runBuild([]core.BuildLabel{opts.Run.Args.Target}, true, false); success {
run.Run(state, opts.Run.Args.Target, opts.Run.Args.Args, opts.Run.Env)
}
return false // We should never return from run.Run so if we make it here something's wrong.
},
"parallel": func() bool {
if success, state := runBuild(opts.Run.Parallel.PositionalArgs.Targets, true, false); success {
os.Exit(run.Parallel(state, state.ExpandOriginalTargets(), opts.Run.Parallel.Args, opts.Run.Parallel.NumTasks, opts.Run.Parallel.Quiet, opts.Run.Env))
}
return false
},
"sequential": func() bool {
if success, state := runBuild(opts.Run.Sequential.PositionalArgs.Targets, true, false); success {
os.Exit(run.Sequential(state, state.ExpandOriginalTargets(), opts.Run.Sequential.Args, opts.Run.Sequential.Quiet, opts.Run.Env))
}
return false
},
"clean": func() bool {
config.Cache.DirClean = false
if len(opts.Clean.Args.Targets) == 0 {
if len(opts.BuildFlags.Include) == 0 && len(opts.BuildFlags.Exclude) == 0 {
// Clean everything, doesn't require parsing at all.
if !opts.Clean.Remote {
// Don't construct the remote caches if they didn't pass --remote.
config.Cache.RPCURL = ""
config.Cache.HTTPURL = ""
}
clean.Clean(config, newCache(config), !opts.Clean.NoBackground)
return true
}
opts.Clean.Args.Targets = core.WholeGraph
}
if success, state := runBuild(opts.Clean.Args.Targets, false, false); success {
clean.Targets(state, state.ExpandOriginalTargets(), !opts.FeatureFlags.NoCache)
return true
}
return false
},
"watch": func() bool {
success, state := runBuild(opts.Watch.Args.Targets, false, false)
if success {
watch.Watch(state, state.ExpandOriginalTargets(), opts.Watch.Run)
}
return success
},
"update": func() bool {
fmt.Printf("Up to date (version %s).\n", core.PleaseVersion)
return true // We'd have died already if something was wrong.
},
"op": func() bool {
cmd := core.ReadLastOperationOrDie()
log.Notice("OP PLZ: %s", strings.Join(cmd, " "))
// Annoyingly we don't seem to have any access to execvp() which would be rather useful here...
executable, err := os.Executable()
if err == nil {
err = syscall.Exec(executable, append([]string{executable}, cmd...), os.Environ())
}
log.Fatalf("SORRY OP: %s", err) // On success Exec never returns.
return false
},
"gc": func() bool {
success, state := runBuild(core.WholeGraph, false, false)
if success {
state.OriginalTargets = state.Config.Gc.Keep
gc.GarbageCollect(state, opts.Gc.Args.Targets, state.ExpandOriginalTargets(), state.Config.Gc.Keep, state.Config.Gc.KeepLabel,
opts.Gc.Conservative, opts.Gc.TargetsOnly, opts.Gc.SrcsOnly, opts.Gc.NoPrompt, opts.Gc.DryRun, opts.Gc.Git)
}
return success
},
"export": func() bool {
success, state := runBuild(opts.Export.Args.Targets, false, false)
if success {
export.ToDir(state, opts.Export.Output, state.ExpandOriginalTargets())
}
return success
},
"follow": func() bool {
// This is only temporary, ConnectClient will alter it to match the server.
state := core.NewBuildState(1, nil, opts.OutputFlags.Verbosity, config)
return follow.ConnectClient(state, opts.Follow.Args.URL.String(), opts.Follow.Retries, time.Duration(opts.Follow.Delay))
},
"outputs": func() bool {
success, state := runBuild(opts.Export.Outputs.Args.Targets, true, false)
if success {
export.Outputs(state, opts.Export.Output, state.ExpandOriginalTargets())
}
return success
},
"help": func() bool {
return help.Help(string(opts.Help.Args.Topic))
},
"tool": func() bool {
tool.Run(config, opts.Tool.Args.Tool, opts.Tool.Args.Args)
return false // If the function returns (which it shouldn't), something went wrong.
},
"deps": func() bool {
return runQuery(true, opts.Query.Deps.Args.Targets, func(state *core.BuildState) {
query.Deps(state, state.ExpandOriginalTargets(), opts.Query.Deps.Unique)
})
},
"reverseDeps": func() bool {
opts.VisibilityParse = true
return runQuery(false, opts.Query.ReverseDeps.Args.Targets, func(state *core.BuildState) {
query.ReverseDeps(state.Graph, state.ExpandOriginalTargets())
})
},
"somepath": func() bool {
return runQuery(true,
[]core.BuildLabel{opts.Query.SomePath.Args.Target1, opts.Query.SomePath.Args.Target2},
func(state *core.BuildState) {
query.SomePath(state.Graph, opts.Query.SomePath.Args.Target1, opts.Query.SomePath.Args.Target2)
},
)
},
"alltargets": func() bool {
return runQuery(true, opts.Query.AllTargets.Args.Targets, func(state *core.BuildState) {
query.AllTargets(state.Graph, state.ExpandOriginalTargets(), opts.Query.AllTargets.Hidden)
})
},
"print": func() bool {
return runQuery(false, opts.Query.Print.Args.Targets, func(state *core.BuildState) {
query.Print(state.Graph, state.ExpandOriginalTargets(), opts.Query.Print.Fields)
})
},
"affectedtargets": func() bool {
files := opts.Query.AffectedTargets.Args.Files
targets := core.WholeGraph
if opts.Query.AffectedTargets.Intransitive {
state := core.NewBuildState(1, nil, 1, config)
targets = core.FindOwningPackages(state, files)
}
return runQuery(true, targets, func(state *core.BuildState) {
query.AffectedTargets(state.Graph, files.Get(), opts.BuildFlags.Include, opts.BuildFlags.Exclude, opts.Query.AffectedTargets.Tests, !opts.Query.AffectedTargets.Intransitive)
})
},
"input": func() bool {
return runQuery(true, opts.Query.Input.Args.Targets, func(state *core.BuildState) {
query.TargetInputs(state.Graph, state.ExpandOriginalTargets())
})
},
"output": func() bool {
return runQuery(true, opts.Query.Output.Args.Targets, func(state *core.BuildState) {
query.TargetOutputs(state.Graph, state.ExpandOriginalTargets())
})
},
"completions": func() bool {
// Somewhat fiddly because the inputs are not necessarily well-formed at this point.
opts.ParsePackageOnly = true
fragments := opts.Query.Completions.Args.Fragments.Get()
if opts.Query.Completions.Cmd == "help" {
// Special-case completing help topics rather than build targets.
if len(fragments) == 0 {
help.Topics("")
} else {
help.Topics(fragments[0])
}
return true
}
		if len(fragments) == 0 || (len(fragments) == 1 && strings.Trim(fragments[0], "/ ") == "") {
os.Exit(0) // Don't do anything for empty completion, it's normally too slow.
}
labels, parseLabels, hidden := query.CompletionLabels(config, fragments, core.RepoRoot)
if success, state := Please(parseLabels, config, false, false, false); success {
binary := opts.Query.Completions.Cmd == "run"
test := opts.Query.Completions.Cmd == "test" || opts.Query.Completions.Cmd == "cover"
query.Completions(state.Graph, labels, binary, test, hidden)
return true
}
return false
},
"graph": func() bool {
return runQuery(true, opts.Query.Graph.Args.Targets, func(state *core.BuildState) {
if len(opts.Query.Graph.Args.Targets) == 0 {
state.OriginalTargets = opts.Query.Graph.Args.Targets // It special-cases doing the full graph.
}
query.Graph(state, state.ExpandOriginalTargets())
})
},
"whatoutputs": func() bool {
return runQuery(true, core.WholeGraph, func(state *core.BuildState) {
query.WhatOutputs(state.Graph, opts.Query.WhatOutputs.Args.Files.Get(), opts.Query.WhatOutputs.EchoFiles)
})
},
"rules": func() bool {
targets := opts.Query.Rules.Args.Targets
success, state := Please(opts.Query.Rules.Args.Targets, config, true, true, false)
if !success {
return false
}
targets = state.ExpandOriginalTargets()
parse.PrintRuleArgs(state, targets)
return true
},
}
// ConfigOverrides are used to implement completion on the -o flag.
type ConfigOverrides map[string]string
// Complete implements the flags.Completer interface.
func (overrides ConfigOverrides) Complete(match string) []flags.Completion {
return core.DefaultConfiguration().Completions(match)
}
// Used above as a convenience wrapper for query functions.
func runQuery(needFullParse bool, labels []core.BuildLabel, onSuccess func(state *core.BuildState)) bool {
opts.OutputFlags.PlainOutput = true // No point displaying this for one of these queries.
config.Cache.DirClean = false
if !needFullParse {
opts.ParsePackageOnly = true
}
if len(labels) == 0 {
labels = core.WholeGraph
}
if success, state := runBuild(labels, false, false); success {
onSuccess(state)
return true
}
return false
}
func please(tid int, state *core.BuildState, parsePackageOnly bool, include, exclude []string) {
for {
label, dependor, t := state.NextTask()
switch t {
case core.Stop, core.Kill:
return
case core.Parse, core.SubincludeParse:
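			// Copy the loop variables so the closure queued below captures this
			// task's values rather than those of a later iteration.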
t := t
label := label
dependor := dependor
state.ParsePool <- func() {
parse.Parse(tid, state, label, dependor, parsePackageOnly, include, exclude, t == core.SubincludeParse)
if opts.VisibilityParse && state.IsOriginalTarget(label) {
parseForVisibleTargets(state, label)
}
state.TaskDone()
}
case core.Build, core.SubincludeBuild:
build.Build(tid, state, label)
state.TaskDone()
case core.Test:
test.Test(tid, state, label)
state.TaskDone()
}
}
}
// parseForVisibleTargets adds parse tasks for any targets that the given label is visible to.
func parseForVisibleTargets(state *core.BuildState, label core.BuildLabel) {
if target := state.Graph.Target(label); target != nil {
for _, vis := range target.Visibility {
findOriginalTask(state, vis, false)
}
}
}
// prettyOutput determines from the input flags whether we should show 'pretty' (i.e. interactive) output.
func prettyOutput(interactiveOutput bool, plainOutput bool, verbosity int) bool {
if interactiveOutput && plainOutput {
log.Fatal("Can't pass both --interactive_output and --plain_output")
}
return interactiveOutput || (!plainOutput && cli.StdErrIsATerminal && verbosity < 4)
}
// newCache constructs a new cache based on the current config / flags.
func newCache(config *core.Configuration) core.Cache {
if opts.FeatureFlags.NoCache {
return nil
}
return cache.NewCache(config)
}
// Please starts & runs the main build process through to its completion.
func Please(targets []core.BuildLabel, config *core.Configuration, prettyOutput, shouldBuild, shouldTest bool) (bool, *core.BuildState) {
if opts.BuildFlags.NumThreads > 0 {
config.Please.NumThreads = opts.BuildFlags.NumThreads
} else if config.Please.NumThreads <= 0 {
config.Please.NumThreads = runtime.NumCPU() + 2
}
debugTests := opts.Test.Debug || opts.Cover.Debug
if opts.BuildFlags.Config != "" {
config.Build.Config = opts.BuildFlags.Config
} else if debugTests {
config.Build.Config = "dbg"
}
c := newCache(config)
state := core.NewBuildState(config.Please.NumThreads, c, opts.OutputFlags.Verbosity, config)
state.VerifyHashes = !opts.FeatureFlags.NoHashVerification
state.NumTestRuns = opts.Test.NumRuns + opts.Cover.NumRuns // Only one of these can be passed.
state.TestArgs = append(opts.Test.Args.Args, opts.Cover.Args.Args...) // Similarly here.
state.NeedCoverage = !opts.Cover.Args.Target.IsEmpty()
state.NeedBuild = shouldBuild
state.NeedTests = shouldTest
state.NeedHashesOnly = len(opts.Hash.Args.Targets) > 0
state.PrepareOnly = opts.Build.Prepare || opts.Build.Shell
state.PrepareShell = opts.Build.Shell
state.CleanWorkdirs = !opts.FeatureFlags.KeepWorkdirs
state.ForceRebuild = len(opts.Rebuild.Args.Targets) > 0
state.ShowTestOutput = opts.Test.ShowOutput || opts.Cover.ShowOutput
state.DebugTests = debugTests
state.ShowAllOutput = opts.OutputFlags.ShowAllOutput
state.SetIncludeAndExclude(opts.BuildFlags.Include, opts.BuildFlags.Exclude)
parse.InitParser(state)
if config.Events.Port != 0 && shouldBuild {
shutdown := follow.InitialiseServer(state, config.Events.Port)
defer shutdown()
}
if config.Events.Port != 0 || config.Display.SystemStats {
go follow.UpdateResources(state)
}
metrics.InitFromConfig(config)
// Acquire the lock before we start building
if (shouldBuild || shouldTest) && !opts.FeatureFlags.NoLock {
core.AcquireRepoLock()
defer core.ReleaseRepoLock()
}
if state.DebugTests && len(targets) != 1 {
log.Fatalf("-d/--debug flag can only be used with a single test target")
}
// Start looking for the initial targets to kick the build off
go findOriginalTasks(state, targets)
// Start up all the build workers
var wg sync.WaitGroup
wg.Add(config.Please.NumThreads)
for i := 0; i < config.Please.NumThreads; i++ {
go func(tid int) {
please(tid, state, opts.ParsePackageOnly, opts.BuildFlags.Include, opts.BuildFlags.Exclude)
wg.Done()
}(i)
}
// Wait until they've all exited, which they'll do once they have no tasks left.
go func() {
wg.Wait()
close(state.Results) // This will signal MonitorState (below) to stop.
}()
// Draw stuff to the screen while there are still results coming through.
shouldRun := !opts.Run.Args.Target.IsEmpty()
success := output.MonitorState(state, config.Please.NumThreads, !prettyOutput, opts.BuildFlags.KeepGoing, shouldBuild, shouldTest, shouldRun, opts.Build.ShowStatus, string(opts.OutputFlags.TraceFile))
metrics.Stop()
build.StopWorkers()
if c != nil {
c.Shutdown()
}
return success, state
}
// findOriginalTasks finds the original parse tasks for the original set of targets.
func findOriginalTasks(state *core.BuildState, targets []core.BuildLabel) {
if state.Config.Bazel.Compatibility && fs.FileExists("WORKSPACE") {
// We have to parse the WORKSPACE file before anything else to understand subrepos.
// This is a bit crap really since it inhibits parallelism for the first step.
parse.Parse(0, state, core.NewBuildLabel("workspace", "all"), core.OriginalTarget, false, state.Include, state.Exclude, false)
}
if opts.BuildFlags.Arch.Arch != "" {
// Set up a new subrepo for this architecture.
state.Graph.AddSubrepo(core.SubrepoForArch(state, opts.BuildFlags.Arch))
}
for _, target := range targets {
if target == core.BuildLabelStdin {
for label := range cli.ReadStdin() {
findOriginalTask(state, core.ParseBuildLabels([]string{label})[0], true)
}
} else {
findOriginalTask(state, target, true)
}
}
state.TaskDone() // initial target adding counts as one.
}
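// findOriginalTask resolves a single target, expanding /... wildcards into their
// subpackages and applying the architecture subrepo prefix when one is in use.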
func findOriginalTask(state *core.BuildState, target core.BuildLabel, addToList bool) {
if opts.BuildFlags.Arch.Arch != "" {
target.PackageName = path.Join(opts.BuildFlags.Arch.String(), target.PackageName)
}
if target.IsAllSubpackages() {
for pkg := range utils.FindAllSubpackages(state.Config, target.PackageName, "") {
state.AddOriginalTarget(core.NewBuildLabel(pkg, "all"), addToList)
}
} else {
state.AddOriginalTarget(target, addToList)
}
}
// testTargets handles test targets which can be given in two formats; a list of targets or a single
// target with a list of trailing arguments.
// Alternatively they can be completely omitted in which case we test everything under the working dir.
// One can also pass a 'failed' flag which runs the failed tests from last time.
func testTargets(target core.BuildLabel, args []string, failed bool, resultsFile cli.Filepath) []core.BuildLabel {
if failed {
targets, args := test.LoadPreviousFailures(string(resultsFile))
// Have to reset these - it doesn't matter which gets which.
opts.Test.Args.Args = args
opts.Cover.Args.Args = nil
return targets
} else if target.Name == "" {
return core.InitialPackage()
} else if len(args) > 0 && core.LooksLikeABuildLabel(args[0]) {
opts.Cover.Args.Args = []string{}
opts.Test.Args.Args = []string{}
return append(core.ParseBuildLabels(args), target)
}
return []core.BuildLabel{target}
}
// readConfig sets various things up and reads the initial configuration.
func readConfig(forceUpdate bool) *core.Configuration {
if opts.FeatureFlags.NoHashVerification {
log.Warning("You've disabled hash verification; this is intended to help temporarily while modifying build targets. You shouldn't use this regularly.")
}
config, err := core.ReadConfigFiles([]string{
core.MachineConfigFileName,
core.ExpandHomePath(core.UserConfigFileName),
path.Join(core.RepoRoot, core.ConfigFileName),
path.Join(core.RepoRoot, core.ArchConfigFileName),
path.Join(core.RepoRoot, core.LocalConfigFileName),
}, opts.BuildFlags.Profile)
if err != nil {
log.Fatalf("Error reading config file: %s", err)
} else if err := config.ApplyOverrides(opts.BuildFlags.Option); err != nil {
log.Fatalf("Can't override requested config setting: %s", err)
}
// Now apply any flags that override this
if opts.Update.Latest {
config.Please.Version.Unset()
} else if opts.Update.Version.IsSet {
config.Please.Version = opts.Update.Version
}
update.CheckAndUpdate(config, !opts.FeatureFlags.NoUpdate, forceUpdate, opts.Update.Force, !opts.Update.NoVerify)
return config
}
// runBuild runs the actual build.
// Which phases get run is controlled by shouldBuild and shouldTest.
func runBuild(targets []core.BuildLabel, shouldBuild, shouldTest bool) (bool, *core.BuildState) {
if len(targets) == 0 {
targets = core.InitialPackage()
}
pretty := prettyOutput(opts.OutputFlags.InteractiveOutput, opts.OutputFlags.PlainOutput, opts.OutputFlags.Verbosity)
return Please(targets, config, pretty, shouldBuild, shouldTest)
}
// readConfigAndSetRoot reads the .plzconfig files and moves to the repo root.
func readConfigAndSetRoot(forceUpdate bool) *core.Configuration {
if opts.BuildFlags.RepoRoot == "" {
log.Debug("Found repo root at %s", core.MustFindRepoRoot())
} else {
core.RepoRoot = string(opts.BuildFlags.RepoRoot)
}
// Please always runs from the repo root, so move there now.
if err := os.Chdir(core.RepoRoot); err != nil {
log.Fatalf("%s", err)
}
// Reset this now we're at the repo root.
if opts.OutputFlags.LogFile != "" {
if !path.IsAbs(string(opts.OutputFlags.LogFile)) {
opts.OutputFlags.LogFile = cli.Filepath(path.Join(core.RepoRoot, string(opts.OutputFlags.LogFile)))
}
cli.InitFileLogging(string(opts.OutputFlags.LogFile), opts.OutputFlags.LogFileLevel)
}
return readConfig(forceUpdate)
}
// handleCompletions handles shell completion. Typically it just prints to stdout but
// may do a little more if we think we need to handle aliases.
func handleCompletions(parser *flags.Parser, items []flags.Completion) {
if len(items) > 0 {
cli.PrintCompletions(items)
} else {
cli.InitLogging(0) // Ensure this is quiet
opts.FeatureFlags.NoUpdate = true // Ensure we don't try to update
config := readConfigAndSetRoot(false)
if len(config.Aliases) > 0 {
for k, v := range config.Aliases {
parser.AddCommand(k, v, v, &struct{}{})
}
// Run again without this registered as a completion handler
parser.CompletionHandler = nil
parser.ParseArgs(os.Args[1:])
}
}
// Regardless of what happened, always exit with 0 at this point.
os.Exit(0)
}
func main() {
parser, extraArgs, flagsErr := cli.ParseFlags("Please", &opts, os.Args, handleCompletions)
// Note that we must leave flagsErr for later, because it may be affected by aliases.
if opts.OutputFlags.Version {
fmt.Printf("Please version %s\n", core.PleaseVersion)
os.Exit(0) // Ignore other flags if --version was passed.
}
if opts.OutputFlags.Colour {
output.SetColouredOutput(true)
} else if opts.OutputFlags.NoColour {
output.SetColouredOutput(false)
}
if opts.OutputFlags.ShowAllOutput {
opts.OutputFlags.PlainOutput = true
}
// Init logging, but don't do file output until we've chdir'd.
cli.InitLogging(opts.OutputFlags.Verbosity)
command := cli.ActiveCommand(parser.Command)
if opts.Complete != "" {
// Completion via PLZ_COMPLETE env var sidesteps other commands
opts.Query.Completions.Cmd = command
opts.Query.Completions.Args.Fragments = []string{opts.Complete}
command = "completions"
} else if command == "init" {
if flagsErr != nil { // This error otherwise doesn't get checked until later.
cli.ParseFlagsFromArgsOrDie("Please", core.PleaseVersion.String(), &opts, os.Args)
}
// If we're running plz init then we obviously don't expect to read a config file.
utils.InitConfig(string(opts.Init.Dir), opts.Init.BazelCompatibility)
os.Exit(0)
} else if command == "help" || command == "follow" {
config = core.DefaultConfiguration()
if !buildFunctions[command]() {
os.Exit(1)
}
os.Exit(0)
} else if opts.OutputFlags.CompletionScript {
utils.PrintCompletionScript()
os.Exit(0)
}
// Read the config now
config = readConfigAndSetRoot(command == "update")
// Set this in case anything wants to use it soon
core.NewBuildState(config.Please.NumThreads, nil, opts.OutputFlags.Verbosity, config)
// Now we've read the config file, we may need to re-run the parser; the aliases in the config
// can affect how we parse otherwise illegal flag combinations.
if flagsErr != nil || len(extraArgs) > 0 {
for idx, arg := range os.Args[1:] {
// Please should not touch anything that comes after `--`
if arg == "--" {
break
}
for k, v := range config.Aliases {
if arg == k {
// We could insert every token in v into os.Args at this point and then we could have
// aliases defined in terms of other aliases but that seems rather like overkill so just
// stick the replacement in wholesale instead.
os.Args[idx+1] = v
}
}
}
argv := strings.Join(os.Args[1:], " ")
command = cli.ParseFlagsFromArgsOrDie("Please", core.PleaseVersion.String(), &opts, strings.Fields(os.Args[0]+" "+argv))
}
if opts.ProfilePort != 0 {
go func() {
log.Warning("%s", http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", opts.ProfilePort), nil))
}()
}
if opts.Profile != "" {
f, err := os.Create(opts.Profile)
if err != nil {
log.Fatalf("Failed to open profile file: %s", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatalf("could not start profiler: %s", err)
}
defer pprof.StopCPUProfile()
}
if opts.MemProfile != "" {
f, err := os.Create(opts.MemProfile)
if err != nil {
log.Fatalf("Failed to open memory profile file: %s", err)
}
defer f.Close()
defer pprof.WriteHeapProfile(f)
}
if !buildFunctions[command]() {
os.Exit(7) // Something distinctive, is sometimes useful to identify this externally.
}
}
| 1 | 8,159 | Why do we have both `before` and `after`? Naively I would expect this to work as follows: `plz query changed` with no arguments compares the current working directory state to the last commit (i.e. HEAD, i.e. a noop when directly on a git commit). `plz query changed --since [reflike]` compares the current working directory state to the given reflike. So for CI I would expect to run something like `plz query changed --since @{upstream}` (or origin/master I guess). And on merge I'd either run everything or just `plz query changed --since HEAD^` (given our workflow); I don't think I'd ever want to do anything else. Either way I think before and after are slightly confusing terms here. | thought-machine-please | go |
@@ -381,6 +381,17 @@ class FDst(_Rex):
return f.server_conn.address and self.re.search(r)
+class FDstIP(_Rex):
+ code = "ip"
+ help = "Match destination ip address"
+ is_binary = False
+
+ def __call__(self, f):
+ if not f.server_conn or not f.server_conn.ip_address:
+ return False
+ return f.server_conn.ip_address and self.re.search(f.server_conn.ip_address[0])
+
+
class FReplay(_Action):
code = "replay"
help = "Match replayed flows" | 1 | """
The following operators are understood:

    ~q          Request
    ~s          Response

Headers:

    Patterns are matched against "name: value" strings. Field names are
    all-lowercase.

    ~a          Asset content-type in response. Asset content types are:
                    text/javascript
                    application/x-javascript
                    application/javascript
                    text/css
                    image/*
                    application/x-shockwave-flash
    ~h rex      Header line in either request or response
    ~hq rex     Header in request
    ~hs rex     Header in response
    ~b rex      Expression in the body of either request or response
    ~bq rex     Expression in the body of request
    ~bs rex     Expression in the body of response
    ~t rex      Shortcut for content-type header.
    ~d rex      Request domain
    ~m rex      Method
    ~u rex      URL
    ~c CODE     Response code.
    rex         Equivalent to ~u rex
"""
import functools
import re
import sys
from typing import Callable, ClassVar, Optional, Sequence, Type
import pyparsing as pp
from mitmproxy import flow, http, tcp
def only(*types):
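    """Decorator restricting a filter to the given flow types; any other flow simply fails to match."""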
def decorator(fn):
@functools.wraps(fn)
def filter_types(self, flow):
if isinstance(flow, types):
return fn(self, flow)
return False
return filter_types
return decorator
class _Token:
def dump(self, indent=0, fp=sys.stdout):
print("{spacing}{name}{expr}".format(
spacing="\t" * indent,
name=self.__class__.__name__,
expr=getattr(self, "expr", "")
), file=fp)
class _Action(_Token):
code: ClassVar[str]
help: ClassVar[str]
@classmethod
def make(klass, s, loc, toks):
return klass(*toks[1:])
class FErr(_Action):
code = "e"
help = "Match error"
def __call__(self, f):
return True if f.error else False
class FMarked(_Action):
code = "marked"
help = "Match marked flows"
def __call__(self, f):
return bool(f.marked)
class FHTTP(_Action):
code = "http"
help = "Match HTTP flows"
@only(http.HTTPFlow)
def __call__(self, f):
return True
class FWebSocket(_Action):
code = "websocket"
help = "Match WebSocket flows"
@only(http.HTTPFlow)
def __call__(self, f: http.HTTPFlow):
return f.websocket is not None
class FTCP(_Action):
code = "tcp"
help = "Match TCP flows"
@only(tcp.TCPFlow)
def __call__(self, f):
return True
class FReq(_Action):
code = "q"
help = "Match request with no response"
@only(http.HTTPFlow)
def __call__(self, f):
if not f.response:
return True
class FResp(_Action):
code = "s"
help = "Match response"
@only(http.HTTPFlow)
def __call__(self, f):
return bool(f.response)
class _Rex(_Action):
flags = 0
is_binary = True
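    # Binary filters match raw bytes (headers, bodies); subclasses that set
    # is_binary = False match text fields such as URLs or metadata.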
def __init__(self, expr):
self.expr = expr
if self.is_binary:
expr = expr.encode()
try:
self.re = re.compile(expr, self.flags)
except Exception:
raise ValueError("Cannot compile expression.")
def _check_content_type(rex, message):
return any(
name.lower() == b"content-type" and
rex.search(value)
for name, value in message.headers.fields
)
class FAsset(_Action):
code = "a"
help = "Match asset in response: CSS, JavaScript, images."
ASSET_TYPES = [re.compile(x) for x in [
b"text/javascript",
b"application/x-javascript",
b"application/javascript",
b"text/css",
b"image/.*"
]]
@only(http.HTTPFlow)
def __call__(self, f):
if f.response:
for i in self.ASSET_TYPES:
if _check_content_type(i, f.response):
return True
return False
class FContentType(_Rex):
code = "t"
help = "Content-type header"
@only(http.HTTPFlow)
def __call__(self, f):
if _check_content_type(self.re, f.request):
return True
elif f.response and _check_content_type(self.re, f.response):
return True
return False
class FContentTypeRequest(_Rex):
code = "tq"
help = "Request Content-Type header"
@only(http.HTTPFlow)
def __call__(self, f):
return _check_content_type(self.re, f.request)
class FContentTypeResponse(_Rex):
code = "ts"
help = "Response Content-Type header"
@only(http.HTTPFlow)
def __call__(self, f):
if f.response:
return _check_content_type(self.re, f.response)
return False
class FHead(_Rex):
code = "h"
help = "Header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.request and self.re.search(bytes(f.request.headers)):
return True
if f.response and self.re.search(bytes(f.response.headers)):
return True
return False
class FHeadRequest(_Rex):
code = "hq"
help = "Request header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.request and self.re.search(bytes(f.request.headers)):
return True
class FHeadResponse(_Rex):
code = "hs"
help = "Response header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.response and self.re.search(bytes(f.response.headers)):
return True
class FBod(_Rex):
code = "b"
help = "Body"
flags = re.DOTALL
@only(http.HTTPFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.request and f.request.raw_content:
if self.re.search(f.request.get_content(strict=False)):
return True
if f.response and f.response.raw_content:
if self.re.search(f.response.get_content(strict=False)):
return True
if f.websocket:
for msg in f.websocket.messages:
if self.re.search(msg.content):
return True
elif isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if self.re.search(msg.content):
return True
return False
class FBodRequest(_Rex):
code = "bq"
help = "Request body"
flags = re.DOTALL
@only(http.HTTPFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.request and f.request.raw_content:
if self.re.search(f.request.get_content(strict=False)):
return True
if f.websocket:
for msg in f.websocket.messages:
if msg.from_client and self.re.search(msg.content):
return True
elif isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if msg.from_client and self.re.search(msg.content):
return True
return False
class FBodResponse(_Rex):
code = "bs"
help = "Response body"
flags = re.DOTALL
@only(http.HTTPFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.response and f.response.raw_content:
if self.re.search(f.response.get_content(strict=False)):
return True
if f.websocket:
for msg in f.websocket.messages:
if not msg.from_client and self.re.search(msg.content):
return True
elif isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if not msg.from_client and self.re.search(msg.content):
return True
return False
class FMethod(_Rex):
code = "m"
help = "Method"
flags = re.IGNORECASE
@only(http.HTTPFlow)
def __call__(self, f):
return bool(self.re.search(f.request.data.method))
class FDomain(_Rex):
code = "d"
help = "Domain"
flags = re.IGNORECASE
is_binary = False
@only(http.HTTPFlow)
def __call__(self, f):
return bool(
self.re.search(f.request.host) or
self.re.search(f.request.pretty_host)
)
class FUrl(_Rex):
code = "u"
help = "URL"
is_binary = False
# FUrl is special, because it can be "naked".
@classmethod
def make(klass, s, loc, toks):
if len(toks) > 1:
toks = toks[1:]
return klass(*toks)
@only(http.HTTPFlow)
def __call__(self, f):
if not f or not f.request:
return False
return self.re.search(f.request.pretty_url)
class FSrc(_Rex):
code = "src"
help = "Match source address"
is_binary = False
def __call__(self, f):
if not f.client_conn or not f.client_conn.peername:
return False
r = "{}:{}".format(f.client_conn.peername[0], f.client_conn.peername[1])
return f.client_conn.peername and self.re.search(r)
class FDst(_Rex):
code = "dst"
help = "Match destination address"
is_binary = False
def __call__(self, f):
if not f.server_conn or not f.server_conn.address:
return False
r = "{}:{}".format(f.server_conn.address[0], f.server_conn.address[1])
return f.server_conn.address and self.re.search(r)
class FReplay(_Action):
code = "replay"
help = "Match replayed flows"
def __call__(self, f):
return f.is_replay is not None
class FReplayClient(_Action):
code = "replayq"
help = "Match replayed client request"
def __call__(self, f):
return f.is_replay == 'request'
class FReplayServer(_Action):
code = "replays"
help = "Match replayed server response"
def __call__(self, f):
return f.is_replay == 'response'
class FMeta(_Rex):
code = "meta"
help = "Flow metadata"
flags = re.MULTILINE
is_binary = False
def __call__(self, f):
m = "\n".join([f"{key}: {value}" for key, value in f.metadata.items()])
return self.re.search(m)
class FMarker(_Rex):
code = "marker"
help = "Match marked flows with specified marker"
is_binary = False
def __call__(self, f):
return self.re.search(f.marked)
class FComment(_Rex):
code = "comment"
help = "Flow comment"
flags = re.MULTILINE
is_binary = False
def __call__(self, f):
return self.re.search(f.comment)
class _Int(_Action):
def __init__(self, num):
self.num = int(num)
class FCode(_Int):
code = "c"
help = "HTTP response code"
@only(http.HTTPFlow)
def __call__(self, f):
if f.response and f.response.status_code == self.num:
return True
class FAnd(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return all(i(f) for i in self.lst)
class FOr(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return any(i(f) for i in self.lst)
class FNot(_Token):
def __init__(self, itm):
self.itm = itm[0]
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
self.itm.dump(indent + 1, fp)
def __call__(self, f):
return not self.itm(f)
filter_unary: Sequence[Type[_Action]] = [
FAsset,
FErr,
FHTTP,
FMarked,
FReplay,
FReplayClient,
FReplayServer,
FReq,
FResp,
FTCP,
FWebSocket,
]
filter_rex: Sequence[Type[_Rex]] = [
FBod,
FBodRequest,
FBodResponse,
FContentType,
FContentTypeRequest,
FContentTypeResponse,
FDomain,
FDst,
FHead,
FHeadRequest,
FHeadResponse,
FMethod,
FSrc,
FUrl,
FMeta,
FMarker,
FComment,
]
filter_int = [
FCode
]
def _make():
# Order is important - multi-char expressions need to come before narrow
# ones.
parts = []
for cls in filter_unary:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd()
f.setParseAction(cls.make)
parts.append(f)
# This is a bit of a hack to simulate Word(pyparsing_unicode.printables),
# which performs horribly because len(pyparsing.pyparsing_unicode.printables) == 1114060
unicode_words = pp.CharsNotIn("()~'\"" + pp.ParserElement.DEFAULT_WHITE_CHARS)
unicode_words.skipWhitespace = True
regex = (
unicode_words
| pp.QuotedString('"', escChar='\\')
| pp.QuotedString("'", escChar='\\')
)
for cls in filter_rex:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd() + regex.copy()
f.setParseAction(cls.make)
parts.append(f)
for cls in filter_int:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd() + pp.Word(pp.nums)
f.setParseAction(cls.make)
parts.append(f)
# A naked rex is a URL rex:
f = regex.copy()
f.setParseAction(FUrl.make)
parts.append(f)
atom = pp.MatchFirst(parts)
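# infixNotation builds the boolean grammar on top of the atoms: "!" binds
# tightest, then "&", then "|"; each operator token is suppressed and the
# operands are folded into FNot / FAnd / FOr nodes.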
expr = pp.infixNotation(
atom,
[(pp.Literal("!").suppress(),
1,
pp.opAssoc.RIGHT,
lambda x: FNot(*x)),
(pp.Literal("&").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FAnd(*x)),
(pp.Literal("|").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FOr(*x)),
])
expr = pp.OneOrMore(expr)
return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)
bnf = _make()
TFilter = Callable[[flow.Flow], bool]
def parse(s: str) -> Optional[TFilter]:
try:
flt = bnf.parseString(s, parseAll=True)[0]
flt.pattern = s
return flt
except pp.ParseException:
return None
except ValueError:
return None
def match(flt, flow):
"""
Matches a flow against a compiled filter expression.
Returns True if matched, False if not.
If flt is a string, it will be compiled as a filter expression.
If the expression is invalid, ValueError is raised.
"""
if isinstance(flt, str):
flt = parse(flt)
if not flt:
raise ValueError("Invalid filter expression.")
if flt:
return flt(flow)
return True
help = []
for a in filter_unary:
help.append(
(f"~{a.code}", a.help)
)
for b in filter_rex:
help.append(
(f"~{b.code} regex", b.help)
)
for c in filter_int:
help.append(
(f"~{c.code} int", c.help)
)
help.sort()
help.extend(
[
("!", "unary not"),
("&", "and"),
("|", "or"),
("(...)", "grouping"),
]
)
| 1 | 15,743 | Is there a reason why we can't use `~dst`? It feels like that could be good enough. I would like to avoid extending the filter syntax unless there's an urgent need. :) | mitmproxy-mitmproxy | py
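The reviewer's `~dst` suggestion can be tried directly against the `parse()`/`match()` helpers defined in the file above. A minimal sketch, assuming the module is importable as `mitmproxy.flowfilter` and that mitmproxy's test helper `tflow()` exists to fabricate a dummy HTTP flow:

# Hedged sketch: reuse the existing ~dst filter rather than new syntax.
from mitmproxy import flowfilter
from mitmproxy.test import tflow

flt = flowfilter.parse(r"~dst example\.com")  # regex applied to "host:port"
assert flt is not None, "filter did not parse"

f = tflow.tflow()                    # dummy flow with a server_conn set
print(flowfilter.match(flt, f))      # True iff the destination matches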
@@ -60,7 +60,7 @@ namespace Nethermind.Core
public long GasUsed { get; set; }
public long GasLimit { get; set; }
public UInt256 Timestamp { get; set; }
- public DateTime TimestampDate => DateTimeOffset.FromUnixTimeSeconds((long) Timestamp).DateTime;
+ public DateTime TimestampDate => DateTimeOffset.FromUnixTimeSeconds((long) Timestamp).LocalDateTime;
public byte[] ExtraData { get; set; }
public Keccak MixHash { get; set; }
public ulong Nonce { get; set; } | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Diagnostics;
using System.IO;
using System.Text;
using System.Threading;
using Nethermind.Core.Crypto;
using Nethermind.Core.Encoding;
using Nethermind.Core.Extensions;
using Nethermind.Dirichlet.Numerics;
namespace Nethermind.Core
{
[DebuggerDisplay("{Hash} ({Number})")]
public class BlockHeader
{
internal BlockHeader()
{
}
public BlockHeader(Keccak parentHash, Keccak ommersHash, Address beneficiary, UInt256 difficulty, long number, long gasLimit, UInt256 timestamp, byte[] extraData)
{
ParentHash = parentHash;
OmmersHash = ommersHash;
Beneficiary = beneficiary;
Difficulty = difficulty;
Number = number;
GasLimit = gasLimit;
Timestamp = timestamp;
ExtraData = extraData;
}
public bool IsGenesis => Number == 0;
public Keccak ParentHash { get; internal set; }
public Keccak OmmersHash { get; set; }
public Address Author { get; set; }
public Address Beneficiary { get; set; }
public Address GasBeneficiary => Author ?? Beneficiary;
public Keccak StateRoot { get; set; }
public Keccak TxRoot { get; set; }
public Keccak ReceiptsRoot { get; set; }
public Bloom Bloom { get; set; }
public UInt256 Difficulty { get; set; }
public long Number { get; set; }
public long GasUsed { get; set; }
public long GasLimit { get; set; }
public UInt256 Timestamp { get; set; }
public DateTime TimestampDate => DateTimeOffset.FromUnixTimeSeconds((long) Timestamp).DateTime;
public byte[] ExtraData { get; set; }
public Keccak MixHash { get; set; }
public ulong Nonce { get; set; }
public Keccak Hash { get; set; }
public UInt256? TotalDifficulty { get; set; }
public byte[] AuRaSignature { get; set; }
public long? AuRaStep { get; set; }
public bool HasBody => OmmersHash != Keccak.OfAnEmptySequenceRlp || TxRoot != Keccak.EmptyTreeHash;
public SealEngineType SealEngineType { get; set; } = SealEngineType.Ethash;
private static HeaderDecoder _headerDecoder = new HeaderDecoder();
public static Keccak CalculateHash(BlockHeader header, RlpBehaviors behaviors = RlpBehaviors.None)
{
Rlp buffer = _headerDecoder.Encode(header, behaviors);
return Keccak.Compute(buffer.Bytes);
}
public static Keccak CalculateHash(Block block) => CalculateHash(block.Header);
public string ToString(string indent)
{
StringBuilder builder = new StringBuilder();
builder.AppendLine($"{indent}Hash: {Hash}");
builder.AppendLine($"{indent}Number: {Number}");
builder.AppendLine($"{indent}Parent: {ParentHash}");
builder.AppendLine($"{indent}Beneficiary: {Beneficiary}");
builder.AppendLine($"{indent}Gas Limit: {GasLimit}");
builder.AppendLine($"{indent}Gas Used: {GasUsed}");
builder.AppendLine($"{indent}Timestamp: {Timestamp}");
builder.AppendLine($"{indent}Extra Data: {(ExtraData ?? new byte[0]).ToHexString()}");
builder.AppendLine($"{indent}Difficulty: {Difficulty}");
builder.AppendLine($"{indent}Mix Hash: {MixHash}");
builder.AppendLine($"{indent}Nonce: {Nonce}");
builder.AppendLine($"{indent}Ommers Hash: {OmmersHash}");
builder.AppendLine($"{indent}Tx Root: {TxRoot}");
builder.AppendLine($"{indent}Receipts Root: {ReceiptsRoot}");
builder.AppendLine($"{indent}State Root: {StateRoot}");
return builder.ToString();
}
public override string ToString()
{
return ToString(string.Empty);
}
public string ToString(Format format)
{
switch (format)
{
case Format.Full:
return ToString(string.Empty);
case Format.FullHashAndNumber:
return Hash == null ? $"{Number} null" : $"{Number} ({Hash})";
default:
return Hash == null ? $"{Number} null" : $"{Number} ({Hash.ToShortString()})";
}
}
[Todo(Improve.Refactor, "Use IFormattable here")]
public enum Format
{
Full,
Short,
FullHashAndNumber
}
}
} | 1 | 22,950 | Why local and not UTC? | NethermindEth-nethermind | .cs |
@@ -172,7 +172,8 @@ ExSqlComp::ReturnStatus ExSqlComp::createServer()
//
if (ret == ERROR)
{
- error(arkcmpErrorServer);
+ if ((!diagArea_->contains(-2013)) && (!diagArea_->contains(-2012))) // avoid generating redundant error
+ error(arkcmpErrorServer);
if (getenv("DEBUG_SERVER"))
MessageBox(NULL, "ExSqlComp:createServer", "error ", MB_OK|MB_ICONINFORMATION);
} | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: ExSqlComp.C
* Description: This file contains the implementation of ExSqlComp class for
* executor to create arkcmp process and send requests
* for compilation and SQLCAT related tasks.
*
* Created: 06/21/96
* Language: C++
*
*
*
*
*****************************************************************************
*/
#include <ctype.h>
#include "Platform.h"
#include "cextdecs/cextdecs.h"
#include "ex_stdh.h" // TEMP, for ISP testing
#include "Platform.h"
#include "ExSqlComp.h"
#include "cli_stdh.h"
#include "CmpErrors.h"
#include "CmpMessage.h"
#include "ComDiags.h"
#include "ShowSchema.h" // GetControlDefaults class
#include "StmtCompilationMode.h"
#include "ComTdb.h"
#include "ExControlArea.h"
#include "ComTransInfo.h"
#include "ex_tcb.h"
#include "ex_stored_proc.h"
#include "ex_transaction.h"
#include "sql_id.h"
#include "ComRtUtils.h"
#include "Ipc.h"
#include "PortProcessCalls.cpp"
#include "seabed/fs.h"
#include "seabed/ms.h"
// -----------------------------------------------------------------------
// Diagnostics error listings for ExSqlComp
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// Internal helper routines for ExSqlComp
// -----------------------------------------------------------------------
inline
NABoolean ExSqlComp::error(Lng32 no)
{
*diagArea_ << DgSqlCode(no);
return TRUE;
}
void ExSqlComp::clearDiags()
{
if (diagArea_)
diagArea_->clear();
else
diagArea_ = ComDiagsArea::allocate(h_);
}
inline void ExSqlComp::initRequests(Requests& req)
{
req.message_ = 0;
req.resendCount_ = 0;
req.waited_ = TRUE;
req.ioStatus_ = INIT;
}
ExSqlComp::ReturnStatus ExSqlComp::changePriority(IpcPriority priority,
NABoolean isDelta)
{
ReturnStatus ret = SUCCESS;
if (! server_)
return ret;
short rc = server_->castToIpcGuardianServer()->changePriority(priority, isDelta);
if (rc != 0)
{
ret = ERROR;
}
return ret;
}
ExSqlComp::ReturnStatus ExSqlComp::createServer()
{
ReturnStatus ret = SUCCESS;
if (sc_) {
delete sc_;
sc_ = NULL;
};
if (!(sc_=new(h_) IpcServerClass(env_, IPC_SQLCOMP_SERVER, allocMethod_,
compilerVersion_,nodeName_)))
{
//
*diagArea_ << DgSqlCode(- CLI_OUT_OF_MEMORY)
<< DgString0("IpcServerClass");
ret = ERROR;
}
else
{
IpcPriority priority = IPC_PRIORITY_DONT_CARE;
if (cliGlobals_->currContext()->getSessionDefaults()->getMxcmpPriority() > 0)
priority = cliGlobals_->currContext()->getSessionDefaults()->
getMxcmpPriority();
else if (cliGlobals_->currContext()->getSessionDefaults()->
getMxcmpPriorityDelta() != 0)
priority =
// env_->getMyProcessPriority() +
cliGlobals_->myPriority() +
cliGlobals_->currContext()->getSessionDefaults()->
getMxcmpPriorityDelta();
if ((priority > 200) ||
(priority < 1))
priority = IPC_PRIORITY_DONT_CARE;
ComDiagsArea* diags = 0;
if ( !( server_ = sc_->allocateServerProcess(&diags, h_,nodeName_,
IPC_CPU_DONT_CARE,
priority,
1, TRUE, TRUE, 2, NULL, NULL, FALSE, NULL
) ) )
ret = ERROR;
if (diags)
{
diagArea_->mergeAfter(*diags);
diags->deAllocate();
ret = ERROR;
}
//Server process allocations may have changed the define context
cliGlobals_->currContext()->checkAndSetCurrentDefineContext();
}
//
if (ret == ERROR)
{
error(arkcmpErrorServer);
if (getenv("DEBUG_SERVER"))
MessageBox(NULL, "ExSqlComp:createServer", "error ", MB_OK|MB_ICONINFORMATION);
}
return ret;
}
ExSqlComp::ReturnStatus ExSqlComp::establishConnection()
{
ReturnStatus ret = SUCCESS;
if (sqlcompMessage_) {
delete sqlcompMessage_;
sqlcompMessage_ = NULL;
};
if ( !(sqlcompMessage_ = new(h_) CmpMessageStream (env_, this) ))
{
*diagArea_ << DgSqlCode(- CLI_OUT_OF_MEMORY)
<< DgString0("CmpMessageStreaam");
ret = ERROR;
}
else
{
sqlcompMessage_->addRecipient(server_->getControlConnection());
CmpMessageConnectionType connectionType(connectionType_, h_);
(*sqlcompMessage_) << connectionType;
sqlcompMessage_->setWaited(TRUE); // set to waited as default
Requests tempRequests = outstandingSendBuffers_;
outstandingSendBuffers_.message_ = NULL;
outstandingSendBuffers_.resendCount_ = 99; // do not resend.
sqlcompMessage_->send(TRUE); // always waited when establishing connection
outstandingSendBuffers_ = tempRequests;
if (badConnection_)
ret = ERROR;
}
//
if (ret == ERROR)
error(arkcmpErrorConnection);
return ret;
}
ExSqlComp::ReturnStatus ExSqlComp::startSqlcomp(void)
{
ReturnStatus ret = SUCCESS;
badConnection_ = FALSE;
breakReceived_ = FALSE;
// all the connection and control processing are done in waited mode.
if ( (ret=createServer()) == ERROR )
return ret;
if ( (ret=establishConnection()) == ERROR )
return ret;
if ( (ret=resendControls()) == ERROR )
return ret;
// on NT, the environment is not shipped to mxcmp when the process
// is created as an NSK lite process.
// Send it now.
if ( (ret=refreshEnvs()) == ERROR )
return ret;
return ret;
}
ExSqlComp::ReturnStatus ExSqlComp::resendRequest()
{
// Do not resend the request if it is an ISP request,
// If arkcmp exits in the middle of stored procedure execution for
// any reason, don't start arkcmp again since the execution state can't
// be retrieved.
IpcMessageType typ = outstandingSendBuffers_.message_ ?
outstandingSendBuffers_.message_->getType() :
CmpMessageObj::NULL_REQUEST;
ExTransaction *ta = cliGlobals_->currContext()->getTransaction();
// removing this check for now. If the user transaction is still active
// we do not need to report an error.
/* long sqlCode = (ta->xnInProgress() && !ta->implicitXn()) ?
arkcmpErrorUserTxnAndArkcmpGone : 0;*/
Lng32 sqlCode=0;
//
if (
#ifdef _DEBUG
getenv("ARKCMP_NORESEND_DEBUG") ||
#endif
sqlCode ||
outstandingSendBuffers_.resendCount_ >= 1 ||
currentISPRequest_ || // an ISP request
typ == CmpMessageObj::INTERNALSP_REQUEST ||
typ == CmpMessageObj::INTERNALSP_GETNEXT)
{
error(sqlCode ? sqlCode : arkcmpErrorResend);
badConnection_ = TRUE;
initRequests(outstandingSendBuffers_);
currentISPRequest_ = 0;
outstandingSendBuffers_.ioStatus_ = FINISHED;
return ERROR;
}
// If this transaction was implicitly started by the executor, clean it
// up now; otherwise this executor will get stuck in a repeated 8841 cycle.
// If autocommit is on, it is safe to restart a new Xn and continue.
// If autocommit is off, clean up but return an error.
if (ta->xnInProgress() && ta->implicitXn() && ta->userEndedExeXn())
{
if (!ta->autoCommit())
{
error(-CLI_USER_ENDED_EXE_XN);
badConnection_ = TRUE;
initRequests(outstandingSendBuffers_);
outstandingSendBuffers_.ioStatus_ = FINISHED;
ta->cleanupTransaction();
return ERROR;
}
else
ta->cleanupTransaction();
}
// Start the process again.
badConnection_ = FALSE;
breakReceived_ = FALSE;
// save request.
// set outstandingSendBuffers_.message_ to 0 so that its memory won't be
// deallocated in estableConnection().
Requests tempRequests = outstandingSendBuffers_;
outstandingSendBuffers_.message_ = 0;
ReturnStatus ret = SUCCESS;
outstandingSendBuffers_.resendCount_++;
ret = createServer();
if (ret == ERROR) return ret;
Lng32 r = outstandingSendBuffers_.resendCount_; // save
ret = establishConnection();
if (ret == ERROR) return ret;
// Setup the arkcmp context again.
ret = resendControls();
if (ret == ERROR) return ret;
ret = refreshEnvs();
if (ret == ERROR) return ret;
// Send the message again. Use the same waited mode in the request.
outstandingSendBuffers_.resendCount_ = r; // restore
// restore request.
outstandingSendBuffers_ = tempRequests;
if (outstandingSendBuffers_.message_)
ret = sendR(outstandingSendBuffers_.message_,outstandingSendBuffers_.waited_);
return ret;
}
inline
ExSqlComp::ReturnStatus ExSqlComp::sendR(CmpMessageObj* c, NABoolean w)
{
ReturnStatus ret;
// Get the index into the cli compiler array so that we use the correct
// arkcmpInitFailed entry and not the default.
short indexIntoCliCompilerArray = 0;
indexIntoCliCompilerArray = cliGlobals_->currContext()->getIndexToCompilerArray();
//
if (badConnection_)
{
deleteServerStruct();
badConnection_ = FALSE;
breakReceived_ = FALSE;
}
if (!sqlcompMessage_)
{
ret = startSqlcomp();
if (ret == ERROR)
return ret;
}
//
if(this->isShared() && (cliGlobals_->currContext() != lastContext_))
{
ret = resendControls(TRUE /*context switch? */);
if( ret == ERROR)
{
*diagArea_ << DgSqlCode(-CLI_SEND_ARKCMP_CONTROL);
return ret;
}
}
sqlcompMessage_->clearAllObjects();
(*sqlcompMessage_) << *c;
sqlcompMessage_->setWaited(w); // stored the waited mode
// send the message.
Int64 transid = cliGlobals_->currContext()->getTransaction()->getExeXnId();
recentIpcTimestamp_ = NA_JulianTimestamp();
sqlcompMessage_->send(w, transid);
if (badConnection_)
return ERROR;
// Any arkcmp initialization failure, we constantly harp on it so user will
// be cautious. If arkcmp then crashes and is restarted, render everything
// unusable (the Executor is still ok, though -- previously PREPAREd stmts
// can still be run).
ContextCli::ArkcmpFailMode fm =
cliGlobals_->currContext()->arkcmpInitFailed(indexIntoCliCompilerArray);
if (fm == ContextCli::arkcmpWARN_)
{
error(+ CLI_ARKCMP_INIT_FAILED);
return WARNING;
}
else if (fm == ContextCli::arkcmpERROR_)
{
error(- CLI_ARKCMP_INIT_FAILED);
return ERROR;
}
return SUCCESS;
}
// Wait for request to complete. Called by Statement::releaseTransaction.
void ExSqlComp::completeRequests()
{
sqlcompMessage_->waitOnMsgStream(IpcInfiniteTimeout);
recentIpcTimestamp_ = NA_JulianTimestamp();
}
// The return status of ERROR here is only useful for WAITED requests --
// which are the default, e.g. used by refreshEnvs(), resendControls(),
// and the CLI.
inline
ExSqlComp::ReturnStatus ExSqlComp::waitForReply()
{
sqlcompMessage_->waitOnMsgStream(IpcImmediately);
recentIpcTimestamp_ = NA_JulianTimestamp();
return (outstandingSendBuffers_.ioStatus_ == FINISHED) ? SUCCESS : ERROR;
}
ExSqlComp::OperationStatus ExSqlComp::status(Int64 reqId)
{
OperationStatus s = FINISHED;
waitForReply();
s = outstandingSendBuffers_.ioStatus_;
return s;
}
// --------------------------------------------------------------------------
// Parse the info fetched from Describe::bindNode(). Genesis 10-981211-5986.
//
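// Wire format: each item is a 3-character, space/zero-padded decimal length
// followed by that many characters of text; saveControls() below consumes
// these as alternating name/value pairs.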
static NABoolean pairLenTxt(Int32 &len, const char *&txt, const char *&cqd)
{
len = 0;
for (Int32 i=3; i--; cqd++)
{
// if (isdigit(*cqd)) len = (10 * len) + *cqd - '0';
// else if (isspace(*cqd)) len = (10 * len);
if (isDigit8859_1((unsigned char)*cqd)) len = (10 * len) + *cqd - '0';
else if (isSpace8859_1((unsigned char)*cqd)) len = (10 * len);
else return TRUE; // error
}
txt = cqd;
cqd += len;
return FALSE;
}
void ExSqlComp::appendControls(ExControlArea *dest, ExControlArea *src){
Queue *srcList = src->getControlList();
ExControlEntry *ctl = NULL;
for (srcList->position(); ctl = (ExControlEntry *)srcList->getNext(); )
{
dest->addControl(ctl->type(), ctl->getReset(),
ctl->getSqlText(), ctl->getSqlTextLen(),
ctl->getValue(1), ctl->getLen(1),
ctl->getValue(2), ctl->getLen(2),
ctl->getValue(3), ctl->getLen(3),
ctl->getActionType(),
ctl->getResendType(),
ctl->isNonResettable());
}
}
//
static ExSqlComp::ReturnStatus saveControls(ExControlArea *ca, const char *cqd)
{
#ifdef _DEBUG
if (getenv("SQLCOMP_DEBUG")) {
size_t len = str_len(cqd);
cerr << len << "\t";
if (len < 800)
cerr << cqd << endl;
else {
char *cqd_ = (char *)cqd;
char a = cqd_[800];
cqd_[800] = '\0';
cerr << cqd << endl;
cqd_[800] = a;
}
cerr << endl;
}
#endif
ExSqlComp::ReturnStatus ret = ExSqlComp::SUCCESS;
char sqlText[2200];
Int32 lenP = str_len(ca->getText(DEFAULT_)); // len of prefix:
str_cpy_all(sqlText, ca->getText(DEFAULT_), lenP); // "CQD"
sqlText[lenP++] = ' '; // "CQD "
while (*cqd)
{
Int32 lenN, lenV, lenX = lenP;
const char *nam, *val;
if (pairLenTxt(lenN, nam, cqd) ||
pairLenTxt(lenV, val, cqd) ||
lenN == 0) // name can't be blank; value can be.
{ ret = ExSqlComp::ERROR; break; }
str_cpy_all(&sqlText[lenX], nam, lenN);
lenX += lenN;
sqlText[lenX++] = ' '; // "CQD nam "
sqlText[lenX++] = '\'';
str_cpy_all(&sqlText[lenX], val, lenV);
lenX += lenV;
sqlText[lenX++] = '\'';
sqlText[lenX++] = ';'; // "CQD nam 'val';"
//ca->addControllAreaOnly(DEFAULT_, -1, sqlText, lenX, nam, lenN, val, lenV, 0, 0, ComTdbControl::NONE_,
ca->addControl(DEFAULT_, -1, sqlText, lenX, nam, lenN, val, lenV, 0, 0, ComTdbControl::NONE_,
ExControlEntry::UPON_CMP_CRASH);
}
static const char cqdResetReset[] = "* RESET RESET;";
str_cpy_all(&sqlText[lenP], cqdResetReset, sizeof(cqdResetReset));
lenP += sizeof(cqdResetReset);
ca->addControl(DEFAULT_, 2, sqlText, lenP,
0, 0, 0, 0, 0, 0, ComTdbControl::NONE_,
ExControlEntry::UPON_CMP_CRASH); // "CQD * RESET RESET;"
return ret;
}
ExSqlComp::ReturnStatus ExSqlComp::resetAllDefaults(){
const char * buf[] = {"control query default * reset", "control query shape off",
"control table * reset", "control session * reset"};
for(Int32 i=0; i<sizeof(buf)/sizeof(char *); i++){
Lng32 len = str_len(buf[i])+1;
CmpCompileInfo c((char *)buf[i], len,
(Lng32)SQLCHARSETCODE_UTF8
, NULL, 0, 0, 0);
size_t dataLen = c.getLength();
char * data = new(h_) char[dataLen];
c.pack(data);
ReturnStatus ret = sendRequest(EXSQLCOMP::SQLTEXT_STATIC_COMPILE,
data,dataLen);
h_->deallocateMemory(data);
if(ret == ERROR) return ret;
}
return SUCCESS;
}
ExSqlComp::ReturnStatus ExSqlComp::resendControls(NABoolean ctxSw) // Genesis 10-981211-5986
{
// If we are already resending the controls, then we must return.
// Otherwise, we will resend the controls and set the resendControls_ state
// to TRUE. When we are done, we will reset the resendingControls_ state to
// FALSE. This is all because resendControls() call sendRequest()
// that call resendControls() again.
// * sigh *, if we could design the call with two-layered pattern.
// The lower layer should avoid calling the upper layer.
if (resendingControls_)
return SUCCESS;
resendingControls_ = TRUE;
// ##DLL-linkage problem ...
// ## Perhaps we need to copy the global IdentifyMyself to exe-glob-ctxt ...
// cerr << "## resendControls: I am " << IdentifyMyself::getMyName() << endl;
ReturnStatus ret = SUCCESS;
NABoolean e = doRefreshEnvironment_; // save
CmpMessageObj* t = outstandingSendBuffers_.message_; // save
outstandingSendBuffers_.message_= 0;
// Get the index into the cli compiler array so that we use the correct
// arkcmpInitFailed entry and not the default.
ContextCli *ctxt = cliGlobals_->currContext();
short indexIntoCliCompilerArray = ctxt->getIndexToCompilerArray();
//
if (ctxt->arkcmpInitFailed(indexIntoCliCompilerArray))
{
ctxt->arkcmpInitFailed(indexIntoCliCompilerArray) =
ContextCli::arkcmpERROR_;
error(- CLI_ARKCMP_INIT_FAILED);
ret = ERROR;
}
if (ret != ERROR)
{
// The message contains the following:
// (auth state and user ID are delimited by commas)
// authorization state (0 - off, 1 - on)
// integer user ID
// database user name
// See CmpStatement::process (CmpMessageDatabaseUser) for more details
Int32 *userID = ctxt->getDatabaseUserID();
Int32 userAsInt = *userID;
CmpContext *cmpCntxt = CmpCommon::context();
NABoolean authOn = cmpCntxt ? cmpCntxt->isAuthorizationEnabled() : FALSE;
char userMessage [MAX_AUTHID_AS_STRING_LEN + 1 + MAX_USERNAME_LEN + 1 + 2];
str_sprintf(userMessage, "%d,%d,%s", authOn, userAsInt, ctxt->getDatabaseUserName());
#ifdef _DEBUG
NABoolean doDebug = (getenv("DBUSER_DEBUG") ? TRUE : FALSE);
if (doDebug)
{
printf("[DBUSER:%d] Sending CMP user credentials through ExSqlComp::resendControls, %s\n",
(int) getpid(), userMessage);
fflush(stdout);
}
#endif
ret = sendRequest(EXSQLCOMP::DATABASE_USER,
(const char *) &userMessage,
(ULng32) sizeof(userMessage));
}
ComDiagsArea *loopDiags = NULL;
ExControlArea *ca = ctxt->getControlArea();
Queue *q = ca->getControlList();
if (ret != ERROR)
{
if (q->isEmpty())
{
ret=resetAllDefaults();
//
if (ret == ERROR)
{
resendingControls_ = FALSE;
lastContext_ = 0;
return ERROR;
}
ExControlArea *sharedCtrl = cliGlobals_->getSharedControl();
Queue *sharedCtrlList = sharedCtrl->getControlList();
if (sharedCtrlList->isEmpty())
{
// First time any arkcmp was started. Our caller is sendR().
// We ask arkcmp for all its initial (read-from-Defaults-table)
// defaults in the "externalized" subset, adding these to the
// Executor ControlArea. Thus, if any values in the persistent
// Defaults table get changed (by any user) between now and this
// arkcmp's crashing, when we restart arkcmp we will feed it
// these CQDs to reestablish the exact same context (i.e. we'll
// override the later Defaults-table values).
doRefreshEnvironment_ = FALSE;
char * buf = (char *)GetControlDefaults::GetExternalizedDefaultsStmt();
Lng32 len =
str_len(GetControlDefaults::GetExternalizedDefaultsStmt())+1;
CmpCompileInfo c(buf, len,
(Lng32)SQLCHARSETCODE_UTF8
, NULL, 0, 0, 0);
size_t dataLen = c.getLength();
char * data = new(h_) char[dataLen];
c.pack(data);
ReturnStatus ret = sendRequest(EXSQLCOMP::SQLTEXT_STATIC_COMPILE,
data,dataLen);
h_->deallocateMemory(data);
Int32 mainSqlcode = (Int32) diagArea_->mainSQLCODE();
if (ret != ERROR &&
ABS(mainSqlcode) == ABS(EXE_INFO_CQD_NAME_VALUE_PAIRS))
{
// This dang diagArea error list is [1]-based, not [0]-based...
const char *cqd = (*diagArea_)[1].getOptionalString(0);
if (cqd && str_len(cqd))
{
ret = saveControls(sharedCtrl, cqd);
}
if (ret != ERROR)
diagArea_->clear();
}
if ((ret == ERROR) && (!breakReceived_))
{
ctxt->arkcmpInitFailed(indexIntoCliCompilerArray) =
ContextCli::arkcmpWARN_;
error(+ CLI_ARKCMP_INIT_FAILED);
}
breakReceived_ = FALSE;
}
appendControls(ca, sharedCtrl);
} // if (q->isEmpty())
else
{
ret=resetAllDefaults();
if (ret == ERROR)
{
resendingControls_ = FALSE;
lastContext_ = 0;
return ERROR;
}
// Start a new arkcmp after earlier one crashed;
// need to reestablish the new arkcmp's control state.
// Our caller is resendRequest().
ExControlEntry *ctl;
for (q->position(); ctl = (ExControlEntry *)q->getNext(); )
{
if (ctxSw && (ctl->getResendType() == ExControlEntry::UPON_CMP_CRASH))
{
continue; // bypass controls needed for the crashed arkcmp.
}
#ifdef SHARE_ARKCMP_DEBUG
cerr << "resend " << ctl->getSqlTextLen() << "\t"
<< ctl->getSqlText() << endl;
#endif
#ifdef _DEBUG
if (getenv("SQLCOMP_DEBUG"))
cerr << ctl->getSqlTextLen() << "\t" << ctl->getSqlText() << endl;
#endif
// Each time thru loop. See [##] in sendRequest().
doRefreshEnvironment_ = FALSE;
// ExControlTcb::work() is where this gets done for the earlier arkcmp
char * buf = ctl->getSqlText();
// Ignore OSIM-related CQDs
if (strstr(buf, "CQD") || strstr(buf, "CONTROL QUERY DEFAULT"))
if (strstr(buf, "OSIM"))
continue;
Lng32 len = ctl->getSqlTextLen()+1;
CmpCompileInfo c(buf, len,
(Lng32)SQLCHARSETCODE_UTF8
, NULL, 0, 0, 0);
size_t dataLen = c.getLength();
char * data = new(h_) char[dataLen];
c.pack(data);
if (strstr(buf, "CQD") || strstr(buf, "CONTROL QUERY DEFAULT"))
{
if (strstr(buf, "REPLICATE_ALLOW_ROLES"))
{
// allow setOnce cqds.
cliGlobals_->currContext()->setSqlParserFlags(0x400000);
}
}
ReturnStatus ret = sendRequest(EXSQLCOMP::SQLTEXT_STATIC_COMPILE,
data,dataLen);
if (strstr(buf, "CQD") || strstr(buf, "CONTROL QUERY DEFAULT"))
{
if (strstr(buf, "REPLICATE_ALLOW_ROLES"))
{
// allow setOnce cqds.
cliGlobals_->currContext()->resetSqlParserFlags(0x400000);
}
}
h_->deallocateMemory(data);
if (ret != ERROR)
if (waitForReply() == ERROR)
ret = ERROR;
if (ret != ERROR)
{
//
if (
((*diagArea_).contains(-2050) ||
(*diagArea_).contains(-2055)) &&
(compilerVersion_ < COM_VERS_COMPILER_VERSION)
)
{
diagArea_->clear();
}
else
{
if (diagArea_->getNumber() > 0)
{
if (loopDiags == NULL)
loopDiags = ComDiagsArea::allocate(h_);
loopDiags->mergeAfter(*diagArea_);
diagArea_->clear();
}
}
ret = SUCCESS;
}
else
break;
} // for each control
// ## CmpDescribe sendAllControls() should probably send this too
// ## to avoid Executor sending prepared stmts back to be rebound...
if (ret != ERROR)
{
ExTransaction *ta = ctxt->getTransaction();
ReturnStatus ret = sendRequest(EXSQLCOMP::SET_TRANS,
(char *)ta->getUserTransMode(),
sizeof(TransMode));
if (ret != ERROR)
if (waitForReply() == ERROR)
ret = ERROR;
}
if ((ret == ERROR) && (!breakReceived_))
{
ctxt->arkcmpInitFailed(indexIntoCliCompilerArray) =
ContextCli::arkcmpERROR_;
error(- CLI_ARKCMP_INIT_FAILED);
}
breakReceived_ = FALSE;
} // control list is NOT empty
} // if (ret != ERROR)
//
if (ret != SUCCESS || diagArea_->getNumber() || loopDiags != NULL )
{
if (loopDiags != NULL)
{
diagArea_->mergeAfter(*loopDiags);
loopDiags->decrRefCount();
}
if (ret != ERROR)
ret = diagArea_->getNumber(DgSqlCode::ERROR_) ? ERROR : WARNING;
if (ret == ERROR)
*diagArea_ << DgSqlCode(- CLI_SEND_REQUEST_ERROR)
<< DgString0("resendControls");
}
doRefreshEnvironment_ = e; // restore
outstandingSendBuffers_.message_ = t; // restore
if (ret == SUCCESS)
{
lastContext_ = cliGlobals_->currContext();
}
else
{
lastContext_ = 0;
}
resendingControls_ = FALSE;
return ret;
}
//inline
ExSqlComp::ReturnStatus ExSqlComp::refreshEnvs()
{
ReturnStatus ret = SUCCESS;
CmpMessageObj* t = outstandingSendBuffers_.message_; // save
outstandingSendBuffers_.message_= 0;
CmpMessageEnvs envs(CmpMessageEnvs::EXGLOBALS,
cliGlobals_->getEnvVars(),
FALSE,
getHeap());
ret = sendR(&envs);
if (ret != ERROR)
if (waitForReply() == ERROR)
ret = ERROR;
if (ret == ERROR)
*diagArea_ << DgSqlCode(- CLI_SEND_REQUEST_ERROR)
<< DgString0("SETENV");
outstandingSendBuffers_.message_ = t; // restore
if (ret == ERROR) return ret;
doRefreshEnvironment_ = FALSE;
return ret;
}
// -----------------------------------------------------------------------
// Constructors of ExSqlComp
// -----------------------------------------------------------------------
ExSqlComp::ExSqlComp(void* ex_environment,
CollHeap *h,
CliGlobals *cliGlobals,
ExStoredProcTcb *storedProcTcb,
short version,
char *nodeName ,
IpcEnvironment *env):
isShared_(FALSE), lastContext_(NULL), resendingControls_(FALSE)
{
h_ = h;
allocMethod_ = IPC_ALLOC_DONT_CARE;
cliGlobals_ = cliGlobals;
env_ = env;
sc_ = 0;
sqlcompMessage_ = 0;
storedProcTcb_ = storedProcTcb;
compilerVersion_ = version;
exEnvironment_ = ex_environment;
doRefreshEnvironment_ = TRUE;
initRequests(outstandingSendBuffers_);
badConnection_ = FALSE;
breakReceived_ = FALSE;
currentISPRequest_ = 0;
connectionType_ = CmpMessageConnectionType::DMLDDL;
nodeName_ = new (h_) char[9];
if (nodeName)
{
strcpy(nodeName_,nodeName);
}
else
{
nodeName_[0] = '\\';
if (ComRtGetOSClusterName(&nodeName_[1], 8, NULL) <= 0)
nodeName_ = nodeName;
}
server_ = 0;
diagArea_ = ComDiagsArea::allocate(h_);
recentIpcTimestamp_ = -1;
}
ExSqlComp::~ExSqlComp()
{
if (server_)
{
// Release an existing server. This will close the connection
// to mxcmp. Do this for NSK only: for some reason, which I am
// not going to debug right now, it doesn't work on the NT platform.
// On NT, send an exit message to MXCMP instead.
if ( sqlcompMessage_ )
{
CmpMessageExit exitmsg;
sqlcompMessage_->clearAllObjects();
(*sqlcompMessage_) << exitmsg;
sqlcompMessage_->setWaited(TRUE);
sqlcompMessage_->send(TRUE);
}
}
delete sqlcompMessage_;
NADELETE(sc_, IpcServerClass, h_);
NADELETEBASIC(nodeName_, h_);
nodeName_ = NULL;
if (diagArea_)
diagArea_->deAllocate();
}
//
void ExSqlComp::endConnection()
{
if ( sqlcompMessage_ )
{
CmpMessageExit exitmsg;
sqlcompMessage_->clearAllObjects();
(*sqlcompMessage_) << exitmsg;
sqlcompMessage_->setWaited(TRUE);
sqlcompMessage_->send(TRUE);
}
delete sqlcompMessage_;
sqlcompMessage_ = NULL;
doRefreshEnvironment_ = TRUE;
}
inline NABoolean ExSqlComp::getEnvironment(char*&, ULng32&)
{
// TODO : use environment in executor globals.
return TRUE;
}
ExSqlComp::ReturnStatus ExSqlComp::preSendRequest(NABoolean doRefreshEnvs)
{
ReturnStatus ret = SUCCESS;
// currently, only one outstanding I/O is supported.
assert( !outstandingSendBuffers_.message_);
// make sure all the replies from currentISPRequest_ have been fetched already.
assert( currentISPRequest_ == 0 );
initRequests(outstandingSendBuffers_);
clearDiags();
retval_ = SUCCESS;
// if ( doRefreshEnvs && doRefreshEnvironment_ )
// {
// doRefreshEnvironment_ = FALSE;
// ret = refreshEnvs();
// }
if (doRefreshEnvs)
ret = refreshEnvs(); // this starts the server.
else
{
// Start mxcmp if not started.
// This is needed for nowaited prepare.
if (!sqlcompMessage_)
ret = startSqlcomp();
}
return ret;
}
ExSqlComp::ReturnStatus
ExSqlComp::sendRequest(CmpMessageObj* request, NABoolean waited)
{
ReturnStatus ret = SUCCESS;
outstandingSendBuffers_.ioStatus_ = ExSqlComp::PENDING;
outstandingSendBuffers_.message_ = request;
outstandingSendBuffers_.waited_ = waited;
outstandingSendBuffers_.requestId_ = request->id();
ret = sendR(request, waited);
if (ret != ERROR)
if (waitForReply() == ERROR)
if (waited)
ret = ERROR;
return ret;
}
// The server fielding these requests is ExCmpMessage::actOnReceive
// in arkcmp/CmpConnection.cpp.
//
ExSqlComp::ReturnStatus ExSqlComp::sendRequest (Operator op,
const char* const_input_data,
ULng32 size,
NABoolean waited,
Int64* id,
Lng32 charset,
NABoolean resendFlg,
const char *parentQid,
Lng32 parentQidLen)
{
ReturnStatus ret;
char *input_data = (char *)const_input_data;
// if this request is going to a downrev compiler(pre-2300),
// change charset to ISO88591.
// Downrev compilers prior to V2300 do not understand ISO_MAPPING.
//
// When we move to v2400 and beyond, this code need to correctly
// figure out which charsets are not understood by the downrev
// compiler where this msg is being sent to.
//
if (getVersion() < COM_VERS_2300)
{
charset = SQLCHARSETCODE_ISO88591;
}
if ( ( ret = preSendRequest(FALSE) ) == ERROR )
{
if ( resendFlg && badConnection_ )
{
// retry once.
ret = preSendRequest(FALSE);
}
if (ret == ERROR)
return ret;
}
// send out the request, or process the request in one process mode
CmpMessageRequest* request = NULL;
if (charset == SQLCHARSETCODE_UNKNOWN)
{
charset = CharInfo::UTF8;
}
switch (op)
{
case EXSQLCOMP::ENVS_REFRESH :
// TODO : this part needs to be reconstructed
ret = refreshEnvs();
break;
case EXSQLCOMP::SQLTEXT_RECOMPILE:
case EXSQLCOMP::SQLTEXT_COMPILE : // we might be static or dynamic mode
request=new(h_)
CmpMessageSQLText(input_data,(CmpMsgBufLenType)size,h_,charset,op);
break;
case EXSQLCOMP::SQLTEXT_STATIC_RECOMPILE :
case EXSQLCOMP::SQLTEXT_STATIC_COMPILE : // force to static compilation mode
request=new(h_)
CmpMessageCompileStmt(input_data,(CmpMsgBufLenType)size,op,h_, charset);
break;
case EXSQLCOMP::UPDATE_HIST_STAT:
request = new(h_)
CmpMessageUpdateHist(input_data,(CmpMsgBufLenType)size,h_,charset);
break;
case EXSQLCOMP::PROCESSDDL :
request = new(h_)
CmpMessageDDL(input_data,(CmpMsgBufLenType)size,h_,charset, parentQid, parentQidLen);
break;
case EXSQLCOMP::DESCRIBE :
request = new(h_)
CmpMessageDescribe(input_data,(CmpMsgBufLenType)size,h_,charset);
break;
case EXSQLCOMP::SET_TRANS :
request = new(h_)
CmpMessageSetTrans(input_data,(CmpMsgBufLenType)size,h_);
break;
case EXSQLCOMP::DDL_NATABLE_INVALIDATE :
request = new(h_)
CmpMessageDDLNATableInvalidate(input_data,(CmpMsgBufLenType)size,h_);
break;
case EXSQLCOMP::DATABASE_USER :
request = new(h_)
CmpMessageDatabaseUser(input_data,(CmpMsgBufLenType)size,h_);
break;
case EXSQLCOMP::END_SESSION :
request = new(h_)CmpMessageEndSession(input_data,
(CmpMsgBufLenType)size, h_);
break;
case EXSQLCOMP::DDL_WITH_STATUS :
request = new(h_)CmpMessageDDLwithStatus(input_data,
(CmpMsgBufLenType)size, h_);
break;
default :
assert(FALSE);
break;
}
if (request)
{
request->setFlags(cliGlobals_->currContext()->getSqlParserFlags());
// If we are talking to a downrev compiler take care of the following.
//
if ( compilerVersion_ < COM_VERS_COMPILER_VERSION)
{
// if the structure of any of the above message op types have changed from
// one release to another, redefine the foll virtual method :
// migrateToNewVersionto do the
// translation to the down rev structure.
// By default, it just sets the version.
request->migrateToVersion(compilerVersion_);
}
// send the request.
ret = sendRequest(request, waited);
if ((ret == ERROR) && badConnection_)
{
if (resendFlg)
{
outstandingSendBuffers_.ioStatus_ = ExSqlComp::PENDING;
outstandingSendBuffers_.message_ = request;
outstandingSendBuffers_.waited_ = waited;
outstandingSendBuffers_.requestId_ = request->id();
badConnection_ = FALSE;
clearDiags();
ret = resendRequest();
if (ret != ERROR)
{
if (id)
*id = request->id();
}
else
{
//
// The second retry failed. Reset outstandingSendBuffers.
outstandingSendBuffers_.ioStatus_ = ExSqlComp::FINISHED;
outstandingSendBuffers_.resendCount_ = 0;
outstandingSendBuffers_.message_ = 0;
badConnection_ = FALSE;
}
} // if (resendFlg)
else
{
// The first send attempt failed and the ExSqlComp caller
// does not want us to retry. Reset outstandingSendBuffers_.
outstandingSendBuffers_.ioStatus_ = ExSqlComp::FINISHED;
outstandingSendBuffers_.resendCount_ = 0;
outstandingSendBuffers_.message_ = 0;
badConnection_ = FALSE;
} // if (resendFlg) else ...
} // if ((ret == ERROR) && badConnection_)
} // if (request)
// For now [##], always reset this for next time in.
// We should really do refreshEnvs only when envs actually changed
// (in sqlci, only when a SET/RESET DEFINE occurred).
doRefreshEnvironment_ = TRUE;
return ret;
}
// stored proc request.
ExSqlComp::ReturnStatus ExSqlComp::sendRequest(
const char* procName,
void* inputExpr, ULng32 inputExprSize, // input Expr
void* outputExpr, ULng32 outputExprSize, // output Expr
void* keyExpr, ULng32 keyExprSize, // key expr
void* inputData, ULng32 inputDataSize, // input data
ULng32 outputRecSize, ULng32 outputTotalSize, // output data
NABoolean waited, Int64* id,
const char *parentQid,
Lng32 parentQidLen)
{
ReturnStatus ret = ERROR;
connectionType_ = CmpMessageConnectionType::ISP;
if ( ( ret = preSendRequest(TRUE) ) == ERROR ) return ret;
CmpMessageObj* request = new(h_) CmpMessageISPRequest
((char *)procName, //## for now, cast away constness...
inputExpr, inputExprSize, outputExpr, outputExprSize,
keyExpr, keyExprSize, inputData, inputDataSize,
outputRecSize, outputTotalSize, h_, parentQid, parentQidLen);
if (request)
{
request->setFlags(cliGlobals_->currContext()->getSqlParserFlags());
// Save the current request ID because request will be deleted
// in sendRequest()
Int64 savedISPRequest = request->id();
ret = sendRequest(request, waited);
if (ret != ERROR)
{
currentISPRequest_ = savedISPRequest;
if (id) *id = currentISPRequest_;
}
}
// For now [##], always reset this for next time in.
// We should really do refreshEnvs only when envs actually changed
// (in sqlci, only when a SET/RESET DEFINE occurred).
doRefreshEnvironment_ = TRUE;
return ret;
}
ExSqlComp::ReturnStatus ExSqlComp::getNext(ULng32 bufSize,
Int64 id,
NABoolean waited,
const char *parentQid,
Lng32 parentQidLen)
{
ReturnStatus ret = SUCCESS;
Int64 ispRequest = ( id ) ? id : currentISPRequest_;
if ( !ispRequest )
return ERROR;
// should not call presendRequest here, because the environment should be the
// same for one ISP execution request sent earlier. So ExSqlComp should not
// send the refresh environment again to arkcmp.
clearDiags();
CmpMessageObj* request = new(h_) CmpMessageISPGetNext
(bufSize, ispRequest, 0, h_, parentQid, parentQidLen);
if ( !request )
return ERROR;
ret = sendRequest(request, waited);
return ret;
}
ExSqlComp::ReturnStatus ExSqlComp::getReply
(char*& reply,ULng32& size, ULng32 maxSize, Int64 reqId,
NABoolean getDataWithErrReply)
{
ReturnStatus ret = SUCCESS;
assert(outstandingSendBuffers_.ioStatus_ == FINISHED);
outstandingSendBuffers_.ioStatus_ = FETCHED;
Int64 request = ( reqId ) ? reqId : outstandingSendBuffers_.requestId_;
if (diagArea_->getNumber(DgSqlCode::ERROR_))
{
if (NOT getDataWithErrReply)
{
reply=NULL;
retval_ = ERROR;
return retval_;
}
ret = retval_ = ERROR;
}
if (diagArea_->getNumber(DgSqlCode::WARNING_))
{
ret = retval_ = WARNING;
}
switch (sqlcompMessage_->getNextObjType())
{
case CmpMessageObj::REPLY_CODE :
{
CmpMessageReplyCode r(h_,0, reply, maxSize, h_);
(*sqlcompMessage_) >> r;
// in the future when there might be more than one outstanding I/O, the
// reply should be put into the receive buffer list, so it can be retrieved
// later.
assert ( r.request() == request );
reply = r.takeData();
size = r.getSize();
ret = retval_;
break;
}
case CmpMessageObj::REPLY_ISP :
{
CmpMessageReplyISP r(h_, 0, reply, maxSize, h_);
(*sqlcompMessage_) >> r;
assert ( r.request() == request || r.request() == currentISPRequest_ );
reply = r.takeData();
size = r.getSize();
// There is no WARNING status returned in this case, since the return status
// indicates whether there is more data to come or not. In the case of warning
// the remaining data should still be fetched. The warning information is kept
// in diagArea_ though.
retval_ = ret = r.areMore() ? MOREDATA : SUCCESS;
if ( retval_ == SUCCESS )
currentISPRequest_ = 0;
break;
}
default :
break;
}
return ret;
}
ComDiagsArea* ExSqlComp::getDiags(Int64 )
{
return diagArea_;
}
//
ComDiagsArea* ExSqlComp::takeDiags(Int64)
{
ComDiagsArea* d = diagArea_;
diagArea_=0;
return d;
}
void ExSqlComp::deleteServerStruct()
{
delete sqlcompMessage_;
delete sc_;
sqlcompMessage_ = NULL;
sc_ = NULL;
}
// -----------------------------------------------------------------------
// Methods for CmpMessageStream
// -----------------------------------------------------------------------
CmpMessageStream::CmpMessageStream(IpcEnvironment* env, ExSqlComp* sqlcomp) :
IpcMessageStream (env, CmpMessageObj::EXE_CMP_MESSAGE,
EXECMPIPCVERSION, 0, TRUE)
{
sqlcomp_ = sqlcomp;
waited_ = TRUE;
}
void CmpMessageStream::actOnSend(IpcConnection*)
{
if (sqlcomp_->storedProcTcb_ )
{
// at this point, get a pointer to the ex_stored_proc_tcb, if applicable,
// and call that TCB's tickleScheduler() method.
sqlcomp_->storedProcTcb_->tickleScheduler();
}
if (getState() == ERROR_STATE)
{
// Do not resend the request here, due to a memory corruption bug where
// sqlcomp_->sqlcompMessage_ was deleted and referenced later.
// Setting badConnection_ to true will trigger the higher layer to resend.
// sqlcomp_->resendRequest();
sqlcomp_->badConnection_ = TRUE;
return;
}
if (getState() == BREAK_RECEIVED)
{
// received a break signal while waiting on MXCMP. Kill MXCMP,
// and return a warning to MXCI, to indicate that the break key was
// received.
sqlcomp_->outstandingSendBuffers_.message_ = 0;
sqlcomp_->outstandingSendBuffers_.ioStatus_ = ExSqlComp::FINISHED;
sqlcomp_->badConnection_ = TRUE;
sqlcomp_->breakReceived_ = TRUE;
NAProcessHandle phandle((SB_Phandle_Type *)
&(sqlcomp_->server_->getServerId().getPhandle().phandle_));
Int32 guaRetcode = phandle.decompose();
if (XZFIL_ERR_OK == guaRetcode)
{
msg_mon_stop_process_name(phandle.getPhandleString());
}
delete sqlcomp_->sqlcompMessage_;
sqlcomp_->getDiags() ->setRollbackTransaction(-1);
sqlcomp_->sqlcompMessage_ = NULL;
sqlcomp_->doRefreshEnvironment_ = TRUE;
}
}
void CmpMessageStream::actOnSendAllComplete()
{
clearAllObjects();
receive(waited_);
}
void CmpMessageStream::actOnReceive(IpcConnection*)
{
if ((getState() == ERROR_STATE) || (getState() == BREAK_RECEIVED))
{
// If the state is ERROR, this could be due to many different
// scenarios, described below.
// If the state is BREAK_RECEIVED, this means that we
// received a break signal while waiting on MXCMP. Kill MXCMP,
// and return to MXCI, or to executor stored proc,
// to indicate that the break key was received.
// Do not resend the request here, due to a memory corruption bug where
// sqlcomp_->sqlcompMessage_ was deleted and referenced later.
// Set badConnection_ to true so that the request will be resent
// at the higher level (in sendRequest). It may be that arkcmp
// crashed while serving the previous request, but
// IpcMessageStream won't report the error until the
// receive method is called: if arkcmp dies on the
// previous query, the following send actually fails, yet
// IpcMessageStream still sets the state to SENT instead of
// ERROR; only when the receive method is called does
// IpcMessageStream set the state to ERROR.
//
// Possible scenarios:
// . arkcmp dies in previous request, ExSqlComp class should start
// another arkcmp.
// . arkcmp dies in the middle of processing environment setup request,
// this request will be resend one more time. Arkcmp will die again,
// resendRequest decides not to send more request and return with
// errors.
sqlcomp_->badConnection_ = TRUE;
if (getState() == BREAK_RECEIVED)
sqlcomp_->breakReceived_ = TRUE;
IpcMessageType typ = sqlcomp_->outstandingSendBuffers_.message_ ?
sqlcomp_->outstandingSendBuffers_.message_->getType() :
CmpMessageObj::NULL_REQUEST;
if ( sqlcomp_->currentISPRequest_ || // an ISP request
typ == CmpMessageObj::INTERNALSP_REQUEST ||
typ == CmpMessageObj::INTERNALSP_GETNEXT)
{
if (getState() == ERROR_STATE)
sqlcomp_->error(arkcmpErrorResend);
sqlcomp_->initRequests(sqlcomp_->outstandingSendBuffers_);
sqlcomp_->currentISPRequest_ = 0;
sqlcomp_->outstandingSendBuffers_.ioStatus_ = ExSqlComp::FINISHED;
if (getState() == BREAK_RECEIVED)
{
Int32 nid = 0;
Int32 pid = 0;
NAProcessHandle phandle((SB_Phandle_Type *)
&(sqlcomp_->server_->getServerId().getPhandle().phandle_));
Int32 guaRetcode = phandle.decompose();
if (XZFIL_ERR_OK == guaRetcode)
{
msg_mon_stop_process_name(phandle.getPhandleString());
}
delete sqlcomp_->sqlcompMessage_;
sqlcomp_->sqlcompMessage_ = NULL;
sqlcomp_->getDiags() ->setRollbackTransaction(-1);
sqlcomp_->doRefreshEnvironment_ = TRUE;
}
}
if ((getState() != BREAK_RECEIVED) &&
sqlcomp_->getDiags() &&
sqlcomp_->getDiags()->getNumber() == 0)
{
sqlcomp_->error(-CLI_RECEIVE_ERROR);
}
if (sqlcomp_->storedProcTcb_)
{
// and call that TCB's tickleScheduler() method.
sqlcomp_->storedProcTcb_->tickleScheduler();
return;
}
}
else
{
sqlcomp_->outstandingSendBuffers_.ioStatus_ = ExSqlComp::FINISHED;
sqlcomp_->outstandingSendBuffers_.resendCount_ = 0;
if (sqlcomp_->outstandingSendBuffers_.message_)
{
// This is not an environment setup message, update the I/O status
sqlcomp_->outstandingSendBuffers_.requestId_ =
sqlcomp_->outstandingSendBuffers_.message_->id();
sqlcomp_->outstandingSendBuffers_.message_->decrRefCount();
sqlcomp_->outstandingSendBuffers_.message_ = 0;
}
if (getNextObjType() == IPC_SQL_DIAG_AREA)
{
ComDiagsArea diags(sqlcomp_->getHeap());
*(this) >> diags;
if (diags.getNumber()) sqlcomp_->getDiags()->mergeAfter(diags);
}
}
if (sqlcomp_->storedProcTcb_)
{
// at this point, get a pointer to the ex_stored_proc_tcb, if applicable,
// and call that TCB's tickleScheduler() method.
sqlcomp_->storedProcTcb_->tickleScheduler();
}
}
#if 0
NABoolean NAExecTrans(Lng32 command,
Int64& transId)
{
assert(command == 0);
SQLDESC_ID transid_desc;
SQLMODULE_ID module;
// added for multi charset module names
init_SQLCLI_OBJ_ID(&transid_desc);
init_SQLMODULE_ID(&module);
module.module_name = 0;
transid_desc.module = &module;
transid_desc.name_mode = desc_handle;
transid_desc.handle = 0L;
Int32 rc;
if ( rc=SQL_EXEC_AllocDesc(&transid_desc, 1) )
return FALSE;
Int64 transid = transId;
if ( rc = SQL_EXEC_SetDescItem(&transid_desc, 1, SQLDESC_VAR_PTR,
(Lng32)&transid, 0) )
return FALSE;
Lng32 cliCommand = command ? SQLTRANS_SET : SQLTRANS_STATUS;
rc = SQL_EXEC_Xact(cliCommand, &transid_desc);
if (rc == 0)
{
transId = transid;
}
SQL_EXEC_DeallocDesc(&transid_desc);
return rc == 0;
}
#endif
| 1 | 23,102 | 2012 is a retryable error. Will avoiding regenerating it here cause a difference in behavior in createServer()? | apache-trafodion | cpp
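The patch's intent -- skip the generic arkcmpErrorServer diagnostic when the more specific -2012/-2013 codes are already recorded -- is the usual "don't stack redundant errors" pattern. A Python sketch of that pattern (illustration only, not Trafodion code; the generic code number is hypothetical):

# Suppress a generic error when a specific diagnostic already explains it.
SPECIFIC_CODES = {-2012, -2013}

def add_server_error(diags: set, generic_code: int) -> None:
    if diags & SPECIFIC_CODES:
        return  # a specific error is already present; stay silent
    diags.add(generic_code)

diags = {-2013}
add_server_error(diags, -9999)  # -9999 stands in for the generic code
print(diags)                    # {-2013} -- no redundant generic entry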
@@ -8,6 +8,7 @@ package factory
import (
"context"
+ "github.com/iotexproject/iotex-address/address"
"sort"
"github.com/iotexproject/go-pkgs/hash" | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package factory
import (
"context"
"sort"
"github.com/iotexproject/go-pkgs/hash"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/actpool/actioniterator"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/state"
)
var (
stateDBMtc = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "iotex_state_db",
Help: "IoTeX State DB",
},
[]string{"type"},
)
)
func init() {
prometheus.MustRegister(stateDBMtc)
}
type (
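// workingSet routes every state read/write through the function hooks
// below; the creating factory or stateDB supplies the implementations.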
workingSet struct {
height uint64
finalized bool
dock protocol.Dock
receipts []*action.Receipt
commitFunc func(uint64) error
readviewFunc func(name string) (interface{}, error)
writeviewFunc func(name string, v interface{}) error
dbFunc func() db.KVStore
delStateFunc func(string, []byte) error
statesFunc func(opts ...protocol.StateOption) (uint64, state.Iterator, error)
digestFunc func() hash.Hash256
finalizeFunc func(uint64) error
getStateFunc func(string, []byte, interface{}) error
putStateFunc func(string, []byte, interface{}) error
revertFunc func(int) error
snapshotFunc func() int
}
workingSetCreator interface {
newWorkingSet(context.Context, uint64) (*workingSet, error)
}
)
func (ws *workingSet) digest() (hash.Hash256, error) {
if !ws.finalized {
return hash.ZeroHash256, errors.New("workingset has not been finalized yet")
}
return ws.digestFunc(), nil
}
func (ws *workingSet) Receipts() ([]*action.Receipt, error) {
if !ws.finalized {
return nil, errors.New("workingset has not been finalized yet")
}
return ws.receipts, nil
}
// Height returns the height of the block being worked on
func (ws *workingSet) Height() (uint64, error) {
return ws.height, nil
}
func (ws *workingSet) validate(ctx context.Context) error {
if ws.finalized {
return errors.New("cannot run action on a finalized working set")
}
blkCtx := protocol.MustGetBlockCtx(ctx)
if blkCtx.BlockHeight != ws.height {
return errors.Errorf(
"invalid block height %d, %d expected",
blkCtx.BlockHeight,
ws.height,
)
}
return nil
}
func (ws *workingSet) runActions(
ctx context.Context,
elps []action.SealedEnvelope,
) ([]*action.Receipt, error) {
if err := ws.validate(ctx); err != nil {
return nil, err
}
// Handle actions
receipts := make([]*action.Receipt, 0)
for _, elp := range elps {
ctx, err := withActionCtx(ctx, elp)
if err != nil {
return nil, err
}
receipt, err := ws.runAction(ctx, elp)
if err != nil {
return nil, errors.Wrap(err, "error when run action")
}
if receipt != nil {
receipts = append(receipts, receipt)
}
}
return receipts, nil
}
func withActionCtx(ctx context.Context, selp action.SealedEnvelope) (context.Context, error) {
var actionCtx protocol.ActionCtx
var err error
caller := selp.SrcPubkey().Address()
if caller == nil {
return nil, errors.New("failed to get address")
}
actionCtx.Caller = caller
actionCtx.ActionHash, err = selp.Hash()
if err != nil {
return nil, err
}
actionCtx.GasPrice = selp.GasPrice()
intrinsicGas, err := selp.IntrinsicGas()
if err != nil {
return nil, err
}
actionCtx.IntrinsicGas = intrinsicGas
actionCtx.Nonce = selp.Nonce()
return protocol.WithActionCtx(ctx, actionCtx), nil
}
func (ws *workingSet) runAction(
ctx context.Context,
elp action.SealedEnvelope,
) (*action.Receipt, error) {
if protocol.MustGetBlockCtx(ctx).GasLimit < protocol.MustGetActionCtx(ctx).IntrinsicGas {
return nil, errors.Wrap(action.ErrHitGasLimit, "block gas limit exceeded")
}
// Handle action
reg, ok := protocol.GetRegistry(ctx)
if !ok {
return nil, nil
}
for _, actionHandler := range reg.All() {
receipt, err := actionHandler.Handle(ctx, elp.Action(), ws)
elpHash, err1 := elp.Hash()
if err1 != nil {
return nil, errors.Wrapf(err1, "Failed to get hash")
}
if err != nil {
return nil, errors.Wrapf(
err,
"error when action %x mutates states",
elpHash,
)
}
if receipt != nil {
return receipt, nil
}
}
// TODO (zhi): return error
return nil, nil
}
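// finalize marks the working set as finalized, after which digest and
// Receipts become available and no further actions may run.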
func (ws *workingSet) finalize() error {
if ws.finalized {
return errors.New("Cannot finalize a working set twice")
}
if err := ws.finalizeFunc(ws.height); err != nil {
return err
}
ws.finalized = true
return nil
}
func (ws *workingSet) Snapshot() int {
return ws.snapshotFunc()
}
func (ws *workingSet) Revert(snapshot int) error {
return ws.revertFunc(snapshot)
}
// Commit persists all changes in RunActions() into the DB
func (ws *workingSet) Commit(ctx context.Context) error {
if err := ws.commitFunc(ws.height); err != nil {
return err
}
if err := protocolCommit(ctx, ws); err != nil {
return err
}
ws.Reset()
return nil
}
// GetDB returns the underlying DB for account/contract storage
func (ws *workingSet) GetDB() db.KVStore {
return ws.dbFunc()
}
// State pulls a state from DB
func (ws *workingSet) State(s interface{}, opts ...protocol.StateOption) (uint64, error) {
stateDBMtc.WithLabelValues("get").Inc()
cfg, err := processOptions(opts...)
if err != nil {
return ws.height, err
}
return ws.height, ws.getStateFunc(cfg.Namespace, cfg.Key, s)
}
func (ws *workingSet) States(opts ...protocol.StateOption) (uint64, state.Iterator, error) {
return ws.statesFunc(opts...)
}
// PutState puts a state into DB
func (ws *workingSet) PutState(s interface{}, opts ...protocol.StateOption) (uint64, error) {
stateDBMtc.WithLabelValues("put").Inc()
cfg, err := processOptions(opts...)
if err != nil {
return ws.height, err
}
return ws.height, ws.putStateFunc(cfg.Namespace, cfg.Key, s)
}
// DelState deletes a state from DB
func (ws *workingSet) DelState(opts ...protocol.StateOption) (uint64, error) {
stateDBMtc.WithLabelValues("delete").Inc()
cfg, err := processOptions(opts...)
if err != nil {
return ws.height, err
}
return ws.height, ws.delStateFunc(cfg.Namespace, cfg.Key)
}
// ReadView reads the view
func (ws *workingSet) ReadView(name string) (interface{}, error) {
return ws.readviewFunc(name)
}
// WriteView writes the view back to the factory
func (ws *workingSet) WriteView(name string, v interface{}) error {
return ws.writeviewFunc(name, v)
}
func (ws *workingSet) ProtocolDirty(name string) bool {
return ws.dock.ProtocolDirty(name)
}
func (ws *workingSet) Load(name, key string, v interface{}) error {
return ws.dock.Load(name, key, v)
}
func (ws *workingSet) Unload(name, key string, v interface{}) error {
return ws.dock.Unload(name, key, v)
}
func (ws *workingSet) Reset() {
ws.dock.Reset()
}
// CreateGenesisStates initializes the genesis states
func (ws *workingSet) CreateGenesisStates(ctx context.Context) error {
if reg, ok := protocol.GetRegistry(ctx); ok {
for _, p := range reg.All() {
if gsc, ok := p.(protocol.GenesisStateCreator); ok {
if err := gsc.CreateGenesisStates(ctx, ws); err != nil {
return errors.Wrap(err, "failed to create genesis states for protocol")
}
}
}
}
return ws.finalize()
}
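// validateNonce checks that, for every account appearing in the block, the
// received nonces increase continuously from that account's confirmed nonce.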
func (ws *workingSet) validateNonce(blk *block.Block) error {
accountNonceMap := make(map[string][]uint64)
for _, selp := range blk.Actions {
caller := selp.SrcPubkey().Address()
if caller == nil {
return errors.New("failed to get address")
}
appendActionIndex(accountNonceMap, caller.String(), selp.Nonce())
}
// Special handling for genesis block
if blk.Height() == 0 {
return nil
}
// Verify each account's Nonce
for srcAddr, receivedNonces := range accountNonceMap {
confirmedState, err := accountutil.AccountState(ws, srcAddr)
if err != nil {
return errors.Wrapf(err, "failed to get the confirmed nonce of address %s", srcAddr)
}
receivedNonces := receivedNonces
sort.Slice(receivedNonces, func(i, j int) bool { return receivedNonces[i] < receivedNonces[j] })
for i, nonce := range receivedNonces {
if nonce != confirmedState.Nonce+uint64(i+1) {
return errors.Wrapf(
action.ErrNonce,
"the %d nonce %d of address %s (confirmed nonce %d) is not continuously increasing",
i,
nonce,
srcAddr,
confirmedState.Nonce,
)
}
}
}
return nil
}
func (ws *workingSet) Process(ctx context.Context, actions []action.SealedEnvelope) error {
return ws.process(ctx, actions)
}
func (ws *workingSet) process(ctx context.Context, actions []action.SealedEnvelope) error {
var err error
reg := protocol.MustGetRegistry(ctx)
for _, act := range actions {
if ctx, err = withActionCtx(ctx, act); err != nil {
return err
}
for _, p := range reg.All() {
if validator, ok := p.(protocol.ActionValidator); ok {
if err := validator.Validate(ctx, act.Action(), ws); err != nil {
return err
}
}
}
}
for _, p := range protocol.MustGetRegistry(ctx).All() {
if pp, ok := p.(protocol.PreStatesCreator); ok {
if err := pp.CreatePreStates(ctx, ws); err != nil {
return err
}
}
}
// TODO: verify whether the post system actions are appended tail
receipts, err := ws.runActions(ctx, actions)
if err != nil {
return err
}
ws.receipts = receipts
return ws.finalize()
}
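// pickAndRunActions validates and runs actions drawn from the actpool until
// the remaining block gas drops below allowedBlockGasResidue, then runs the
// given post-system actions and finalizes the working set.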
func (ws *workingSet) pickAndRunActions(
ctx context.Context,
ap actpool.ActPool,
postSystemActions []action.SealedEnvelope,
allowedBlockGasResidue uint64,
) ([]action.SealedEnvelope, error) {
err := ws.validate(ctx)
if err != nil {
return nil, err
}
receipts := make([]*action.Receipt, 0)
executedActions := make([]action.SealedEnvelope, 0)
reg := protocol.MustGetRegistry(ctx)
for _, p := range reg.All() {
if pp, ok := p.(protocol.PreStatesCreator); ok {
if err := pp.CreatePreStates(ctx, ws); err != nil {
return nil, err
}
}
}
	// initialize the action iterator
blkCtx := protocol.MustGetBlockCtx(ctx)
if ap != nil {
actionIterator := actioniterator.NewActionIterator(ap.PendingActionMap())
for {
nextAction, ok := actionIterator.Next()
if !ok {
break
}
if nextAction.GasLimit() > blkCtx.GasLimit {
actionIterator.PopAccount()
continue
}
if ctx, err = withActionCtx(ctx, nextAction); err == nil {
for _, p := range reg.All() {
if validator, ok := p.(protocol.ActionValidator); ok {
if err = validator.Validate(ctx, nextAction.Action(), ws); err != nil {
break
}
}
}
}
if err != nil {
caller := nextAction.SrcPubkey().Address()
if caller == nil {
return nil, errors.New("failed to get address")
}
ap.DeleteAction(caller)
actionIterator.PopAccount()
continue
}
receipt, err := ws.runAction(ctx, nextAction)
switch errors.Cause(err) {
case nil:
// do nothing
case action.ErrHitGasLimit:
actionIterator.PopAccount()
continue
default:
				nextActionHash, hashErr := nextAction.Hash()
				if hashErr != nil {
					return nil, errors.Wrap(hashErr, "Failed to get next action hash")
				}
				return nil, errors.Wrapf(err, "Failed to update state changes for selp %x", nextActionHash)
}
if receipt != nil {
blkCtx.GasLimit -= receipt.GasConsumed
ctx = protocol.WithBlockCtx(ctx, blkCtx)
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, nextAction)
			// To avoid looping over all actions in the actpool, stop processing
			// actions once the remaining gas drops below a certain threshold
if blkCtx.GasLimit < allowedBlockGasResidue {
break
}
}
}
for _, selp := range postSystemActions {
if ctx, err = withActionCtx(ctx, selp); err != nil {
return nil, err
}
receipt, err := ws.runAction(ctx, selp)
if err != nil {
return nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, selp)
}
ws.receipts = receipts
return executedActions, ws.finalize()
}
func (ws *workingSet) ValidateBlock(ctx context.Context, blk *block.Block) error {
if err := ws.validateNonce(blk); err != nil {
return errors.Wrap(err, "failed to validate nonce")
}
if err := ws.process(ctx, blk.RunnableActions().Actions()); err != nil {
log.L().Error("Failed to update state.", zap.Uint64("height", ws.height), zap.Error(err))
return err
}
digest, err := ws.digest()
if err != nil {
return err
}
if err = blk.VerifyDeltaStateDigest(digest); err != nil {
return errors.Wrap(err, "failed to verify delta state digest")
}
if err = blk.VerifyReceiptRoot(calculateReceiptRoot(ws.receipts)); err != nil {
return errors.Wrap(err, "Failed to verify receipt root")
}
return nil
}
func (ws *workingSet) CreateBuilder(
ctx context.Context,
ap actpool.ActPool,
postSystemActions []action.SealedEnvelope,
allowedBlockGasResidue uint64,
) (*block.Builder, error) {
actions, err := ws.pickAndRunActions(ctx, ap, postSystemActions, allowedBlockGasResidue)
if err != nil {
return nil, err
}
ra := block.NewRunnableActionsBuilder().
AddActions(actions...).
Build()
blkCtx := protocol.MustGetBlockCtx(ctx)
bcCtx := protocol.MustGetBlockchainCtx(ctx)
prevBlkHash := bcCtx.Tip.Hash
digest, err := ws.digest()
if err != nil {
return nil, errors.Wrap(err, "failed to get digest")
}
blkBuilder := block.NewBuilder(ra).
SetHeight(blkCtx.BlockHeight).
SetTimestamp(blkCtx.BlockTimeStamp).
SetPrevBlockHash(prevBlkHash).
SetDeltaStateDigest(digest).
SetReceipts(ws.receipts).
SetReceiptRoot(calculateReceiptRoot(ws.receipts)).
SetLogsBloom(calculateLogsBloom(ctx, ws.receipts))
return blkBuilder, nil
}
| 1 | 23,697 | move to line 14 below | iotexproject-iotex-core | go |
@@ -107,7 +107,7 @@ public class Rectangle2D {
return eastRelation;
}
- /** Checks if the rectangle intersects the provided triangle **/
+ /** Checks if the rectangle crosses the provided triangle **/
public boolean intersectsTriangle(int aX, int aY, int bX, int bY, int cX, int cY) {
// 1. query contains any triangle points
if (queryContainsPoint(aX, aY) || queryContainsPoint(bX, bY) || queryContainsPoint(cX, cY)) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.geo;
import java.util.Arrays;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.util.FutureArrays;
import org.apache.lucene.util.NumericUtils;
import static java.lang.Integer.BYTES;
import static org.apache.lucene.geo.GeoEncodingUtils.MAX_LON_ENCODED;
import static org.apache.lucene.geo.GeoEncodingUtils.MIN_LON_ENCODED;
import static org.apache.lucene.geo.GeoEncodingUtils.decodeLatitude;
import static org.apache.lucene.geo.GeoEncodingUtils.decodeLongitude;
import static org.apache.lucene.geo.GeoEncodingUtils.encodeLatitude;
import static org.apache.lucene.geo.GeoEncodingUtils.encodeLatitudeCeil;
import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude;
import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitudeCeil;
import static org.apache.lucene.geo.GeoUtils.orient;
/**
* 2D rectangle implementation containing spatial logic.
*
* @lucene.internal
*/
public class Rectangle2D {
final byte[] bbox;
final byte[] west;
final int minX;
final int maxX;
final int minY;
final int maxY;
private Rectangle2D(double minLat, double maxLat, double minLon, double maxLon) {
this.bbox = new byte[4 * BYTES];
int minXenc = encodeLongitudeCeil(minLon);
int maxXenc = encodeLongitude(maxLon);
int minYenc = encodeLatitudeCeil(minLat);
int maxYenc = encodeLatitude(maxLat);
if (minYenc > maxYenc) {
minYenc = maxYenc;
}
this.minY = minYenc;
this.maxY = maxYenc;
if (minLon > maxLon == true) {
// crossing dateline is split into east/west boxes
this.west = new byte[4 * BYTES];
this.minX = minXenc;
this.maxX = maxXenc;
encode(MIN_LON_ENCODED, this.maxX, this.minY, this.maxY, this.west);
encode(this.minX, MAX_LON_ENCODED, this.minY, this.maxY, this.bbox);
} else {
// encodeLongitudeCeil may cause minX to be > maxX iff
// the delta between the longitude < the encoding resolution
if (minXenc > maxXenc) {
minXenc = maxXenc;
}
this.west = null;
this.minX = minXenc;
this.maxX = maxXenc;
encode(this.minX, this.maxX, this.minY, this.maxY, bbox);
}
}
/** Builds a Rectangle2D from rectangle */
public static Rectangle2D create(Rectangle rectangle) {
return new Rectangle2D(rectangle.minLat, rectangle.maxLat, rectangle.minLon, rectangle.maxLon);
}
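  /** returns true if this bounding box crosses the dateline (encoded minX > maxX) **/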
public boolean crossesDateline() {
return minX > maxX;
}
/** Checks if the rectangle contains the provided point **/
public boolean queryContainsPoint(int x, int y) {
if (this.crossesDateline() == true) {
return bboxContainsPoint(x, y, MIN_LON_ENCODED, this.maxX, this.minY, this.maxY)
|| bboxContainsPoint(x, y, this.minX, MAX_LON_ENCODED, this.minY, this.maxY);
}
return bboxContainsPoint(x, y, this.minX, this.maxX, this.minY, this.maxY);
}
  /** compare this to a provided range bounding box **/
public PointValues.Relation relateRangeBBox(int minXOffset, int minYOffset, byte[] minTriangle,
int maxXOffset, int maxYOffset, byte[] maxTriangle) {
PointValues.Relation eastRelation = compareBBoxToRangeBBox(this.bbox, minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle);
if (this.crossesDateline() && eastRelation == PointValues.Relation.CELL_OUTSIDE_QUERY) {
return compareBBoxToRangeBBox(this.west, minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle);
}
return eastRelation;
}
/** Checks if the rectangle intersects the provided triangle **/
public boolean intersectsTriangle(int aX, int aY, int bX, int bY, int cX, int cY) {
// 1. query contains any triangle points
if (queryContainsPoint(aX, aY) || queryContainsPoint(bX, bY) || queryContainsPoint(cX, cY)) {
return true;
}
// compute bounding box of triangle
int tMinX = StrictMath.min(StrictMath.min(aX, bX), cX);
int tMaxX = StrictMath.max(StrictMath.max(aX, bX), cX);
int tMinY = StrictMath.min(StrictMath.min(aY, bY), cY);
int tMaxY = StrictMath.max(StrictMath.max(aY, bY), cY);
// 2. check bounding boxes are disjoint
if (this.crossesDateline() == true) {
if (boxesAreDisjoint(tMinX, tMaxX, tMinY, tMaxY, MIN_LON_ENCODED, this.maxX, this.minY, this.maxY)
&& boxesAreDisjoint(tMinX, tMaxX, tMinY, tMaxY, this.minX, MAX_LON_ENCODED, this.minY, this.maxY)) {
return false;
}
} else if (tMaxX < minX || tMinX > maxX || tMinY > maxY || tMaxY < minY) {
return false;
}
// 3. check triangle contains any query points
if (Tessellator.pointInTriangle(minX, minY, aX, aY, bX, bY, cX, cY)) {
return true;
} else if (Tessellator.pointInTriangle(maxX, minY, aX, aY, bX, bY, cX, cY)) {
return true;
} else if (Tessellator.pointInTriangle(maxX, maxY, aX, aY, bX, bY, cX, cY)) {
return true;
} else if (Tessellator.pointInTriangle(minX, maxY, aX, aY, bX, bY, cX, cY)) {
return true;
}
// 4. last ditch effort: check crossings
if (queryIntersects(aX, aY, bX, bY, cX, cY)) {
return true;
}
return false;
}
/** Checks if the rectangle contains the provided triangle **/
public boolean containsTriangle(int ax, int ay, int bx, int by, int cx, int cy) {
if (this.crossesDateline() == true) {
return bboxContainsTriangle(ax, ay, bx, by, cx, cy, MIN_LON_ENCODED, this.maxX, this.minY, this.maxY)
|| bboxContainsTriangle(ax, ay, bx, by, cx, cy, this.minX, MAX_LON_ENCODED, this.minY, this.maxY);
}
return bboxContainsTriangle(ax, ay, bx, by, cx, cy, minX, maxX, minY, maxY);
}
/** static utility method to compare a bbox with a range of triangles (just the bbox of the triangle collection) */
private static PointValues.Relation compareBBoxToRangeBBox(final byte[] bbox,
int minXOffset, int minYOffset, byte[] minTriangle,
int maxXOffset, int maxYOffset, byte[] maxTriangle) {
// check bounding box (DISJOINT)
if (FutureArrays.compareUnsigned(minTriangle, minXOffset, minXOffset + BYTES, bbox, 3 * BYTES, 4 * BYTES) > 0 ||
FutureArrays.compareUnsigned(maxTriangle, maxXOffset, maxXOffset + BYTES, bbox, BYTES, 2 * BYTES) < 0 ||
FutureArrays.compareUnsigned(minTriangle, minYOffset, minYOffset + BYTES, bbox, 2 * BYTES, 3 * BYTES) > 0 ||
FutureArrays.compareUnsigned(maxTriangle, maxYOffset, maxYOffset + BYTES, bbox, 0, BYTES) < 0) {
return PointValues.Relation.CELL_OUTSIDE_QUERY;
}
if (FutureArrays.compareUnsigned(minTriangle, minXOffset, minXOffset + BYTES, bbox, BYTES, 2 * BYTES) >= 0 &&
FutureArrays.compareUnsigned(maxTriangle, maxXOffset, maxXOffset + BYTES, bbox, 3 * BYTES, 4 * BYTES) <= 0 &&
FutureArrays.compareUnsigned(minTriangle, minYOffset, minYOffset + BYTES, bbox, 0, BYTES) >= 0 &&
FutureArrays.compareUnsigned(maxTriangle, maxYOffset, maxYOffset + BYTES, bbox, 2 * BYTES, 3 * BYTES) <= 0) {
return PointValues.Relation.CELL_INSIDE_QUERY;
}
return PointValues.Relation.CELL_CROSSES_QUERY;
}
/**
* encodes a bounding box into the provided byte array
*/
private static void encode(final int minX, final int maxX, final int minY, final int maxY, byte[] b) {
if (b == null) {
b = new byte[4 * BYTES];
}
NumericUtils.intToSortableBytes(minY, b, 0);
NumericUtils.intToSortableBytes(minX, b, BYTES);
NumericUtils.intToSortableBytes(maxY, b, 2 * BYTES);
NumericUtils.intToSortableBytes(maxX, b, 3 * BYTES);
}
/** returns true if the query intersects the provided triangle (in encoded space) */
private boolean queryIntersects(int ax, int ay, int bx, int by, int cx, int cy) {
// check each edge of the triangle against the query
if (edgeIntersectsQuery(ax, ay, bx, by) ||
edgeIntersectsQuery(bx, by, cx, cy) ||
edgeIntersectsQuery(cx, cy, ax, ay)) {
return true;
}
return false;
}
/** returns true if the edge (defined by (ax, ay) (bx, by)) intersects the query */
private boolean edgeIntersectsQuery(int ax, int ay, int bx, int by) {
if (this.crossesDateline() == true) {
return edgeIntersectsBox(ax, ay, bx, by, MIN_LON_ENCODED, this.maxX, this.minY, this.maxY)
|| edgeIntersectsBox(ax, ay, bx, by, this.minX, MAX_LON_ENCODED, this.minY, this.maxY);
}
return edgeIntersectsBox(ax, ay, bx, by, this.minX, this.maxX, this.minY, this.maxY);
}
/** static utility method to check if a bounding box contains a point */
private static boolean bboxContainsPoint(int x, int y, int minX, int maxX, int minY, int maxY) {
return (x < minX || x > maxX || y < minY || y > maxY) == false;
}
/** static utility method to check if a bounding box contains a triangle */
private static boolean bboxContainsTriangle(int ax, int ay, int bx, int by, int cx, int cy,
int minX, int maxX, int minY, int maxY) {
return bboxContainsPoint(ax, ay, minX, maxX, minY, maxY)
&& bboxContainsPoint(bx, by, minX, maxX, minY, maxY)
&& bboxContainsPoint(cx, cy, minX, maxX, minY, maxY);
}
/** returns true if the edge (defined by (ax, ay) (bx, by)) intersects the query */
private static boolean edgeIntersectsBox(int ax, int ay, int bx, int by,
int minX, int maxX, int minY, int maxY) {
// shortcut: if edge is a point (occurs w/ Line shapes); simply check bbox w/ point
if (ax == bx && ay == by) {
return Rectangle.containsPoint(ay, ax, minY, maxY, minX, maxX);
}
// shortcut: check if either of the end points fall inside the box
if (bboxContainsPoint(ax, ay, minX, maxX, minY, maxY)
|| bboxContainsPoint(bx, by, minX, maxX, minY, maxY)) {
return true;
}
// shortcut: check bboxes of edges are disjoint
if (boxesAreDisjoint(Math.min(ax, bx), Math.max(ax, bx), Math.min(ay, by), Math.max(ay, by),
minX, maxX, minY, maxY)) {
return false;
}
// shortcut: edge is a point
if (ax == bx && ay == by) {
return false;
}
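    // for each box edge, the segment (a,b) intersects it iff each segment
    // straddles the line through the other: both orientation products are <= 0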
// top
if (orient(ax, ay, bx, by, minX, maxY) * orient(ax, ay, bx, by, maxX, maxY) <= 0 &&
orient(minX, maxY, maxX, maxY, ax, ay) * orient(minX, maxY, maxX, maxY, bx, by) <= 0) {
return true;
}
// right
if (orient(ax, ay, bx, by, maxX, maxY) * orient(ax, ay, bx, by, maxX, minY) <= 0 &&
orient(maxX, maxY, maxX, minY, ax, ay) * orient(maxX, maxY, maxX, minY, bx, by) <= 0) {
return true;
}
// bottom
if (orient(ax, ay, bx, by, maxX, minY) * orient(ax, ay, bx, by, minX, minY) <= 0 &&
orient(maxX, minY, minX, minY, ax, ay) * orient(maxX, minY, minX, minY, bx, by) <= 0) {
return true;
}
// left
if (orient(ax, ay, bx, by, minX, minY) * orient(ax, ay, bx, by, minX, maxY) <= 0 &&
orient(minX, minY, minX, maxY, ax, ay) * orient(minX, minY, minX, maxY, bx, by) <= 0) {
return true;
}
return false;
}
/** utility method to check if two boxes are disjoint */
private static boolean boxesAreDisjoint(final int aMinX, final int aMaxX, final int aMinY, final int aMaxY,
final int bMinX, final int bMaxX, final int bMinY, final int bMaxY) {
return (aMaxX < bMinX || aMinX > bMaxX || aMaxY < bMinY || aMinY > bMaxY);
}
@Override
public boolean equals(Object o) {
return Arrays.equals(bbox, ((Rectangle2D)o).bbox)
&& Arrays.equals(west, ((Rectangle2D)o).west);
}
@Override
public int hashCode() {
int hash = super.hashCode();
hash = 31 * hash + Arrays.hashCode(bbox);
hash = 31 * hash + Arrays.hashCode(west);
return hash;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("Rectangle(lat=");
sb.append(decodeLatitude(minY));
sb.append(" TO ");
sb.append(decodeLatitude(maxY));
sb.append(" lon=");
sb.append(decodeLongitude(minX));
sb.append(" TO ");
sb.append(decodeLongitude(maxX));
if (maxX < minX) {
sb.append(" [crosses dateline!]");
}
sb.append(")");
return sb.toString();
}
}
 | 1 | 28,260 | the method name should match the docs | apache-lucene-solr | java
@@ -278,13 +278,16 @@ func getOps(config Config, id TlfID) *folderBranchOps {
getOpsNoAdd(FolderBranch{id, MasterBranch})
}
+// TODO: Test MDv3.
+
// createNewRMD creates a new RMD for the given name. Returns its ID
// and handle also.
func createNewRMD(t *testing.T, config Config, name string, public bool) (
TlfID, *TlfHandle, *RootMetadata) {
id := FakeTlfID(1, public)
h := parseTlfHandleOrBust(t, config, name, public)
- rmd := newRootMetadataOrBust(t, id, h)
+ rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
+ require.NoError(t, err)
return id, h, rmd
}
| 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"errors"
"fmt"
"math/rand"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-codec/codec"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfshash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
type CheckBlockOps struct {
BlockOps
tr gomock.TestReporter
}
var _ BlockOps = (*CheckBlockOps)(nil)
func (cbo *CheckBlockOps) Ready(ctx context.Context, kmd KeyMetadata,
block Block) (id BlockID, plainSize int, readyBlockData ReadyBlockData,
err error) {
id, plainSize, readyBlockData, err = cbo.BlockOps.Ready(ctx, kmd, block)
encodedSize := readyBlockData.GetEncodedSize()
if plainSize > encodedSize {
cbo.tr.Errorf("expected plainSize <= encodedSize, got plainSize = %d, "+
"encodedSize = %d", plainSize, encodedSize)
}
return
}
type tCtxIDType int
const (
tCtxID tCtxIDType = iota
)
// Time out individual tests after 10 seconds.
var individualTestTimeout = 10 * time.Second
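// kbfsOpsInit creates a mock-backed ConfigMock plus a test context for a
// single KBFSOps test; pair it with kbfsTestShutdown.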
func kbfsOpsInit(t *testing.T, changeMd bool) (mockCtrl *gomock.Controller,
config *ConfigMock, ctx context.Context, cancel context.CancelFunc) {
ctr := NewSafeTestReporter(t)
mockCtrl = gomock.NewController(ctr)
config = NewConfigMock(mockCtrl, ctr)
config.SetCodec(kbfscodec.NewMsgpack())
blockops := &CheckBlockOps{config.mockBops, ctr}
config.SetBlockOps(blockops)
kbfsops := NewKBFSOpsStandard(config)
config.SetKBFSOps(kbfsops)
config.SetNotifier(kbfsops)
// Use real caches, to avoid the overhead of tracking cache calls.
// Each test is expected to check the cache for correctness at the
// end of the test.
config.SetBlockCache(NewBlockCacheStandard(100, 1<<30))
config.SetDirtyBlockCache(NewDirtyBlockCacheStandard(wallClock{},
testLoggerMaker(t), 5<<20, 10<<20, 5<<20))
config.mockBcache = nil
config.mockDirtyBcache = nil
if changeMd {
// Give different values for the MD Id so we can test that it
// is properly cached
config.mockCrypto.EXPECT().MakeMdID(gomock.Any()).AnyTimes().
Return(fakeMdID(2), nil)
} else {
config.mockCrypto.EXPECT().MakeMdID(gomock.Any()).AnyTimes().
Return(fakeMdID(1), nil)
}
// These tests don't rely on external notifications at all, so ignore any
// goroutine attempting to register:
c := make(chan error, 1)
config.mockMdserv.EXPECT().RegisterForUpdate(gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return(c, nil)
config.mockMdserv.EXPECT().OffsetFromServerTime().
Return(time.Duration(0), true).AnyTimes()
// None of these tests depend on time
config.mockClock.EXPECT().Now().AnyTimes().Return(time.Now())
// Ignore Notify calls for now
config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
// Ignore Archive calls for now
config.mockBops.EXPECT().Archive(gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return(nil)
// Ignore key bundle ID creation calls for now
config.mockCrypto.EXPECT().MakeTLFWriterKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFWriterKeyBundleID{}, nil)
config.mockCrypto.EXPECT().MakeTLFReaderKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFReaderKeyBundleID{}, nil)
// Ignore favorites
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).AnyTimes().
Return(nil, nil)
config.mockKbpki.EXPECT().FavoriteAdd(gomock.Any(), gomock.Any()).
AnyTimes().Return(nil)
interposeDaemonKBPKI(config, "alice", "bob", "charlie")
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
// make the context identifiable, to verify that it is passed
// correctly to the observer
id := rand.Int()
var err error
if ctx, err = NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(ctx context.Context) context.Context {
return context.WithValue(ctx, tCtxID, id)
})); err != nil {
cancel()
panic(err)
}
return
}
func kbfsTestShutdown(mockCtrl *gomock.Controller, config *ConfigMock,
ctx context.Context, cancel context.CancelFunc) {
config.ctr.CheckForFailures()
config.KBFSOps().(*KBFSOpsStandard).Shutdown()
if config.mockDirtyBcache == nil {
if err := config.DirtyBlockCache().Shutdown(); err != nil {
// Ignore error; some tests intentionally leave around dirty data.
}
}
cancel()
if err := CleanupCancellationDelayer(ctx); err != nil {
panic(err)
}
mockCtrl.Finish()
}
// kbfsOpsInitNoMocks returns a config that doesn't use any mocks. The
// shutdown call is kbfsTestShutdownNoMocks.
func kbfsOpsInitNoMocks(t *testing.T, users ...libkb.NormalizedUsername) (
*ConfigLocal, keybase1.UID, context.Context, context.CancelFunc) {
config := MakeTestConfigOrBust(t, users...)
_, currentUID, err := config.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(c context.Context) context.Context {
return c
}))
if err != nil {
cancel()
panic(err)
}
return config, currentUID, ctx, cancel
}
func kbfsTestShutdownNoMocks(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
CheckConfigAndShutdown(t, config)
cancel()
CleanupCancellationDelayer(ctx)
}
// TODO: Get rid of all users of this.
func kbfsTestShutdownNoMocksNoCheck(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
config.Shutdown()
cancel()
CleanupCancellationDelayer(ctx)
}
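// checkBlockCache asserts that the clean and dirty block caches hold exactly
// the expected entries at the end of a test.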
func checkBlockCache(t *testing.T, config *ConfigMock, id TlfID,
expectedCleanBlocks []BlockID,
expectedDirtyBlocks map[BlockPointer]BranchName) {
bcache := config.BlockCache().(*BlockCacheStandard)
// make sure the LRU consists of exactly the right set of clean blocks
for _, id := range expectedCleanBlocks {
_, ok := bcache.cleanTransient.Get(id)
if !ok {
t.Errorf("BlockCache missing clean block %v at the end of the test",
id)
}
}
if bcache.cleanTransient.Len() != len(expectedCleanBlocks) {
t.Errorf("BlockCache has extra clean blocks at end of test")
}
// make sure the dirty cache consists of exactly the right set of
// dirty blocks
dirtyBcache := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
for ptr, branch := range expectedDirtyBlocks {
_, err := dirtyBcache.Get(id, ptr, branch)
if err != nil {
t.Errorf("BlockCache missing dirty block %v, branch %s at "+
"the end of the test: err %v", ptr, branch, err)
}
if !dirtyBcache.IsDirty(id, ptr, branch) {
t.Errorf("BlockCache has incorrectly clean block %v, branch %s at "+
"the end of the test: err %v", ptr, branch, err)
}
}
if len(dirtyBcache.cache) != len(expectedDirtyBlocks) {
t.Errorf("BlockCache has extra dirty blocks at end of test")
}
}
func TestKBFSOpsGetFavoritesSuccess(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
handle1 := parseTlfHandleOrBust(t, config, "alice", false)
handle2 := parseTlfHandleOrBust(t, config, "alice,bob", false)
// dup for testing
handles := []*TlfHandle{handle1, handle2, handle2}
for _, h := range handles {
config.KeybaseService().FavoriteAdd(
context.Background(), h.ToFavorite().toKBFolder(false))
}
// The favorites list contains our own public dir by default, even
// if KBPKI doesn't return it.
handle3 := parseTlfHandleOrBust(t, config, "alice", true)
handles = append(handles, handle3)
handles2, err := config.KBFSOps().GetFavorites(ctx)
if err != nil {
t.Errorf("Got error on favorites: %v", err)
}
if len(handles2) != len(handles)-1 {
t.Errorf("Got bad handles back: %v", handles2)
}
}
func TestKBFSOpsGetFavoritesFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
err := errors.New("Fake fail")
// Replace the old one (added in init function)
config.mockKbpki = NewMockKBPKI(mockCtrl)
config.SetKBPKI(config.mockKbpki)
// expect one call to favorites, and fail it
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).Return(nil, err)
if _, err2 := config.KBFSOps().GetFavorites(ctx); err2 != err {
t.Errorf("Got bad error on favorites: %v", err2)
}
}
func getOps(config Config, id TlfID) *folderBranchOps {
return config.KBFSOps().(*KBFSOpsStandard).
getOpsNoAdd(FolderBranch{id, MasterBranch})
}
// createNewRMD creates a new RMD for the given name. Returns its ID
// and handle also.
func createNewRMD(t *testing.T, config Config, name string, public bool) (
TlfID, *TlfHandle, *RootMetadata) {
id := FakeTlfID(1, public)
h := parseTlfHandleOrBust(t, config, name, public)
rmd := newRootMetadataOrBust(t, id, h)
return id, h, rmd
}
func makeImmutableRMDForTest(t *testing.T, config Config, rmd *RootMetadata,
mdID MdID) ImmutableRootMetadata {
key, err := config.KBPKI().GetCurrentVerifyingKey(context.Background())
require.NoError(t, err)
return makeImmutableRootMetadataForTest(t, rmd, key, mdID)
}
// injectNewRMD creates a new RMD and makes sure the existing ops for
// its ID has as its head that RMD.
func injectNewRMD(t *testing.T, config *ConfigMock) (
keybase1.UID, TlfID, *RootMetadata) {
id, h, rmd := createNewRMD(t, config, "alice", false)
var keyGen KeyGen
if id.IsPublic() {
keyGen = PublicKeyGen
} else {
keyGen = 1
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
KeyGen: keyGen,
DataVer: 1,
},
EncodedSize: 1,
},
}
rmd.FakeInitialRekey(config.Crypto(), h.ToBareHandleOrBust())
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(
t, config, rmd, fakeMdID(fakeTlfIDByte(id)))
rmd.SetSerializedPrivateMetadata(make([]byte, 1))
config.Notifier().RegisterForChanges(
[]FolderBranch{{id, MasterBranch}}, config.observer)
uid := h.FirstResolvedWriter()
rmd.data.Dir.Creator = uid
return uid, id, rmd
}
func TestKBFSOpsGetRootNodeCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = fakeBlockID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
func TestKBFSOpsGetRootNodeReIdentify(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = fakeBlockID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
// Mark everything for reidentifying, and wait for it to finish
// before checking.
kop := config.KBFSOps().(*KBFSOpsStandard)
returnCh := make(chan struct{})
kop.reIdentifyControlChan <- returnCh
<-returnCh
assert.False(t, fboIdentityDone(ops))
// Trigger new identify.
lState = makeFBOLockState()
_, err = ops.getMDLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
// fboIdentityDone is needed to avoid data races.
func fboIdentityDone(fbo *folderBranchOps) bool {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
return fbo.identifyDone
}
type failIdentifyKBPKI struct {
KBPKI
identifyErr error
}
func (kbpki failIdentifyKBPKI) Identify(ctx context.Context, assertion, reason string) (UserInfo, error) {
return UserInfo{}, kbpki.identifyErr
}
func TestKBFSOpsGetRootNodeCacheIdentifyFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = fakeBlockID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
expectedErr := errors.New("Identify failure")
config.SetKBPKI(failIdentifyKBPKI{config.KBPKI(), expectedErr})
// Trigger identify.
lState := makeFBOLockState()
_, err := ops.getMDLocked(ctx, lState, mdReadNeedIdentify)
assert.Equal(t, expectedErr, err)
assert.False(t, fboIdentityDone(ops))
}
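// expectBlock arranges for the mock BlockOps to fill in the given block (and
// return err) when blockPtr is fetched.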
func expectBlock(config *ConfigMock, kmd KeyMetadata, blockPtr BlockPointer, block Block, err error) {
config.mockBops.EXPECT().Get(gomock.Any(), kmdMatcher{kmd},
ptrMatcher{blockPtr}, gomock.Any()).
Do(func(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer, getBlock Block) {
switch v := getBlock.(type) {
case *FileBlock:
*v = *block.(*FileBlock)
case *DirBlock:
*v = *block.(*DirBlock)
}
}).Return(err)
}
// ptrMatcher implements the gomock.Matcher interface to compare
// BlockPointer objects. We don't care about some of the fields in a
// pointer for the purposes of these tests.
type ptrMatcher struct {
ptr BlockPointer
}
// Matches implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) Matches(x interface{}) bool {
xPtr, ok := x.(BlockPointer)
if !ok {
return false
}
return (xPtr.ID == p.ptr.ID && xPtr.RefNonce == p.ptr.RefNonce)
}
// String implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) String() string {
return fmt.Sprintf("Matches BlockPointer %v", p.ptr)
}
func fillInNewMD(t *testing.T, config *ConfigMock, rmd *RootMetadata) {
if !rmd.TlfID().IsPublic() {
rmd.FakeInitialRekey(config.Crypto(), rmd.GetTlfHandle().ToBareHandleOrBust())
}
rootPtr := BlockPointer{
ID: fakeBlockID(42),
KeyGen: 1,
DataVer: 1,
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: rootPtr,
EncodedSize: 5,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 3,
},
}
return
}
func testKBFSOpsGetRootNodeCreateNewSuccess(t *testing.T, public bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", public)
fillInNewMD(t, config, rmd)
// create a new MD
config.mockMdops.EXPECT().GetUnmergedForTLF(
gomock.Any(), id, gomock.Any()).Return(ImmutableRootMetadata{}, nil)
irmd := makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
config.mockMdops.EXPECT().GetForTLF(gomock.Any(), id).Return(irmd, nil)
config.mockMdcache.EXPECT().Put(irmd).Return(nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
require.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
require.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
require.Equal(t, rmd.data.Dir.EntryInfo, ei)
require.Equal(t, rmd.GetTlfHandle(), h)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPublic(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, true)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPrivate(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, false)
}
func TestKBFSOpsGetRootMDForHandleExisting(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
ID: fakeBlockID(1),
},
EncodedSize: 15,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 10,
Mtime: 1,
Ctime: 2,
},
}
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Unmerged).Return(
TlfID{}, ImmutableRootMetadata{}, nil)
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Merged).Return(
TlfID{}, makeImmutableRMDForTest(t, config, rmd, fakeMdID(1)), nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(2))
n, ei, err :=
config.KBFSOps().GetOrCreateRootNode(ctx, h, MasterBranch)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
if p.Tlf != id {
t.Errorf("Got bad dir id back: %v", p.Tlf)
} else if len(p.path) != 1 {
t.Errorf("Got bad MD back: path size %d", len(p.path))
} else if p.path[0].ID != rmd.data.Dir.ID {
t.Errorf("Got bad MD back: root ID %v", p.path[0].ID)
} else if ei.Type != Dir {
t.Error("Got bad MD non-dir rootID back")
} else if ei.Size != 10 {
t.Errorf("Got bad MD Size back: %d", ei.Size)
} else if ei.Mtime != 1 {
t.Errorf("Got bad MD MTime back: %d", ei.Mtime)
} else if ei.Ctime != 2 {
t.Errorf("Got bad MD CTime back: %d", ei.Ctime)
}
}
// rmd should really be a ReadOnlyRootMetadata or *BareRootMetadata in
// the helper functions below, but all the callers would have to go
// md.ReadOnly(), which doesn't buy us much in tests.
func makeBP(id BlockID, kmd KeyMetadata, config Config,
u keybase1.UID) BlockPointer {
return BlockPointer{
ID: id,
KeyGen: kmd.LatestKeyGeneration(),
DataVer: DefaultNewBlockDataVersion(config, false),
BlockContext: BlockContext{
Creator: u,
// Refnonces not needed; explicit refnonce
// testing happens elsewhere.
},
}
}
func makeBI(id BlockID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32) BlockInfo {
return BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
}
}
func makeIFP(id BlockID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32, off int64) IndirectFilePtr {
return IndirectFilePtr{
BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
},
off,
false,
codec.UnknownFieldSetHandler{},
}
}
func makeBIFromID(id BlockID, user keybase1.UID) BlockInfo {
return BlockInfo{
BlockPointer: BlockPointer{
ID: id, KeyGen: 1, DataVer: 1,
BlockContext: BlockContext{
Creator: user,
},
},
EncodedSize: 1,
}
}
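// nodeFromPath populates the node cache with every node along p and returns
// the tail node.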
func nodeFromPath(t *testing.T, ops *folderBranchOps, p path) Node {
var prevNode Node
// populate the node cache with all the nodes we'll need
for _, pathNode := range p.path {
n, err := ops.nodeCache.GetOrCreate(pathNode.BlockPointer,
pathNode.Name, prevNode)
if err != nil {
t.Fatal(err)
}
prevNode = n
}
return prevNode
}
func testPutBlockInCache(
t *testing.T, config *ConfigMock, ptr BlockPointer, id TlfID,
block Block) {
err := config.BlockCache().Put(ptr, id, block, TransientEntry)
require.NoError(t, err)
}
func TestKBFSOpsGetBaseDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: File}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Dir}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
testPutBlockInCache(t, config, node.BlockPointer, id, dirBlock)
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, blockPtr, dirBlock, nil)
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err != nil {
t.Errorf("Got error on getdir: %v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailNonReader(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id := FakeTlfID(1, false)
h := parseTlfHandleOrBust(t, config, "bob#alice", false)
// Hack around access check in ParseTlfHandle.
h.resolvedReaders = nil
rmd := newRootMetadataOrBust(t, id, h)
_, uid, err := config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
t.Fatal(err)
}
rootID := fakeBlockID(42)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
// won't even try getting the block if the user isn't a reader
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
expectedErr := ReadAccessError{"alice", h.GetCanonicalName(), false}
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err == nil {
t.Errorf("Got no expected error on getdir")
} else if err != expectedErr {
t.Errorf("Got unexpected error on root MD: %v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailMissingBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key, then
// fail block fetch
err := NoSuchBlockError{rootID}
expectBlock(config, rmd, blockPtr, dirBlock, err)
if _, err2 := config.KBFSOps().GetDirChildren(ctx, n); err2 == nil {
t.Errorf("Got no expected error on getdir")
} else if err2 != err {
t.Errorf("Got unexpected error on root MD: %v", err)
}
}
func TestKBFSOpsGetNestedDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
bID := fakeBlockID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: Exec}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Sym}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, bNode.BlockPointer, id, dirBlock)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsLookupSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
bID := fakeBlockID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %v", err)
}
bPath := ops.nodeCache.PathFromNode(bn)
expectedBNode := pathNode{makeBP(bID, rmd, config, u), "b"}
expectedBNode.KeyGen = 1
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bPath.path[2] != expectedBNode {
t.Errorf("Bad path node after lookup: %v vs %v",
bPath.path[2], expectedBNode)
}
}
func TestKBFSOpsLookupSymlinkSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
bID := fakeBlockID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Sym,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %v", err)
}
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad directory entry: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bn != nil {
t.Errorf("Node for symlink is not nil: %v", bn)
}
}
func TestKBFSOpsLookupNoSuchNameFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
bID := fakeBlockID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
expectedErr := NoSuchNameError{"c"}
_, _, err := config.KBFSOps().Lookup(ctx, n, "c")
if err == nil {
t.Error("No error as expected on Lookup")
} else if err != expectedErr {
t.Errorf("Unexpected error after bad Lookup: %v", err)
}
}
func TestKBFSOpsLookupNewDataVersionFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
bID := fakeBlockID(44)
dirBlock := NewDirBlock().(*DirBlock)
bInfo := makeBIFromID(bID, u)
bInfo.DataVer = 10
dirBlock.Children["b"] = DirEntry{
BlockInfo: bInfo,
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
expectedErr := &NewDataVersionError{
path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}},
bInfo.DataVer,
}
_, _, err := config.KBFSOps().Lookup(ctx, n, "b")
if err == nil {
t.Error("No expected error found on lookup")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Unexpected error after bad lookup: %v", err)
}
}
func TestKBFSOpsStatSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
bID := fakeBlockID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
ei, err := config.KBFSOps().Stat(ctx, n)
if err != nil {
t.Errorf("Error on Stat: %v", err)
}
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Stat returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
}
}
type shimMDOps struct {
isUnmerged bool
codec kbfscodec.Codec
crypto cryptoPure
kbpki KBPKI
MDOps
}
func (s shimMDOps) Put(ctx context.Context, rmd *RootMetadata) (MdID, error) {
if s.isUnmerged {
return MdID{}, MDServerErrorConflictRevision{}
}
rmd.SetSerializedPrivateMetadata([]byte{0x1})
username, _, err := s.kbpki.GetCurrentUserInfo(ctx)
if err != nil {
return MdID{}, err
}
signingKey := MakeLocalUserSigningKeyOrBust(username)
rmd.bareMd.SignWriterMetadataInternally(
ctx, s.codec, kbfscrypto.SigningKeySigner{Key: signingKey})
return s.crypto.MakeMdID(rmd.bareMd)
}
func (s shimMDOps) PutUnmerged(ctx context.Context, rmd *RootMetadata) (MdID, error) {
if !s.isUnmerged {
panic("Unexpected PutUnmerged call")
}
rmd.SetSerializedPrivateMetadata([]byte{0x2})
username, _, err := s.kbpki.GetCurrentUserInfo(ctx)
if err != nil {
return MdID{}, err
}
signingKey := MakeLocalUserSigningKeyOrBust(username)
rmd.bareMd.SignWriterMetadataInternally(
ctx, s.codec, kbfscrypto.SigningKeySigner{Key: signingKey})
return s.crypto.MakeMdID(rmd.bareMd)
}
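// expectSyncBlockHelper sets up mock expectations for syncing the blocks
// along path p: every block from the tail up to index skipSync is readied and
// put to the block server, and when skipSync == 0 the resulting MD is signed
// via shimMDOps and cached with its ref/unref byte counts checked.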
func expectSyncBlockHelper(
t *testing.T, config *ConfigMock, lastCall *gomock.Call,
uid keybase1.UID, id TlfID, name string, p path, kmd KeyMetadata,
newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
newRmd *ImmutableRootMetadata, newBlockIDs []BlockID, isUnmerged bool) (
path, *gomock.Call) {
// construct new path
newPath := path{
FolderBranch{Tlf: id},
make([]pathNode, 0, len(p.path)+1),
}
for _, node := range p.path {
newPath.path = append(newPath.path, pathNode{Name: node.Name})
}
if newEntry {
// one for the new entry
newPath.path = append(newPath.path, pathNode{Name: name})
}
// all MD is embedded for now
config.mockBsplit.EXPECT().ShouldEmbedBlockChanges(gomock.Any()).
AnyTimes().Return(true)
// By convention for these tests, the old blocks along the path
// all have EncodedSize == 1.
unrefBytes += uint64(len(p.path) * 1)
lastID := p.tailPointer().ID
for i := len(newPath.path) - 1; i >= skipSync; i-- {
newID := fakeBlockIDMul(lastID, 2)
newBuf := []byte{byte(i)}
refBytes += uint64(len(newBuf))
lastID = newID
readyBlockData := ReadyBlockData{
buf: newBuf,
}
call := config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd},
gomock.Any()).Return(newID, len(newBuf), readyBlockData, nil)
if lastCall != nil {
call = call.After(lastCall)
}
lastCall = call
newPath.path[i].ID = newID
newBlockIDs[i] = newID
config.mockBserv.EXPECT().Put(gomock.Any(), kmd.TlfID(), newID,
gomock.Any(), readyBlockData.buf, readyBlockData.serverHalf).
Return(nil)
}
if skipSync == 0 {
// sign the MD and put it
oldMDOps := config.MDOps()
if oldShim, ok := oldMDOps.(shimMDOps); ok {
if oldShim.isUnmerged != isUnmerged {
t.Fatal("old shim with different isUnmerged")
}
} else {
mdOps := shimMDOps{
isUnmerged,
config.Codec(),
config.Crypto(),
config.KBPKI(),
oldMDOps,
}
config.SetMDOps(mdOps)
}
config.mockMdcache.EXPECT().Put(gomock.Any()).
Do(func(rmd ImmutableRootMetadata) {
*newRmd = rmd
// Check that the ref bytes are correct.
if rmd.RefBytes() != refBytes {
t.Errorf("Unexpected refbytes: %d vs %d",
rmd.RefBytes(), refBytes)
}
if rmd.UnrefBytes() != unrefBytes {
t.Errorf("Unexpected unrefbytes: %d vs %d",
rmd.UnrefBytes(), unrefBytes)
}
}).Return(nil)
}
return newPath, lastCall
}
func expectSyncBlock(
t *testing.T, config *ConfigMock, lastCall *gomock.Call,
uid keybase1.UID, id TlfID, name string, p path, kmd KeyMetadata,
newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
newRmd *ImmutableRootMetadata, newBlockIDs []BlockID) (path, *gomock.Call) {
return expectSyncBlockHelper(t, config, lastCall, uid, id, name, p, kmd,
newEntry, skipSync, refBytes, unrefBytes, newRmd, newBlockIDs, false)
}
func expectSyncBlockUnmerged(
t *testing.T, config *ConfigMock, lastCall *gomock.Call,
uid keybase1.UID, id TlfID, name string, p path, kmd KeyMetadata,
newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
newRmd *ImmutableRootMetadata, newBlockIDs []BlockID) (path, *gomock.Call) {
return expectSyncBlockHelper(t, config, lastCall, uid, id, name, p, kmd,
newEntry, skipSync, refBytes, unrefBytes, newRmd, newBlockIDs, true)
}
func getBlockFromCache(t *testing.T, config Config, id TlfID, ptr BlockPointer,
branch BranchName) Block {
if block, err := config.DirtyBlockCache().Get(id, ptr, branch); err == nil {
return block
}
block, err := config.BlockCache().Get(ptr)
if err != nil {
t.Errorf("Couldn't find block %v, branch %s in the cache after test: "+
"%v", ptr, branch, err)
return nil
}
return block
}
func getDirBlockFromCache(t *testing.T, config Config, id TlfID,
ptr BlockPointer, branch BranchName) *DirBlock {
block := getBlockFromCache(t, config, id, ptr, branch)
dblock, ok := block.(*DirBlock)
if !ok {
t.Errorf("Cached block %v, branch %s was not a DirBlock", ptr, branch)
}
return dblock
}
func getFileBlockFromCache(t *testing.T, config Config, id TlfID,
ptr BlockPointer, branch BranchName) *FileBlock {
block := getBlockFromCache(t, config, id, ptr, branch)
fblock, ok := block.(*FileBlock)
if !ok {
t.Errorf("Cached block %v, branch %s was not a FileBlock", ptr, branch)
}
return fblock
}
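// checkNewPath verifies the result of an operation against expectedPath:
// that at least one batch notification went out with the right context,
// that the new path's IDs and names match, that mtimes/ctimes changed
// only on the expected nodes, and that every entry along the path points
// at the right block and has a sane size.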
func checkNewPath(t *testing.T, ctx context.Context, config Config,
newPath path, expectedPath path, rmd ReadOnlyRootMetadata, blocks []BlockID,
entryType EntryType, newName string, rename bool) {
// TODO: check that the observer updates match the expectedPath as
// well (but need to handle the rename case where there can be
// multiple updates). For now, just check that there's at least
// one update.
if len(config.(*ConfigMock).observer.batchChanges) < 1 {
t.Errorf("No batch notifications sent, at least one expected")
}
if ctx.Value(tCtxID) != config.(*ConfigMock).observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in batch notify: %v",
config.(*ConfigMock).observer.ctx.Value(tCtxID))
}
if len(newPath.path) != len(expectedPath.path) {
t.Errorf("Unexpected new path length: %d", len(newPath.path))
return
}
if newPath.Tlf != expectedPath.Tlf {
t.Errorf("Unexpected topdir in new path: %s",
newPath.Tlf)
}
// check all names and IDs
for i, node := range newPath.path {
eNode := expectedPath.path[i]
if node.ID != eNode.ID {
t.Errorf("Wrong id on new path[%d]: %v vs. %v", i, node, eNode)
}
if node.Name != eNode.Name {
t.Errorf("Wrong name on new path[%d]: %v vs. %v", i, node, eNode)
}
}
// all the entries should point correctly and have the right times set
currDe := rmd.data.Dir
for i, id := range blocks {
var timeSet bool
if newName != "" {
// only the last 2 nodes should have their times changed
timeSet = i > len(blocks)-3
} else {
// only the last node should have its times changed
timeSet = i > len(blocks)-2
}
// for a rename, the last entry only changes ctime
if (!rename || i != len(blocks)-1) && (currDe.Mtime != 0) != timeSet {
t.Errorf("mtime was wrong (%d): %d", i, currDe.Mtime)
}
if (currDe.Ctime != 0) != timeSet {
t.Errorf("ctime was wrong (%d): %d", i, currDe.Ctime)
}
if i < len(expectedPath.path) {
eID := expectedPath.path[i].ID
if currDe.ID != eID {
t.Errorf("Entry does not point to %v, but to %v",
eID, currDe.ID)
}
}
if i < len(blocks)-1 {
var nextName string
if i+1 >= len(expectedPath.path) {
// new symlinks don't have an entry in the path
nextName = newName
} else {
nextName = expectedPath.path[i+1].Name
}
// TODO: update BlockPointer for refnonces when we start deduping
dblock := getDirBlockFromCache(t, config, newPath.Tlf,
makeBP(id, rmd.RootMetadata, config, rmd.data.Dir.Creator), newPath.Branch)
nextDe, ok := dblock.Children[nextName]
if !ok {
t.Errorf("No entry (%d) for %s", i, nextName)
}
currDe = nextDe
} else if newName != "" {
if currDe.Type != entryType {
t.Errorf("New entry has wrong type %s, expected %s",
currDe.Type, entryType)
}
}
if (currDe.Type != File && currDe.Type != Exec) && currDe.Size == 0 {
t.Errorf("Type %s unexpectedly has 0 size (%d)", currDe.Type, i)
}
}
}
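// checkBPs checks that bps contains the same block pointers as
// expectedBPs, ignoring order.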
func checkBPs(t *testing.T, bps []BlockPointer, expectedBPs []BlockPointer,
kind string) {
if len(expectedBPs) != len(bps) {
t.Errorf("Unexpected %s size: %d vs %d",
kind, len(bps), len(expectedBPs))
}
for _, ptr := range expectedBPs {
found := false
for _, ptr2 := range bps {
if ptr == ptr2 {
found = true
break
}
}
if !found {
t.Errorf("Missing expected %s block: %v", kind, ptr)
}
}
}
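// checkOp verifies an op's ref blocks, unref blocks, and block updates
// against the expected values, ignoring order.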
func checkOp(t *testing.T, op OpCommon, refs []BlockPointer,
unrefs []BlockPointer, updates []blockUpdate) {
checkBPs(t, op.RefBlocks, refs, "Refs")
checkBPs(t, op.UnrefBlocks, unrefs, "Unrefs")
if len(updates) != len(op.Updates) {
t.Errorf("Unexpected updates size: %d vs %d",
len(op.Updates), len(updates))
}
for _, up := range updates {
found := false
for _, up2 := range op.Updates {
if up == up2 {
found = true
break
}
}
if !found {
t.Errorf("Missing expected block update: %v", up)
}
}
}
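// testCreateEntrySuccess tests the happy path of creating a "b" entry of
// the given type under directory "a". Symlinks don't get a block or a
// path node of their own, so they skip the new-entry sync expectation.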
func testCreateEntrySuccess(t *testing.T, entryType EntryType) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
rmd.data.Dir.Type = Dir
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// creating "a/b"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 3)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "b", p, rmd,
entryType != Sym, 0, 0, 0, &newRmd, blocks)
var newN Node
var err error
switch entryType {
case File:
newN, _, err = config.KBFSOps().CreateFile(ctx, n, "b", false, NoExcl)
case Exec:
newN, _, err = config.KBFSOps().CreateFile(ctx, n, "b", true, NoExcl)
case Dir:
newN, _, err = config.KBFSOps().CreateDir(ctx, n, "b")
case Sym:
_, err = config.KBFSOps().CreateLink(ctx, n, "b", "c")
newN = n
}
newP := ops.nodeCache.PathFromNode(newN)
if err != nil {
t.Errorf("Got error on create: %v", err)
}
require.NotNil(t, newRmd)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
entryType, "b", false)
b1 := getDirBlockFromCache(t, config, id, newP.path[1].BlockPointer,
newP.Branch)
if entryType == Sym {
de := b1.Children["b"]
if de.Type != Sym {
t.Error("Entry is not a symbolic link")
}
if de.SymPath != "c" {
t.Errorf("Symbolic path points to the wrong thing: %s", de.SymPath)
}
blocks = blocks[:len(blocks)-1] // discard fake block for symlink
} else if entryType != Dir {
de := b1.Children["b"]
if de.Size != 0 {
t.Errorf("New file has non-zero size: %d", de.Size)
}
}
checkBlockCache(t, config, id, append(blocks, rootID, aID), nil)
// make sure the createOp is correct
co, ok := newRmd.data.Changes.Ops[0].(*createOp)
if !ok {
t.Errorf("Couldn't find the createOp")
}
var refBlocks []BlockPointer
if entryType != Sym {
refBlocks = append(refBlocks, newP.path[2].BlockPointer)
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
checkOp(t, co.OpCommon, refBlocks, nil, updates)
dirUpdate := blockUpdate{rootBlock.Children["a"].BlockPointer,
newP.path[1].BlockPointer}
if co.Dir != dirUpdate {
t.Errorf("Incorrect dir update in op: %v vs. %v", co.Dir, dirUpdate)
} else if co.NewName != "b" {
t.Errorf("Incorrect name in op: %v", co.NewName)
} else if co.Type != entryType {
t.Errorf("Incorrect entry type in op: %v", co.Type)
}
}
func TestKBFSOpsCreateDirSuccess(t *testing.T) {
testCreateEntrySuccess(t, Dir)
}
func TestKBFSOpsCreateFileSuccess(t *testing.T) {
testCreateEntrySuccess(t, File)
}
func TestKBFSOpsCreateExecFileSuccess(t *testing.T) {
testCreateEntrySuccess(t, Exec)
}
func TestKBFSOpsCreateLinkSuccess(t *testing.T) {
testCreateEntrySuccess(t, Sym)
}
func testCreateEntryFailDupName(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// creating "a", which already exists in the root block
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NameExistsError{"a"}
var err error
// dir and link have different checks for dup name
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, "a")
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, "a", "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %v", err)
}
}
func TestCreateDirFailDupName(t *testing.T) {
testCreateEntryFailDupName(t, true)
}
func TestCreateLinkFailDupName(t *testing.T) {
testCreateEntryFailDupName(t, false)
}
func testCreateEntryFailNameTooLong(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
config.maxNameBytes = 2
name := "aaa"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NameTooLongError{name, config.maxNameBytes}
var err error
// dir and link have different code paths for checking the name length
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %v", err)
}
}
func TestCreateDirFailNameTooLong(t *testing.T) {
testCreateEntryFailNameTooLong(t, true)
}
func TestCreateLinkFailNameTooLong(t *testing.T) {
testCreateEntryFailNameTooLong(t, false)
}
func testCreateEntryFailDirTooBig(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
rmd.data.Dir.Size = 10
config.maxDirBytes = 12
name := "aaa"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
var err error
// dir and link have different code paths for checking the dir size
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if _, ok := err.(DirTooBigError); !ok {
t.Errorf("Got unexpected error on create: %v", err)
}
}
func TestCreateDirFailDirTooBig(t *testing.T) {
testCreateEntryFailDirTooBig(t, true)
}
func TestCreateLinkFailDirTooBig(t *testing.T) {
testCreateEntryFailDirTooBig(t, false)
}
func testCreateEntryFailKBFSPrefix(t *testing.T, et EntryType) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
name := ".kbfs_status"
expectedErr := DisallowedPrefixError{name, ".kbfs"}
var err error
// each entry type has its own create call that must check the prefix
switch et {
case Dir:
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
case Sym:
_, err = config.KBFSOps().CreateLink(ctx, n, name, "a")
case Exec:
_, _, err = config.KBFSOps().CreateFile(ctx, n, name, true, NoExcl)
case File:
_, _, err = config.KBFSOps().CreateFile(ctx, n, name, false, NoExcl)
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %v", err)
}
}
func TestCreateDirFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Dir)
}
func TestCreateFileFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, File)
}
func TestCreateExecFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Exec)
}
func TestCreateLinkFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Sym)
}
// TODO: Currently only the remove tests use makeDirTree(),
// makeFile(), et al. Make the other tests use these functions, too.
// makeDirTree creates a block tree for the given path components and
// returns the DirEntry for the root block, a path, and the
// corresponding list of blocks. If n components are given, then the
// path will have n+1 nodes (one extra for the root node), and there
// will be n+1 corresponding blocks.
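// For example, as the remove tests below use it,
//
//	rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d")
//
// yields a five-node path ({root}, "a", "b", "c", "d") and five
// DirBlocks, where blocks[i] is the block p.path[i] points to.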
func makeDirTree(id TlfID, uid keybase1.UID, components ...string) (
DirEntry, path, []*DirBlock) {
var idCounter byte = 0x10
makeBlockID := func() BlockID {
id := fakeBlockID(idCounter)
idCounter++
return id
}
// Handle the first (root) block.
bid := makeBlockID()
bi := makeBIFromID(bid, uid)
rootEntry := DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
nodes := []pathNode{{bi.BlockPointer, "{root}"}}
rootBlock := NewDirBlock().(*DirBlock)
blocks := []*DirBlock{rootBlock}
// Handle the rest.
parentDirBlock := rootBlock
for _, component := range components {
bid := makeBlockID()
bi := makeBIFromID(bid, uid)
parentDirBlock.Children[component] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
nodes = append(nodes, pathNode{bi.BlockPointer, component})
dirBlock := NewDirBlock().(*DirBlock)
blocks = append(blocks, dirBlock)
parentDirBlock = dirBlock
}
return rootEntry, path{FolderBranch{Tlf: id}, nodes}, blocks
}
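// makeFile adds an entry named name of the given type (File or Exec,
// anything else panics) to parentDirBlock, and returns the entry's path
// and its new, empty FileBlock.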
func makeFile(dir path, parentDirBlock *DirBlock, name string, et EntryType) (
path, *FileBlock) {
if et != File && et != Exec {
panic(fmt.Sprintf("Unexpected type %s", et))
}
bid := fakeBlockIDAdd(dir.tailPointer().ID, 1)
bi := makeBIFromID(bid, dir.tailPointer().Creator)
parentDirBlock.Children[name] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: et,
},
}
p := dir.ChildPath(name, bi.BlockPointer)
return p, NewFileBlock().(*FileBlock)
}
func makeDir(dir path, parentDirBlock *DirBlock, name string) (
path, *DirBlock) {
bid := fakeBlockIDAdd(dir.tailPointer().ID, 1)
bi := makeBIFromID(bid, dir.tailPointer().Creator)
parentDirBlock.Children[name] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
p := dir.ChildPath(name, bi.BlockPointer)
return p, NewDirBlock().(*DirBlock)
}
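// makeSym adds a symlink entry named name to parentDirBlock. Symlinks
// have no block of their own, so there's nothing to return; the dir
// parameter is unused but keeps the signature parallel to makeFile and
// makeDir.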
func makeSym(dir path, parentDirBlock *DirBlock, name string) {
parentDirBlock.Children[name] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
},
}
}
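// checkRmOp verifies the rmOp recorded in newRmd: its unref'd blocks,
// the block updates along the parent path, and the removed entry's name.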
func checkRmOp(t *testing.T, entryName string, newRmd ReadOnlyRootMetadata,
dirPath, newDirPath path, unrefBlocks []BlockPointer) {
// make sure the rmOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*rmOp)
require.True(t, ok)
var updates []blockUpdate
for i := 0; i < len(dirPath.path)-1; i++ {
updates = append(updates, blockUpdate{
dirPath.path[i].BlockPointer,
newDirPath.path[i].BlockPointer,
})
}
checkOp(t, ro.OpCommon, nil, unrefBlocks, updates)
dirUpdate := blockUpdate{
dirPath.tailPointer(), newDirPath.tailPointer(),
}
require.Equal(t, dirUpdate, ro.Dir)
require.Equal(t, entryName, ro.OldName)
}
func testKBFSOpsRemoveFileSuccess(t *testing.T, et EntryType) {
if et != File && et != Exec {
t.Fatalf("Unexpected type %s", et)
}
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, dirPath, dirBlocks :=
makeDirTree(id, uid, "a", "b", "c", "d")
rmd.data.Dir = rootEntry
// Prime cache with all dir blocks.
for i, dirBlock := range dirBlocks {
testPutBlockInCache(
t, config, dirPath.path[i].BlockPointer, id, dirBlock)
}
parentDirBlock := dirBlocks[len(dirBlocks)-1]
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
entryName := "file"
if et == Exec {
entryName += ".exe"
}
p, block := makeFile(dirPath, parentDirBlock, entryName, et)
testPutBlockInCache(t, config, p.tailPointer(), id, block)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]BlockID, len(dirPath.path))
// a block of size 1 is being unreferenced
var unrefBytes uint64 = 1
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, et, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range p.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{p.tailPointer()}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
func TestKBFSOpsRemoveFileSuccess(t *testing.T) {
testKBFSOpsRemoveFileSuccess(t, File)
}
func TestKBFSOpsRemoveExecSuccess(t *testing.T) {
testKBFSOpsRemoveFileSuccess(t, Exec)
}
func TestKBFSOpsRemoveDirSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
entryName := "dir"
rootEntry, p, blocks := makeDirTree(
id, uid, "a", "b", "c", "d", entryName)
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
dirPath := *p.parentPath()
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]BlockID, len(dirPath.path))
// a block of size 1 is being unreferenced
var unrefBytes uint64 = 1
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveDir(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, Dir, "", false)
newParentBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentBlock.Children[entryName]
require.False(t, ok)
for _, n := range p.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{p.tailPointer()}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
func TestKBFSOpsRemoveSymSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, dirPath, dirBlocks :=
makeDirTree(id, uid, "a", "b", "c", "d")
rmd.data.Dir = rootEntry
// Prime cache with all dir blocks.
for i, dirBlock := range dirBlocks {
testPutBlockInCache(
t, config, dirPath.path[i].BlockPointer, id, dirBlock)
}
parentDirBlock := dirBlocks[len(dirBlocks)-1]
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
entryName := "sym"
makeSym(dirPath, parentDirBlock, entryName)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]BlockID, len(dirPath.path))
// Symlinks have no block of their own, so nothing is unreferenced.
var unrefBytes uint64
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, Sym, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range dirPath.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, nil)
}
func TestKBFSOpsRemoveMultiBlockFileSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, dirPath, dirBlocks :=
makeDirTree(id, uid, "a", "b", "c", "d")
rmd.data.Dir = rootEntry
// Prime cache with all dir blocks.
for i, dirBlock := range dirBlocks {
testPutBlockInCache(
t, config, dirPath.path[i].BlockPointer, id, dirBlock)
}
parentDirBlock := dirBlocks[len(dirBlocks)-1]
entryName := "multiBlockFile"
lastBID := dirPath.tailPointer().ID
fileBID := fakeBlockIDAdd(lastBID, 1)
fileBI := makeBIFromID(fileBID, dirPath.tailPointer().Creator)
parentDirBlock.Children[entryName] = DirEntry{
BlockInfo: fileBI,
EntryInfo: EntryInfo{
Type: File,
},
}
// TODO: Write a helper function for making a file with
// indirect blocks and use it in other tests. (A hypothetical
// sketch, makeIndirectFileBlock, follows this test.)
bid1 := fakeBlockIDAdd(lastBID, 2)
bid2 := fakeBlockIDAdd(lastBID, 3)
bid3 := fakeBlockIDAdd(lastBID, 4)
bid4 := fakeBlockIDAdd(lastBID, 5)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(bid1, rmd, config, uid, 5, 0),
makeIFP(bid2, rmd, config, uid, 5, 5),
makeIFP(bid3, rmd, config, uid, 5, 10),
makeIFP(bid4, rmd, config, uid, 5, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
fileBP := makeBP(fileBID, rmd, config, uid)
p := dirPath.ChildPath(entryName, fileBP)
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
// let the top block be uncached, so we have to fetch it from BlockOps.
expectBlock(config, rmd, fileBP, fileBlock, nil)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
testPutBlockInCache(t, config, fileBlock.IPtrs[3].BlockPointer, id, block4)
// sync block
blockIDs := make([]BlockID, len(dirPath.path))
unrefBytes := uint64(1 + 4*5) // fileBlock + 4 indirect blocks
var newRmd ImmutableRootMetadata
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(), blockIDs,
File, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range p.path {
blockIDs = append(blockIDs, n.ID)
}
blockIDs = append(blockIDs, bid1, bid2, bid3, bid4)
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{
fileBP,
fileBlock.IPtrs[0].BlockPointer,
fileBlock.IPtrs[1].BlockPointer,
fileBlock.IPtrs[2].BlockPointer,
fileBlock.IPtrs[3].BlockPointer,
}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
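// makeIndirectFileBlock is a sketch of the helper that the TODO in the
// test above asks for (hypothetical; nothing calls it yet). It builds an
// indirect top block whose i-th child holds contents[i], mirroring the
// manual setup above. The rmd and config parameter types, and makeIFP's
// uint32 size and int64 offset parameters, are assumptions based on how
// these tests use them.
func makeIndirectFileBlock(rmd *RootMetadata, config *ConfigMock,
	uid keybase1.UID, lastBID BlockID, contents ...[]byte) (
	*FileBlock, []*FileBlock) {
	topBlock := NewFileBlock().(*FileBlock)
	topBlock.IsInd = true
	children := make([]*FileBlock, 0, len(contents))
	// By the convention above, lastBID+1 is the file's own ID, so the
	// indirect block IDs start at lastBID+2.
	bid := fakeBlockIDAdd(lastBID, 1)
	var off int64
	for _, buf := range contents {
		bid = fakeBlockIDAdd(bid, 1)
		topBlock.IPtrs = append(topBlock.IPtrs,
			makeIFP(bid, rmd, config, uid, uint32(len(buf)), off))
		child := NewFileBlock().(*FileBlock)
		child.Contents = buf
		children = append(children, child)
		off += int64(len(buf))
	}
	return topBlock, children
}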
func TestRemoveDirFailNonEmpty(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", "e")
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
ops := getOps(config, id)
n := nodeFromPath(t, ops, *p.parentPath().parentPath())
expectedErr := DirNotEmptyError{p.parentPath().tailName()}
err := config.KBFSOps().RemoveDir(ctx, n, "d")
require.Equal(t, expectedErr, err)
}
func testKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T, et EntryType) {
if et != File && et != Exec {
t.Fatalf("Unexpected type %s", et)
}
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, dirPath, dirBlocks :=
makeDirTree(id, uid, "a", "b", "c", "d")
rmd.data.Dir = rootEntry
// Prime cache with all dir blocks.
for i, dirBlock := range dirBlocks {
testPutBlockInCache(
t, config, dirPath.path[i].BlockPointer, id, dirBlock)
}
parentDirBlock := dirBlocks[len(dirBlocks)-1]
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
entryName := "file"
if et == Exec {
entryName += ".exe"
}
p, _ := makeFile(dirPath, parentDirBlock, entryName, et)
// The operation might be retried several times.
config.mockBops.EXPECT().Get(
gomock.Any(), gomock.Any(), p.tailPointer(),
gomock.Any()).Return(BServerErrorBlockNonExistent{}).MinTimes(1)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]BlockID, len(dirPath.path))
// a block of size 1 is being unreferenced
var unrefBytes uint64 = 1
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, File, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range dirPath.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{p.tailPointer()}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
func TestKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, File)
}
func TestKBFSOpsRemoveExecMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, Exec)
}
func TestKBFSOpsRemoveDirMissingBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
entryName := "dir"
rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", entryName)
rmd.data.Dir = rootEntry
// Prime cache with all directory blocks.
for i := 0; i < len(blocks)-1; i++ {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, blocks[i])
}
dirPath := *p.parentPath()
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
// The operation might be retried several times.
config.mockBops.EXPECT().Get(
gomock.Any(), gomock.Any(), p.tailPointer(),
gomock.Any()).Return(BServerErrorBlockNonExistent{}).MinTimes(1)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]BlockID, len(dirPath.path))
// a block of size 1 is being unreferenced
var unrefBytes uint64 = 1
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveDir(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, Dir, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range dirPath.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{p.tailPointer()}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
func TestRemoveDirFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", "e")
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
expectedErr := NoSuchNameError{"nonexistent"}
err := config.KBFSOps().RemoveDir(ctx, n, "nonexistent")
require.Equal(t, expectedErr, err)
}
func TestRenameInDirSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(41)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(42)
bID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// renaming "a/b" to "a/c"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 3)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
0, 0, 0, &newRmd, blocks)
err := config.KBFSOps().Rename(ctx, n, "b", n, "c")
if err != nil {
t.Errorf("Got error on rename: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
File, "c", true)
b1 := getDirBlockFromCache(
t, config, id, newP.path[1].BlockPointer, newP.Branch)
if _, ok := b1.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 1 {
t.Errorf("Expected 1 batch notification, got %d",
len(config.observer.batchChanges))
}
blocks = blocks[:len(blocks)-1] // the last block is never in the cache
checkBlockCache(t, config, id, append(blocks, rootID, aID), nil)
// make sure the renameOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
if !ok {
t.Errorf("Couldn't find the renameOp")
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
checkOp(t, ro.OpCommon, nil, nil, updates)
oldDirUpdate := blockUpdate{aNode.BlockPointer, newP.path[1].BlockPointer}
newDirUpdate := blockUpdate{}
if ro.OldDir != oldDirUpdate {
t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
oldDirUpdate)
} else if ro.OldName != "b" {
t.Errorf("Incorrect old name in op: %v", ro.OldName)
} else if ro.NewDir != newDirUpdate {
t.Errorf("Incorrect new dir update in op: %v (expected empty)",
ro.NewDir)
} else if ro.NewName != "c" {
t.Errorf("Incorrect name in op: %v", ro.NewName)
}
}
func TestRenameInDirOverEntrySuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(41)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(42)
bID := fakeBlockID(43)
cID := fakeBlockID(44)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
aBlock.Children["c"] = DirEntry{
BlockInfo: makeBIFromID(cID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
cBlock := NewFileBlock().(*FileBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
cNode := pathNode{makeBP(cID, rmd, config, uid), "c"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// renaming "a/b" to "a/c"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, cNode.BlockPointer, id, cBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 3)
unrefBytes := uint64(1)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
0, 0, unrefBytes, &newRmd, blocks)
err := config.KBFSOps().Rename(ctx, n, "b", n, "c")
if err != nil {
t.Errorf("Got error on rename: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
File, "c", true)
b1 := getDirBlockFromCache(
t, config, id, newP.path[1].BlockPointer, newP.Branch)
if _, ok := b1.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 1 {
t.Errorf("Expected 1 batch notification, got %d",
len(config.observer.batchChanges))
}
blocks = blocks[:len(blocks)-1] // the last block is never in the cache
checkBlockCache(t, config, id, append(blocks, rootID, aID, cID), nil)
// make sure the renameOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
if !ok {
t.Errorf("Couldn't find the renameOp")
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
checkOp(t, ro.OpCommon, nil, []BlockPointer{cNode.BlockPointer}, updates)
oldDirUpdate := blockUpdate{aNode.BlockPointer, newP.path[1].BlockPointer}
newDirUpdate := blockUpdate{}
if ro.OldDir != oldDirUpdate {
t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
oldDirUpdate)
} else if ro.OldName != "b" {
t.Errorf("Incorrect old name in op: %v", ro.OldName)
} else if ro.NewDir != newDirUpdate {
t.Errorf("Incorrect new dir update in op: %v (expected empty)",
ro.NewDir)
} else if ro.NewName != "c" {
t.Errorf("Incorrect name in op: %v", ro.NewName)
}
}
func TestRenameInRootSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(41)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(42)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// renaming "a" to "b"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 2)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
0, 0, 0, &newRmd, blocks)
err := config.KBFSOps().Rename(ctx, n, "a", n, "b")
if err != nil {
t.Errorf("Got error on rename: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
File, "b", true)
b0 := getDirBlockFromCache(
t, config, id, newP.path[0].BlockPointer, newP.Branch)
if _, ok := b0.Children["a"]; ok {
t.Errorf("entry for a is still around after rename")
} else if len(config.observer.batchChanges) != 1 {
t.Errorf("Expected 1 batch notification, got %d",
len(config.observer.batchChanges))
}
blocks = blocks[:len(blocks)-1] // the last block is never in the cache
checkBlockCache(t, config, id, append(blocks, rootID), nil)
// make sure the renameOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
if !ok {
t.Errorf("Couldn't find the renameOp")
}
checkOp(t, ro.OpCommon, nil, nil, nil)
oldDirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
newP.path[0].BlockPointer}
newDirUpdate := blockUpdate{}
if ro.OldDir != oldDirUpdate {
t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
oldDirUpdate)
} else if ro.OldName != "a" {
t.Errorf("Incorrect old name in op: %v", ro.OldName)
} else if ro.NewDir != newDirUpdate {
t.Errorf("Incorrect new dir update in op: %v (expected empty)",
ro.NewDir)
} else if ro.NewName != "b" {
t.Errorf("Incorrect name in op: %v", ro.NewName)
}
}
func TestRenameAcrossDirsSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(41)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(42)
bID := fakeBlockID(43)
rmd.data.Dir.Type = Dir
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n1 := nodeFromPath(t, ops, p1)
dID := fakeBlockID(40)
rootBlock.Children["d"] = DirEntry{
BlockInfo: makeBIFromID(dID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
dBlock := NewDirBlock().(*DirBlock)
dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
p2 := path{FolderBranch{Tlf: id}, []pathNode{node, dNode}}
n2 := nodeFromPath(t, ops, p2)
// renaming "a/b" to "d/c"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks1 := make([]BlockID, 2)
expectedPath1, lastCall :=
expectSyncBlock(t, config, nil, uid, id, "", p1, rmd, false,
1, 0, 0, nil, blocks1)
blocks2 := make([]BlockID, 3)
refBytes := uint64(1) // need to include directory "a"
unrefBytes := uint64(1) // need to include directory "a"
expectedPath2, _ :=
expectSyncBlock(t, config, lastCall, uid, id, "", p2, rmd, false, 0,
refBytes, unrefBytes, &newRmd, blocks2)
// fix up old expected path's common ancestor
expectedPath1.path[0].ID = expectedPath2.path[0].ID
err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
if err != nil {
t.Errorf("Got error on rename: %v", err)
}
newP1 := ops.nodeCache.PathFromNode(n1)
newP2 := ops.nodeCache.PathFromNode(n2)
// fix up blocks1 -- the first partial sync stops at aBlock, and
// checkNewPath expects {rootBlock, aBlock}
blocks1 = []BlockID{blocks2[0], blocks1[0]}
checkNewPath(t, ctx, config, newP1, expectedPath1, newRmd.ReadOnly(), blocks1,
File, "", true)
checkNewPath(t, ctx, config, newP2, expectedPath2, newRmd.ReadOnly(), blocks2,
File, "c", true)
b0 := getDirBlockFromCache(
t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
if _, ok := b0.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 2 {
t.Errorf("Expected 2 batch notifications, got %d",
len(config.observer.batchChanges))
}
blocks2 = blocks2[:len(blocks2)-1] // the last block is never in the cache
checkBlockCache(t, config, id,
append(blocks2, rootID, aID, dID, blocks1[0]), nil)
// make sure the renameOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
if !ok {
t.Errorf("Couldn't find the renameOp")
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP1.path[0].BlockPointer},
}
checkOp(t, ro.OpCommon, nil, nil, updates)
oldDirUpdate := blockUpdate{aNode.BlockPointer, newP1.path[1].BlockPointer}
newDirUpdate := blockUpdate{dNode.BlockPointer, newP2.path[1].BlockPointer}
if ro.OldDir != oldDirUpdate {
t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
oldDirUpdate)
} else if ro.OldName != "b" {
t.Errorf("Incorrect old name in op: %v", ro.OldName)
} else if ro.NewDir != newDirUpdate {
t.Errorf("Incorrect new dir update in op: %v vs. %v",
ro.NewDir, newDirUpdate)
} else if ro.NewName != "c" {
t.Errorf("Incorrect name in op: %v", ro.NewName)
}
}
func TestRenameAcrossPrefixSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(41)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(42)
bID := fakeBlockID(43)
dID := fakeBlockID(40)
rmd.data.Dir.Type = Dir
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
aBlock.Children["d"] = DirEntry{
BlockInfo: makeBIFromID(dID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
dBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
p2 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, dNode}}
ops := getOps(config, id)
n1 := nodeFromPath(t, ops, p1)
n2 := nodeFromPath(t, ops, p2)
// renaming "a/b" to "a/d/c"
// the common ancestor and its parent will be changed once and then re-read
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 4)
expectedPath2, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p2, rmd, false,
0, 0, 0, &newRmd, blocks)
err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
if err != nil {
t.Errorf("Got error on rename: %v", err)
}
newP1 := ops.nodeCache.PathFromNode(n1)
newP2 := ops.nodeCache.PathFromNode(n2)
if newP1.path[0].ID != newP2.path[0].ID {
t.Errorf("New old path not a prefix of new new path")
}
if newP1.path[1].ID != newP2.path[1].ID {
t.Errorf("New old path not a prefix of new new path")
}
b0 := getDirBlockFromCache(
t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
if b0.Children["a"].Mtime == 0 {
t.Errorf("a's mtime didn't change")
}
if b0.Children["a"].Ctime == 0 {
t.Errorf("a's ctime didn't change")
}
// now change the times back so checkNewPath below works without hacking
aDe := b0.Children["a"]
aDe.Mtime = 0
aDe.Ctime = 0
b0.Children["a"] = aDe
checkNewPath(t, ctx, config, newP2, expectedPath2, newRmd.ReadOnly(), blocks,
File, "c", true)
b1 := getDirBlockFromCache(
t, config, id, newP1.path[1].BlockPointer, newP1.Branch)
if _, ok := b1.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 2 {
t.Errorf("Expected 2 batch notifications, got %d",
len(config.observer.batchChanges))
}
blocks = blocks[:len(blocks)-1] // the last block is never in the cache
checkBlockCache(t, config, id,
append(blocks, rootID, aID, dID), nil)
}
func TestRenameAcrossOtherPrefixSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(41)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(42)
bID := fakeBlockID(43)
dID := fakeBlockID(40)
rmd.data.Dir.Type = Dir
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["d"] = DirEntry{
BlockInfo: makeBIFromID(dID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
dBlock := NewDirBlock().(*DirBlock)
dBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, dNode}}
p2 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n1 := nodeFromPath(t, ops, p1)
n2 := nodeFromPath(t, ops, p2)
// renaming "a/d/b" to "a/c"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks1 := make([]BlockID, 3)
expectedPath1, lastCall :=
expectSyncBlock(t, config, nil, uid, id, "", p1, rmd, false,
2, 0, 0, &newRmd, blocks1)
blocks2 := make([]BlockID, 3)
refBytes := uint64(1) // need to include directory "d"
unrefBytes := uint64(1) // need to include directory "d"
expectedPath2, _ :=
expectSyncBlock(t, config, lastCall, uid, id, "", p2, rmd, false, 0,
refBytes, unrefBytes, &newRmd, blocks2)
// the new path is a prefix of the old path
expectedPath1.path[0].ID = expectedPath2.path[0].ID
expectedPath1.path[1].ID = expectedPath2.path[1].ID
err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
if err != nil {
t.Errorf("Got error on removal: %v", err)
}
newP1 := ops.nodeCache.PathFromNode(n1)
newP2 := ops.nodeCache.PathFromNode(n2)
if newP2.path[0].ID != newP1.path[0].ID {
t.Errorf("New old path not a prefix of new new path")
}
if newP2.path[1].ID != newP1.path[1].ID {
t.Errorf("New old path not a prefix of new new path")
}
b1 := getDirBlockFromCache(
t, config, id, newP1.path[1].BlockPointer, newP1.Branch)
if b1.Children["d"].Mtime == 0 {
t.Errorf("d's mtime didn't change")
}
if b1.Children["d"].Ctime == 0 {
t.Errorf("d's ctime didn't change")
}
b0 := getDirBlockFromCache(
t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
if b0.Children["a"].Mtime == 0 {
t.Errorf("d's mtime didn't change")
}
if b0.Children["a"].Ctime == 0 {
t.Errorf("d's ctime didn't change")
}
checkNewPath(t, ctx, config, newP1, expectedPath1, newRmd.ReadOnly(), blocks2,
File, "c", true)
b2 := getDirBlockFromCache(
t, config, id, newP1.path[2].BlockPointer, newP1.Branch)
if _, ok := b2.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 2 {
t.Errorf("Expected 2 batch notifications, got %d",
len(config.observer.batchChanges))
}
blocks2 = blocks2[:len(blocks2)-1] // the last block is never in the cache
checkBlockCache(t, config, id,
append(blocks2, rootID, aID, dID, blocks1[2]), nil)
}
func TestRenameFailAcrossTopLevelFolders(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id1 := FakeTlfID(1, false)
h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
rmd1 := newRootMetadataOrBust(t, id1, h1)
id2 := FakeTlfID(2, false)
h2 := parseTlfHandleOrBust(t, config, "alice,bob,charlie", false)
rmd2 := newRootMetadataOrBust(t, id2, h2)
uid1 := h2.ResolvedWriters()[0]
uid2 := h2.ResolvedWriters()[2]
rootID1 := fakeBlockID(41)
aID1 := fakeBlockID(42)
node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
ops1 := getOps(config, id1)
n1 := nodeFromPath(t, ops1, p1)
rootID2 := fakeBlockID(38)
aID2 := fakeBlockID(39)
node2 := pathNode{makeBP(rootID2, rmd2, config, uid2), "p"}
aNode2 := pathNode{makeBP(aID2, rmd2, config, uid2), "a"}
p2 := path{FolderBranch{Tlf: id2}, []pathNode{node2, aNode2}}
ops2 := getOps(config, id2)
n2 := nodeFromPath(t, ops2, p2)
expectedErr := RenameAcrossDirsError{}
if err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c"); err == nil {
t.Errorf("Got no expected error on rename")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Got unexpected error on rename: %v", err)
}
}
func TestRenameFailAcrossBranches(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id1 := FakeTlfID(1, false)
h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
rmd1 := newRootMetadataOrBust(t, id1, h1)
uid1 := h1.FirstResolvedWriter()
rootID1 := fakeBlockID(41)
aID1 := fakeBlockID(42)
node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
p2 := path{FolderBranch{id1, "test"}, []pathNode{node1, aNode1}}
ops1 := getOps(config, id1)
n1 := nodeFromPath(t, ops1, p1)
ops2 := config.KBFSOps().(*KBFSOpsStandard).getOpsNoAdd(
FolderBranch{id1, "test"})
n2 := nodeFromPath(t, ops2, p2)
expectedErr := RenameAcrossDirsError{}
if err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c"); err == nil {
t.Errorf("Got no expected error on rename")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Got unexpected error on rename: %v", err)
}
}
func TestKBFSOpsCacheReadFullSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
n := len(fileBlock.Contents)
dest := make([]byte, n, n)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fileBlock.Contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadPartialSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
dest := make([]byte, 4, 4)
if n, err := config.KBFSOps().Read(ctx, pNode, dest, 2); err != nil {
t.Errorf("Got error on read: %v", err)
} else if n != 4 {
t.Errorf("Read the wrong number of bytes: %d", n)
} else if !bytes.Equal(dest, fileBlock.Contents[2:6]) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadFullMultiBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
id3 := fakeBlockID(46)
id4 := fakeBlockID(47)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, u, 0, 0),
makeIFP(id2, rmd, config, u, 6, 5),
makeIFP(id3, rmd, config, u, 7, 10),
makeIFP(id4, rmd, config, u, 8, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
testPutBlockInCache(t, config, fileBlock.IPtrs[3].BlockPointer, id, block4)
n := 20
dest := make([]byte, n, n)
fullContents := append(block1.Contents, block2.Contents...)
fullContents = append(fullContents, block3.Contents...)
fullContents = append(fullContents, block4.Contents...)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fullContents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadPartialMultiBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
id3 := fakeBlockID(46)
id4 := fakeBlockID(47)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, u, 0, 0),
makeIFP(id2, rmd, config, u, 6, 5),
makeIFP(id3, rmd, config, u, 7, 10),
makeIFP(id4, rmd, config, u, 8, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
n := 10
dest := make([]byte, n, n)
contents := append(block1.Contents[3:], block2.Contents...)
contents = append(contents, block3.Contents[:3]...)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 3); err != nil {
t.Errorf("Got error on read: %v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadFailPastEnd(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
dest := make([]byte, 4, 4)
if n, err := config.KBFSOps().Read(ctx, pNode, dest, 10); err != nil {
t.Errorf("Got error on read: %v", err)
} else if n != 0 {
t.Errorf("Read the wrong number of bytes: %d", n)
}
}
func TestKBFSOpsServerReadFullSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileBlockPtr := makeBP(fileID, rmd, config, u)
fileNode := pathNode{fileBlockPtr, "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, fileBlockPtr, fileBlock, nil)
n := len(fileBlock.Contents)
dest := make([]byte, n, n)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fileBlock.Contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsServerReadFailNoSuchBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileBlockPtr := makeBP(fileID, rmd, config, u)
fileNode := pathNode{fileBlockPtr, "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
err := NoSuchBlockError{fileID}
expectBlock(config, rmd, fileBlockPtr, fileBlock, err)
n := len(fileBlock.Contents)
dest := make([]byte, n, n)
if _, err2 := config.KBFSOps().Read(ctx, pNode, dest, 0); err2 == nil {
t.Errorf("Got no expected error")
} else if err2 != err {
t.Errorf("Got unexpected error: %v", err2)
}
}
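// checkSyncOp verifies that the given syncOp unrefs the expected
// file pointer and contains exactly the expected write ranges.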
func checkSyncOp(t *testing.T, codec kbfscodec.Codec,
so *syncOp, filePtr BlockPointer, writes []WriteRange) {
	if so == nil {
		t.Fatal("No sync info for written file!")
	}
if so.File.Unref != filePtr {
t.Errorf("Unexpected unref file in sync op: %v vs %v",
so.File.Unref, filePtr)
}
	if len(so.Writes) != len(writes) {
		t.Fatalf("Unexpected number of writes: %v (expected %v)",
			len(so.Writes), len(writes))
	}
for i, w := range writes {
writeEqual, err := kbfscodec.Equal(codec, so.Writes[i], w)
if err != nil {
t.Fatal(err)
}
if !writeEqual {
t.Errorf("Unexpected write: %v vs %v", so.Writes[i], w)
}
}
}
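// checkSyncOpInCache looks up the in-progress syncOp for the given
// file in the folder's unref cache and validates it with checkSyncOp.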
func checkSyncOpInCache(t *testing.T, codec kbfscodec.Codec,
ops *folderBranchOps, filePtr BlockPointer, writes []WriteRange) {
// check the in-progress syncOp
si, ok := ops.blocks.unrefCache[filePtr.Ref()]
	if !ok {
		t.Fatal("No sync info for written file!")
	}
checkSyncOp(t, codec, si.op, filePtr, writes)
}
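// updateWithDirtyEntries applies any dirty directory entries to the
// given block, taking blockLock for reading.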
func updateWithDirtyEntries(ctx context.Context, ops *folderBranchOps,
lState *lockState, block *DirBlock) (*DirBlock, error) {
ops.blocks.blockLock.RLock(lState)
defer ops.blocks.blockLock.RUnlock(lState)
return ops.blocks.updateWithDirtyEntriesLocked(ctx, lState, block)
}
func TestKBFSOpsWriteNewBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
t.Errorf("Got error on write: %v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newRootBlock := getDirBlockFromCache(
t, config, id, node.BlockPointer, p.Branch)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(ctx, ops, lState, newRootBlock)
require.NoError(t, err)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
} else if newRootBlock.Children["f"].GetWriter() != uid {
t.Errorf("Wrong last writer: %v",
newRootBlock.Children["f"].GetWriter())
} else if newRootBlock.Children["f"].Size != uint64(len(data)) {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 0, Len: uint64(len(data))}})
}
func TestKBFSOpsWriteExtendSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8, 9, 10}
expectedFullData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = expectedFullData
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 5); err != nil {
t.Errorf("Got error on write: %v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: uint64(len(data))}})
}
func TestKBFSOpsWritePastEndSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8, 9, 10}
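	// writing 5 bytes at offset 7 of a 5-byte file leaves a
	// zero-filled hole at offsets 5 and 6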
expectedFullData := []byte{1, 2, 3, 4, 5, 0, 0, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(7)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = expectedFullData
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 7); err != nil {
t.Errorf("Got error on write: %v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 7, Len: uint64(len(data))}})
}
func TestKBFSOpsWriteCauseSplit(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
newData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
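	// writing at offset 1 of an empty file leaves a single zero
	// byte at offset 0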
expectedFullData := append([]byte{0}, newData...)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
// only copy the first half first
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), newData, int64(1)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append([]byte{0}, data[0:5]...)
}).Return(int64(5))
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
// new left block
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id1, nil)
// new right block
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id2, nil)
	// next we'll get the right block again, then copy the second half
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), newData[5:10], int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(5))
if err := config.KBFSOps().Write(ctx, n, newData, 1); err != nil {
t.Errorf("Got error on write: %v", err)
}
b, _ := config.BlockCache().Get(node.BlockPointer)
newRootBlock := b.(*DirBlock)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(ctx, ops, lState, newRootBlock)
require.NoError(t, err)
b, _ = config.DirtyBlockCache().Get(id, fileNode.BlockPointer, p.Branch)
pblock := b.(*FileBlock)
b, _ = config.DirtyBlockCache().Get(id, makeBP(id1, rmd, config, uid),
p.Branch)
block1 := b.(*FileBlock)
b, _ = config.DirtyBlockCache().Get(id, makeBP(id2, rmd, config, uid),
p.Branch)
block2 := b.(*FileBlock)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData[0:6], block1.Contents) {
t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
} else if !bytes.Equal(expectedFullData[6:11], block2.Contents) {
t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
} else if !pblock.IsInd {
t.Errorf("Parent block is not indirect!")
} else if len(pblock.IPtrs) != 2 {
t.Errorf("Wrong number of pointers in pblock: %v", pblock.IPtrs)
} else if pblock.IPtrs[0].ID != id1 {
t.Errorf("Parent block has wrong id for block 1: %v (vs. %v)",
pblock.IPtrs[0].ID, id1)
} else if pblock.IPtrs[1].ID != id2 {
t.Errorf("Parent block has wrong id for block 2: %v",
pblock.IPtrs[1].ID)
} else if pblock.IPtrs[0].Off != 0 {
t.Errorf("Parent block has wrong offset for block 1: %d",
pblock.IPtrs[0].Off)
} else if pblock.IPtrs[1].Off != 6 {
t.Errorf("Parent block has wrong offset for block 5: %d",
pblock.IPtrs[1].Off)
} else if newRootBlock.Children["f"].Size != uint64(11) {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
pblock.IPtrs[0].BlockPointer: p.Branch,
pblock.IPtrs[1].BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 1, Len: uint64(len(newData))}})
}
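// mergeUnrefCache folds the file's pending unref info into the given
// RootMetadata, so tests can check block changes directly.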
func mergeUnrefCache(
ops *folderBranchOps, lState *lockState, file path, md *RootMetadata) {
ops.blocks.blockLock.RLock(lState)
defer ops.blocks.blockLock.RUnlock(lState)
ops.blocks.unrefCache[file.tailPointer().Ref()].mergeUnrefCache(md)
}
func TestKBFSOpsWriteOverMultipleBlocks(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
rootBlock := NewDirBlock().(*DirBlock)
filePtr := BlockPointer{
ID: fileID, KeyGen: 1, DataVer: 1,
BlockContext: BlockContext{
Creator: uid,
},
}
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: filePtr,
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5}
expectedFullData := []byte{5, 4, 1, 2, 3, 4, 5, 8, 7, 6}
so, err := newSyncOp(filePtr)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
	// copy the first part of the data (3 bytes) into block 1
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), []byte{1, 2, 3}, int64(2)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block1.Contents[0:2], data[0:3]...)
}).Return(int64(3))
// update block 2
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data[3:], int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(data, block2.Contents[2:]...)
}).Return(int64(2))
if err := config.KBFSOps().Write(ctx, n, data, 2); err != nil {
t.Errorf("Got error on write: %v", err)
}
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
newBlock2 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[1].BlockPointer, p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(expectedFullData[0:5], newBlock1.Contents) {
		t.Errorf("Wrote bad contents to block 1: %v", newBlock1.Contents)
	} else if !bytes.Equal(expectedFullData[5:10], newBlock2.Contents) {
		t.Errorf("Wrote bad contents to block 2: %v", newBlock2.Contents)
}
lState := makeFBOLockState()
// merge the unref cache to make it easy to check for changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 2, Len: uint64(len(data))}})
mergeUnrefCache(ops, lState, p, rmd)
checkBlockCache(t, config, id, []BlockID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[0].BlockPointer: p.Branch,
fileBlock.IPtrs[1].BlockPointer: p.Branch,
})
}
func TestKBFSOpsWriteFailTooBig(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8}
config.maxFileBytes = 12
err := config.KBFSOps().Write(ctx, n, data, 10)
if err == nil {
t.Errorf("Got no expected error on Write")
} else if _, ok := err.(FileTooBigError); !ok {
t.Errorf("Got unexpected error on Write: %v", err)
}
}
// Read tests check the same error cases, so no need for similar write
// error tests
func TestKBFSOpsTruncateToZeroSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := []byte{}
if err := config.KBFSOps().Truncate(ctx, n, 0); err != nil {
t.Errorf("Got error on truncate: %v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newRootBlock := getDirBlockFromCache(
t, config, id, node.BlockPointer, p.Branch)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(ctx, ops, lState, newRootBlock)
require.NoError(t, err)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", newFileBlock.Contents)
} else if newRootBlock.Children["f"].GetWriter() != uid {
t.Errorf("Wrong last writer: %v",
newRootBlock.Children["f"].GetWriter())
} else if newRootBlock.Children["f"].Size != 0 {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 0, Len: 0}})
}
func TestKBFSOpsTruncateSameSize(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: makeBIFromID(fileID, u),
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := fileBlock.Contents
if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
t.Errorf("Got error on truncate: %v", err)
} else if config.observer.localChange != nil {
t.Errorf("Unexpected local update during truncate: %v",
config.observer.localChange)
} else if !bytes.Equal(data, fileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID}, nil)
}
func TestKBFSOpsTruncateSmallerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := []byte{1, 2, 3, 4, 5}
if err := config.KBFSOps().Truncate(ctx, n, 5); err != nil {
t.Errorf("Got error on truncate: %v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: 0}})
}
func TestKBFSOpsTruncateShortensLastBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
rootBlock := NewDirBlock().(*DirBlock)
fileInfo := makeBIFromID(fileID, uid)
rootBlock.Children["f"] = DirEntry{
BlockInfo: fileInfo,
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
so, err := newSyncOp(fileInfo.BlockPointer)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
data2 := []byte{10, 9}
if err := config.KBFSOps().Truncate(ctx, n, 7); err != nil {
t.Errorf("Got error on truncate: %v", err)
}
newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
newBlock2 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[1].BlockPointer, p.Branch)
lState := makeFBOLockState()
// merge unref changes so we can easily check the block changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 7, Len: 0}})
mergeUnrefCache(ops, lState, p, rmd)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(block1.Contents, newBlock1.Contents) {
t.Errorf("Wrote bad contents for block 1: %v", newBlock1.Contents)
} else if !bytes.Equal(data2, newBlock2.Contents) {
t.Errorf("Wrote bad contents for block 2: %v", newBlock2.Contents)
} else if len(newPBlock.IPtrs) != 2 {
t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
} else if rmd.UnrefBytes() != 0+6 {
		// The file ID and the last block were modified and marked dirty
t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
rmd.UnrefBytes())
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[1].BlockPointer: p.Branch,
})
}
func TestKBFSOpsTruncateRemovesABlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
rootBlock := NewDirBlock().(*DirBlock)
fileInfo := makeBIFromID(fileID, uid)
rootBlock.Children["f"] = DirEntry{
BlockInfo: fileInfo,
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
so, err := newSyncOp(fileInfo.BlockPointer)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
data := []byte{5, 4, 3, 2}
if err := config.KBFSOps().Truncate(ctx, n, 4); err != nil {
t.Errorf("Got error on truncate: %v", err)
}
newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
lState := makeFBOLockState()
// merge unref changes so we can easily check the block changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 4, Len: 0}})
mergeUnrefCache(ops, lState, p, rmd)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newBlock1.Contents) {
t.Errorf("Wrote bad contents: %v", newBlock1.Contents)
} else if len(newPBlock.IPtrs) != 1 {
t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
} else if rmd.UnrefBytes() != 0+5+6 {
		// The file ID and both blocks were modified and marked dirty
t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
rmd.UnrefBytes())
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID, id1},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[0].BlockPointer: p.Branch,
})
}
func TestKBFSOpsTruncateBiggerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), []byte{0, 0, 0, 0, 0}, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block.Contents, data...)
}).Return(int64(5))
data := []byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}
if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
t.Errorf("Got error on truncate: %v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []BlockID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
// A truncate past the end of the file actually translates into a
// write for the difference
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: 5}})
}
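// testSetExSuccess checks SetEx for the given entry type, verifying
// that only File<->Exec transitions produce a batch change and a
// setAttrOp.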
func testSetExSuccess(t *testing.T, entryType EntryType, ex bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, entryType != Sym)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Size: 1,
Type: entryType,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedChanges := 1
// SetEx() should do nothing when the exec status doesn't change.
if entryType == Sym || entryType == Dir || (entryType == File && !ex) ||
(entryType == Exec && ex) {
expectedChanges = 0
}
var expectedPath path
var newRmd ImmutableRootMetadata
var blocks []BlockID
if expectedChanges > 0 {
// sync block
blocks = make([]BlockID, 2)
expectedPath, _ = expectSyncBlock(t, config, nil, uid, id, "",
*p.parentPath(), rmd, false, 0, 0, 0, &newRmd, blocks)
expectedPath.path = append(expectedPath.path, aNode)
}
// SetEx() should only change the type of File and Exec.
var expectedType EntryType
if entryType == File && ex {
expectedType = Exec
} else if entryType == Exec && !ex {
expectedType = File
} else {
expectedType = entryType
}
// chmod a+x a
err := config.KBFSOps().SetEx(ctx, n, ex)
if err != nil {
t.Errorf("Got unexpected error on setex: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if expectedChanges != len(config.observer.batchChanges) {
t.Errorf("got changed=%d, expected %d",
len(config.observer.batchChanges), expectedChanges)
} else {
if blocks != nil {
rootBlock = getDirBlockFromCache(
t, config, id, newP.path[0].BlockPointer, newP.Branch)
}
if rootBlock.Children["a"].Type != expectedType {
t.Errorf("a has type %s, expected %s",
rootBlock.Children["a"].Type, expectedType)
} else if expectedChanges > 0 {
// SetEx() should always change the ctime of
// non-symlinks.
// pretend it's a rename so only ctime gets checked
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
expectedType, "", true)
}
}
if expectedChanges > 0 {
blocks = blocks[:len(blocks)-1] // last block is never in the cache
}
checkBlockCache(t, config, id, append(blocks, rootID), nil)
if expectedChanges > 0 {
// make sure the setAttrOp is correct
sao, ok := newRmd.data.Changes.Ops[0].(*setAttrOp)
if !ok {
t.Errorf("Couldn't find the setAttrOp")
}
checkOp(t, sao.OpCommon, nil, nil, nil)
dirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
newP.path[0].BlockPointer}
if sao.Dir != dirUpdate {
t.Errorf("Incorrect dir update in op: %v vs. %v", sao.Dir,
dirUpdate)
} else if sao.Name != "a" {
t.Errorf("Incorrect name in op: %v", sao.Name)
} else if sao.Attr != exAttr {
t.Errorf("Incorrect attr in op: %v", sao.Attr)
}
}
}
func TestSetExFileSuccess(t *testing.T) {
testSetExSuccess(t, File, true)
}
func TestSetNoExFileSuccess(t *testing.T) {
testSetExSuccess(t, File, false)
}
func TestSetExExecSuccess(t *testing.T) {
testSetExSuccess(t, Exec, true)
}
func TestSetNoExExecSuccess(t *testing.T) {
testSetExSuccess(t, Exec, false)
}
func TestSetExDirSuccess(t *testing.T) {
testSetExSuccess(t, Dir, true)
}
func TestSetNoExDirSuccess(t *testing.T) {
testSetExSuccess(t, Dir, false)
}
func TestSetExSymSuccess(t *testing.T) {
testSetExSuccess(t, Sym, true)
}
func TestSetNoExSymSuccess(t *testing.T) {
testSetExSuccess(t, Sym, false)
}
func TestSetExFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NoSuchNameError{p.tailName()}
// chmod a+x a
if err := config.KBFSOps().SetEx(ctx, n, true); err == nil {
t.Errorf("Got no expected error on setex")
} else if err != expectedErr {
t.Errorf("Got unexpected error on setex: %v", err)
}
}
// Other SetEx failure cases are all the same as any other block sync
func TestSetMtimeSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 2)
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
*p.parentPath(), rmd, false, 0, 0, 0, &newRmd, blocks)
expectedPath.path = append(expectedPath.path, aNode)
newMtime := time.Now()
err := config.KBFSOps().SetMtime(ctx, n, &newMtime)
if err != nil {
t.Errorf("Got unexpected error on setmtime: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
b0 := getDirBlockFromCache(
t, config, id, newP.path[0].BlockPointer, newP.Branch)
if b0.Children["a"].Mtime != newMtime.UnixNano() {
t.Errorf("a has wrong mtime: %v", b0.Children["a"].Mtime)
} else {
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
Exec, "", false)
}
blocks = blocks[:len(blocks)-1] // last block is never in the cache
checkBlockCache(t, config, id, append(blocks, rootID), nil)
// make sure the setAttrOp is correct
sao, ok := newRmd.data.Changes.Ops[0].(*setAttrOp)
if !ok {
t.Errorf("Couldn't find the setAttrOp")
}
checkOp(t, sao.OpCommon, nil, nil, nil)
dirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
newP.path[0].BlockPointer}
if sao.Dir != dirUpdate {
t.Errorf("Incorrect dir update in op: %v vs. %v", sao.Dir,
dirUpdate)
} else if sao.Name != "a" {
t.Errorf("Incorrect name in op: %v", sao.Name)
} else if sao.Attr != mtimeAttr {
t.Errorf("Incorrect attr in op: %v", sao.Attr)
}
}
func TestSetMtimeNull(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
oldMtime := time.Now().UnixNano()
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: File,
Mtime: oldMtime,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
if err := config.KBFSOps().SetMtime(ctx, n, nil); err != nil {
t.Errorf("Got unexpected error on null setmtime: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if rootBlock.Children["a"].Mtime != oldMtime {
t.Errorf("a has wrong mtime: %v", rootBlock.Children["a"].Mtime)
} else if newP.path[0].ID != p.path[0].ID {
t.Errorf("Got back a changed path for null setmtime test: %v", newP)
}
checkBlockCache(t, config, id, nil, nil)
}
func TestMtimeFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NoSuchNameError{p.tailName()}
newMtime := time.Now()
if err := config.KBFSOps().SetMtime(ctx, n, &newMtime); err == nil {
t.Errorf("Got no expected error on setmtime")
} else if err != expectedErr {
t.Errorf("Got unexpected error on setmtime: %v", err)
}
}
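// getOrCreateSyncInfo returns the syncInfo for the given entry,
// creating one under blockLock if it doesn't exist yet.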
func getOrCreateSyncInfo(
ops *folderBranchOps, lState *lockState, de DirEntry) (*syncInfo, error) {
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
return ops.blocks.getOrCreateSyncInfoLocked(lState, de)
}
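// makeBlockStateDirty marks the given block pointer as dirty in the
// folder's dirty-file state, so a subsequent Sync will pick it up.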
func makeBlockStateDirty(config Config, kmd KeyMetadata, p path,
ptr BlockPointer) {
ops := getOps(config, kmd.TlfID())
lState := makeFBOLockState()
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
df := ops.blocks.getOrCreateDirtyFileLocked(lState, p)
df.setBlockDirty(ptr)
}
// SetMtime failure cases are all the same as any other block sync
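// testSyncDirtySuccess checks that syncing a dirty file block
// produces the expected new path and a syncOp recording the write,
// in both merged and unmerged modes.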
func testSyncDirtySuccess(t *testing.T, isUnmerged bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
aBlock := NewFileBlock().(*FileBlock)
aBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
lState := makeFBOLockState()
si, err := getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
require.NoError(t, err)
si.op.addWrite(0, 10)
// fsync a
config.DirtyBlockCache().Put(id, aNode.BlockPointer, p.Branch, aBlock)
makeBlockStateDirty(config, rmd, p, aNode.BlockPointer)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// TODO: put a dirty DE entry in the cache, to test that the new
// root block has the correct file size.
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 2)
var expectedPath path
if isUnmerged {
// Turn off the conflict resolver to avoid unexpected mock
// calls. Recreate the input channel to make sure the later
// Shutdown() call works.
ops.cr.Pause()
expectedPath, _ = expectSyncBlockUnmerged(t, config, nil, uid, id,
"", p, rmd, false, 0, 0, 0, &newRmd, blocks)
} else {
expectedPath, _ = expectSyncBlock(t, config, nil, uid, id, "", p,
rmd, false, 0, 0, 0, &newRmd, blocks)
}
err = config.KBFSOps().Sync(ctx, n)
if err != nil {
t.Errorf("Got unexpected error on sync: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
Exec, "", false)
checkBlockCache(t, config, id, append(blocks, rootID), nil)
// check the sync op
so, ok := newRmd.data.Changes.Ops[0].(*syncOp)
if !ok {
t.Errorf("Couldn't find the syncOp")
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
checkOp(t, so.OpCommon, nil, nil, updates)
fileUpdate := blockUpdate{aNode.BlockPointer, newP.path[1].BlockPointer}
if so.File != fileUpdate {
t.Errorf("Incorrect file update in op: %v vs. %v", so.File,
fileUpdate)
}
// make sure the write is propagated
checkSyncOp(t, config.Codec(), so,
aNode.BlockPointer, []WriteRange{{Off: 0, Len: 10}})
}
func TestSyncDirtySuccess(t *testing.T) {
testSyncDirtySuccess(t, false)
}
func TestSyncDirtyUnmergedSuccess(t *testing.T) {
testSyncDirtySuccess(t, true)
}
func TestSyncCleanSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(43)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// fsync a
if err := config.KBFSOps().Sync(ctx, n); err != nil {
t.Errorf("Got unexpected error on sync: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if len(newP.path) != len(p.path) {
// should be the exact same path back
t.Errorf("Got a different length path back: %v", newP)
} else {
		for i, pn := range newP.path {
			if pn != p.path[i] {
				t.Errorf("Node %d differed: %v", i, pn)
}
}
}
checkBlockCache(t, config, id, nil, nil)
}
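// expectSyncDirtyBlock sets up the mock expectations for syncing a
// single dirty block: a split check, readying the block (padded by
// padSize bytes), and the final put to the block server. It returns
// the gomock call for the Ready expectation so callers can order
// later expectations after it.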
func expectSyncDirtyBlock(config *ConfigMock, kmd KeyMetadata,
p path, ptr BlockPointer, block *FileBlock, splitAt int64,
padSize int, opsLockHeld bool) *gomock.Call {
branch := MasterBranch
if config.mockDirtyBcache != nil {
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(), ptrMatcher{ptr},
branch).AnyTimes().Return(true)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(), ptrMatcher{ptr},
branch).AnyTimes().Return(block, nil)
} else {
config.DirtyBlockCache().Put(p.Tlf, ptr, branch, block)
}
if !opsLockHeld {
makeBlockStateDirty(config, kmd, p, ptr)
}
c1 := config.mockBsplit.EXPECT().CheckSplit(block).Return(splitAt)
newID := fakeBlockIDAdd(ptr.ID, 100)
// Ideally, we'd use the size of block.Contents at the time
// that Ready() is called, but GoMock isn't expressive enough
// for that.
newEncBuf := make([]byte, len(block.Contents)+padSize)
readyBlockData := ReadyBlockData{
buf: newEncBuf,
}
c2 := config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd}, block).
After(c1).Return(newID, len(block.Contents), readyBlockData, nil)
newPtr := BlockPointer{ID: newID}
if config.mockBcache != nil {
config.mockBcache.EXPECT().Put(ptrMatcher{newPtr}, kmd.TlfID(), block, PermanentEntry).Return(nil)
config.mockBcache.EXPECT().DeletePermanent(newID).Return(nil)
} else {
// Nothing to do, since the cache entry is added and
// removed.
}
config.mockBserv.EXPECT().Put(gomock.Any(), kmd.TlfID(), newID,
gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
return c2
}
func TestSyncDirtyMultiBlocksSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
fileID := fakeBlockID(43)
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
id3 := fakeBlockID(46)
id4 := fakeBlockID(47)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(fileID, uid),
EntryInfo: EntryInfo{
Size: 20,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config,
keybase1.MakeTestUID(0), 0, 5),
makeIFP(id3, rmd, config, uid, 7, 10),
makeIFP(id4, rmd, config, uid, 0, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
lState := makeFBOLockState()
si, err := getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
require.NoError(t, err)
// add the dirty blocks to the unref list
si.op.addWrite(5, 5)
si.op.addWrite(15, 5)
si.unrefs = append(si.unrefs,
makeBI(id2, rmd, config, keybase1.MakeTestUID(0), 5),
makeBI(id4, rmd, config, keybase1.MakeTestUID(0), 5))
	// fsync a; blocks 2 and 4 are dirty
config.DirtyBlockCache().Put(id, fileNode.BlockPointer, p.Branch, fileBlock)
makeBlockStateDirty(config, rmd, p, fileNode.BlockPointer)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// the split is good
pad2 := 5
pad4 := 8
expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[1].BlockPointer,
block2, int64(0), pad2, false)
expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[3].BlockPointer,
block4, int64(0), pad4, false)
// sync 2 blocks, plus their pad sizes
refBytes := uint64((len(block2.Contents) + pad2) +
(len(block4.Contents) + pad4))
	unrefBytes := uint64(5 + 5) // unref'd blocks 2 and 4
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 2)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false, 0,
refBytes, unrefBytes, &newRmd, blocks)
err = config.KBFSOps().Sync(ctx, n)
if err != nil {
t.Errorf("Got unexpected error on sync: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if fileBlock.IPtrs[0].EncodedSize != 5 {
t.Errorf("Indirect pointer encoded size1 wrong: %d", fileBlock.IPtrs[0].EncodedSize)
} else if fileBlock.IPtrs[1].GetWriter() != uid {
t.Errorf("Got unexpected writer: %s", fileBlock.IPtrs[1].GetWriter())
} else if fileBlock.IPtrs[1].EncodedSize != 10 {
t.Errorf("Indirect pointer encoded size2 wrong: %d", fileBlock.IPtrs[1].EncodedSize)
} else if fileBlock.IPtrs[2].EncodedSize != 7 {
t.Errorf("Indirect pointer encoded size3 wrong: %d", fileBlock.IPtrs[2].EncodedSize)
} else if fileBlock.IPtrs[3].EncodedSize != 13 {
t.Errorf("Indirect pointer encoded size4 wrong: %d", fileBlock.IPtrs[3].EncodedSize)
} else {
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
Exec, "", false)
}
checkBlockCache(t, config, id,
append(blocks, rootID, fileBlock.IPtrs[1].ID, fileBlock.IPtrs[3].ID),
nil)
// check the sync op
so, ok := newRmd.data.Changes.Ops[0].(*syncOp)
if !ok {
t.Errorf("Couldn't find the syncOp")
}
refBlocks := []BlockPointer{fileBlock.IPtrs[1].BlockPointer,
fileBlock.IPtrs[3].BlockPointer}
unrefBlocks := []BlockPointer{
makeBP(id2, rmd, config, keybase1.MakeTestUID(0)),
makeBP(id4, rmd, config, keybase1.MakeTestUID(0)),
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
checkOp(t, so.OpCommon, refBlocks, unrefBlocks, updates)
fileUpdate := blockUpdate{fileNode.BlockPointer, newP.path[1].BlockPointer}
if so.File != fileUpdate {
t.Errorf("Incorrect file update in op: %v vs. %v", so.File,
fileUpdate)
}
}
func TestSyncDirtyDupBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(43)
bID := fakeBlockID(44)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
rootBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
aBlock := NewFileBlock().(*FileBlock)
aBlock.Contents = []byte{1, 2, 3, 4, 5}
bBlock := NewFileBlock().(*FileBlock)
bBlock.Contents = aBlock.Contents
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
bNode := pathNode{makeBP(bID, rmd, config, uid), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, bNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
lState := makeFBOLockState()
si, err := getOrCreateSyncInfo(ops, lState, rootBlock.Children["b"])
require.NoError(t, err)
si.op.addWrite(0, 10)
config.DirtyBlockCache().Put(id, bNode.BlockPointer, p.Branch, bBlock)
makeBlockStateDirty(config, rmd, p, bNode.BlockPointer)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
readyBlockData := ReadyBlockData{
buf: []byte{6, 7, 8, 9, 10, 11, 12},
}
config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{rmd}, bBlock).
Return(bID, len(bBlock.Contents), readyBlockData, nil)
refNonce := BlockRefNonce{1}
config.mockCrypto.EXPECT().MakeBlockRefNonce().AnyTimes().
Return(refNonce, nil)
// sync block (but skip the last block)
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 1)
unrefBytes := uint64(1) // unref'd block b
refBytes := uint64(len(readyBlockData.buf))
rootP := path{FolderBranch: p.FolderBranch, path: []pathNode{p.path[0]}}
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "", rootP,
rmd, false, 0, refBytes, unrefBytes, &newRmd, blocks)
blocks = append(blocks, bID)
// manually add b
expectedPath.path = append(expectedPath.path,
pathNode{BlockPointer{ID: aID, BlockContext: BlockContext{RefNonce: refNonce}}, "b"})
// TODO: build a context matcher that can check the refnonce.
config.mockBserv.EXPECT().AddBlockReference(gomock.Any(), rmd.TlfID(),
expectedPath.path[1].ID, gomock.Any()).Return(nil)
// fsync b
err = config.KBFSOps().Sync(ctx, n)
if err != nil {
t.Errorf("Got unexpected error on sync: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
Exec, "", false)
// block b shouldn't be anywhere in the cache
checkBlockCache(t, config, id, append(blocks[0:1], rootID, aID), nil)
// make sure the new blockpointer for b has a non-zero refnonce,
// marking it as a dup
if newP.path[1].RefNonce != refNonce {
t.Errorf("Block was not caught as a dup: %v", newP.path[1])
}
if newP.path[1].Creator != aNode.GetWriter() {
t.Errorf("Creator was not successfully propagated: saw %v, expected %v",
newP.path[1].Creator, aNode.GetWriter())
}
// check the sync op
so, ok := newRmd.data.Changes.Ops[0].(*syncOp)
if !ok {
t.Errorf("Couldn't find the syncOp")
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
checkOp(t, so.OpCommon, nil, nil, updates)
fileUpdate := blockUpdate{bNode.BlockPointer, newP.path[1].BlockPointer}
if so.File != fileUpdate {
t.Errorf("Incorrect file update in op: %v vs. %v", so.File,
fileUpdate)
}
// make sure the write is propagated
checkSyncOp(t, config.Codec(), so,
bNode.BlockPointer, []WriteRange{{Off: 0, Len: 10}})
}
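// putAndCleanAnyBlock expects any block put into the mock cache to
// become retrievable from it (and absent from the dirty cache), and
// allows dirty-cache deletes on the path's branch.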
func putAndCleanAnyBlock(config *ConfigMock, p path) {
config.mockBcache.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), TransientEntry).
Do(func(ptr BlockPointer, tlf TlfID, block Block, lifetime BlockCacheLifetime) {
config.mockDirtyBcache.EXPECT().
Get(gomock.Any(), ptrMatcher{BlockPointer{ID: ptr.ID}},
p.Branch).AnyTimes().Return(nil, NoSuchBlockError{ptr.ID})
config.mockBcache.EXPECT().
Get(ptrMatcher{BlockPointer{ID: ptr.ID}}).
AnyTimes().Return(block, nil)
}).AnyTimes().Return(nil)
config.mockDirtyBcache.EXPECT().Delete(gomock.Any(), gomock.Any(),
p.Branch).AnyTimes().Return(nil)
}
func TestSyncDirtyMultiBlocksSplitInBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
// we need to mock the bcache for this test, because we have to
// capture new file blocks that are created as they are written to
// the cache (in order to expect calls on them)
config.mockBcache = NewMockBlockCache(mockCtrl)
config.SetBlockCache(config.mockBcache)
config.mockDirtyBcache = NewMockDirtyBlockCache(mockCtrl)
config.SetDirtyBlockCache(config.mockDirtyBcache)
config.mockDirtyBcache.EXPECT().UpdateSyncingBytes(gomock.Any(),
gomock.Any()).AnyTimes()
config.mockDirtyBcache.EXPECT().BlockSyncFinished(gomock.Any(),
gomock.Any()).AnyTimes()
config.mockDirtyBcache.EXPECT().SyncFinished(gomock.Any(), gomock.Any())
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
fileID := fakeBlockID(43)
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
id3 := fakeBlockID(46)
id4 := fakeBlockID(47)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(fileID, uid),
EntryInfo: EntryInfo{
Size: 20,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 10, 0),
makeIFP(id2, rmd, config, uid, 0, 5),
makeIFP(id3, rmd, config, uid, 0, 10),
makeIFP(id4, rmd, config, uid, 0, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
lState := makeFBOLockState()
getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
// fsync a, only block 2 is dirty
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{fileBlock.IPtrs[0].BlockPointer},
p.Branch).AnyTimes().Return(false)
makeBlockStateDirty(config, rmd, p, fileNode.BlockPointer)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{fileBlock.IPtrs[2].BlockPointer},
p.Branch).Return(false)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
ptrMatcher{fileBlock.IPtrs[2].BlockPointer}, p.Branch).Return(nil,
NoSuchBlockError{fileBlock.IPtrs[2].BlockPointer.ID})
config.mockBcache.EXPECT().Get(ptrMatcher{fileBlock.IPtrs[2].BlockPointer}).
Return(block3, nil)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{fileBlock.IPtrs[3].BlockPointer},
p.Branch).AnyTimes().Return(false)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{node.BlockPointer}, p.Branch).AnyTimes().Return(true)
makeBlockStateDirty(config, rmd, p, node.BlockPointer)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
ptrMatcher{node.BlockPointer}, p.Branch).
AnyTimes().Return(rootBlock, nil)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{fileNode.BlockPointer}, p.Branch).AnyTimes().Return(true)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
ptrMatcher{fileNode.BlockPointer}, p.Branch).
AnyTimes().Return(fileBlock, nil)
// no matching pointers
config.mockBcache.EXPECT().CheckForKnownPtr(gomock.Any(), gomock.Any()).
AnyTimes().Return(BlockPointer{}, nil)
// the split is in the middle
pad2 := 0
pad3 := 14
extraBytesFor3 := 2
expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[1].BlockPointer,
block2, int64(len(block2.Contents)-extraBytesFor3), pad2, false)
// this causes block 3 to be updated
var newBlock3 *FileBlock
config.mockDirtyBcache.EXPECT().Put(gomock.Any(),
fileBlock.IPtrs[2].BlockPointer, p.Branch, gomock.Any()).
Do(func(id TlfID, ptr BlockPointer, branch BranchName, block Block) {
newBlock3 = block.(*FileBlock)
// id3 syncs just fine
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{ptr}, branch).AnyTimes().Return(true)
expectSyncDirtyBlock(config, rmd, p, ptr, newBlock3, int64(0), pad3,
true)
}).Return(nil)
// id4 is the final block, and the split causes a new block to be made
pad4 := 9
pad5 := 1
c4 := expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[3].BlockPointer,
block4, int64(3), pad4, false)
var newID5 BlockID
var newBlock5 *FileBlock
id5 := fakeBlockID(48)
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id5, nil)
config.mockDirtyBcache.EXPECT().Put(gomock.Any(),
ptrMatcher{BlockPointer{ID: id5}}, p.Branch, gomock.Any()).
Do(func(id TlfID, ptr BlockPointer, branch BranchName, block Block) {
newID5 = ptr.ID
newBlock5 = block.(*FileBlock)
// id5 syncs just fine
expectSyncDirtyBlock(config, rmd, p, ptr, newBlock5, int64(0), pad5,
true)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{ptr}, branch).AnyTimes().Return(true)
}).Return(nil)
// The parent is dirtied too since the pointers changed
config.mockDirtyBcache.EXPECT().Put(gomock.Any(), fileNode.BlockPointer,
p.Branch, gomock.Any()).AnyTimes().Return(nil)
// sync block contents and their padding sizes
refBytes := uint64((len(block2.Contents) + pad2) +
(len(block3.Contents) + extraBytesFor3 + pad3) +
(len(block4.Contents) + pad4) + pad5)
unrefBytes := uint64(0) // no encoded sizes on dirty blocks
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 2)
expectedPath, _ :=
expectSyncBlock(t, config, c4, uid, id, "", p, rmd, false, 0,
refBytes, unrefBytes, &newRmd, blocks)
putAndCleanAnyBlock(config, p)
newID2 := fakeBlockIDAdd(id2, 100)
newID3 := fakeBlockIDAdd(id3, 100)
newID4 := fakeBlockIDAdd(id4, 100)
if err := config.KBFSOps().Sync(ctx, n); err != nil {
t.Errorf("Got unexpected error on sync: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if len(fileBlock.IPtrs) != 5 {
t.Errorf("Wrong number of indirect pointers: %d", len(fileBlock.IPtrs))
} else if fileBlock.IPtrs[0].ID != id1 {
t.Errorf("Indirect pointer id1 wrong: %v", fileBlock.IPtrs[0].ID)
} else if fileBlock.IPtrs[0].EncodedSize != 10 {
t.Errorf("Indirect pointer encoded size1 wrong: %d", fileBlock.IPtrs[0].EncodedSize)
} else if fileBlock.IPtrs[0].Off != 0 {
t.Errorf("Indirect pointer off1 wrong: %d", fileBlock.IPtrs[0].Off)
} else if fileBlock.IPtrs[1].ID != newID2 {
t.Errorf("Indirect pointer id2 wrong: %v", fileBlock.IPtrs[1].ID)
} else if fileBlock.IPtrs[1].EncodedSize != 5 {
t.Errorf("Indirect pointer encoded size2 wrong: %d", fileBlock.IPtrs[1].EncodedSize)
} else if fileBlock.IPtrs[1].Off != 5 {
t.Errorf("Indirect pointer off2 wrong: %d", fileBlock.IPtrs[1].Off)
} else if fileBlock.IPtrs[2].ID != newID3 {
t.Errorf("Indirect pointer id3 wrong: %v", fileBlock.IPtrs[2].ID)
} else if fileBlock.IPtrs[2].EncodedSize != 21 {
t.Errorf("Indirect pointer encoded size3 wrong: %d", fileBlock.IPtrs[2].EncodedSize)
} else if fileBlock.IPtrs[2].Off != 8 {
t.Errorf("Indirect pointer off3 wrong: %d", fileBlock.IPtrs[2].Off)
} else if fileBlock.IPtrs[3].ID != newID4 {
t.Errorf("Indirect pointer id4 wrong: %v", fileBlock.IPtrs[3].ID)
} else if fileBlock.IPtrs[3].EncodedSize != 14 {
t.Errorf("Indirect pointer encoded size4 wrong: %d", fileBlock.IPtrs[3].EncodedSize)
} else if fileBlock.IPtrs[3].Off != 15 {
t.Errorf("Indirect pointer off4 wrong: %d", fileBlock.IPtrs[3].Off)
} else if fileBlock.IPtrs[4].ID != fakeBlockIDAdd(newID5, 100) {
t.Errorf("Indirect pointer id5 wrong: %v", fileBlock.IPtrs[4].ID)
} else if fileBlock.IPtrs[4].EncodedSize != 1 {
t.Errorf("Indirect pointer encoded size5 wrong: %d", fileBlock.IPtrs[4].EncodedSize)
} else if fileBlock.IPtrs[4].Off != 18 {
t.Errorf("Indirect pointer off5 wrong: %d", fileBlock.IPtrs[4].Off)
} else if !bytes.Equal([]byte{10, 9, 8}, block2.Contents) {
t.Errorf("Block 2 has the wrong data: %v", block2.Contents)
} else if !bytes.Equal(
[]byte{7, 6, 15, 14, 13, 12, 11}, newBlock3.Contents) {
t.Errorf("Block 3 has the wrong data: %v", newBlock3.Contents)
} else if !bytes.Equal([]byte{20, 19, 18}, block4.Contents) {
t.Errorf("Block 4 has the wrong data: %v", block4.Contents)
} else if !bytes.Equal([]byte{17, 16}, newBlock5.Contents) {
t.Errorf("Block 5 has the wrong data: %v", newBlock5.Contents)
} else {
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
Exec, "", false)
}
}
func TestSyncDirtyMultiBlocksCopyNextBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
// we need to mock the bcache for this test, because we have to
// capture new file blocks that are created as they are written to
// the cache (in order to expect calls on them)
config.mockBcache = NewMockBlockCache(mockCtrl)
config.SetBlockCache(config.mockBcache)
config.mockDirtyBcache = NewMockDirtyBlockCache(mockCtrl)
config.SetDirtyBlockCache(config.mockDirtyBcache)
config.mockDirtyBcache.EXPECT().UpdateSyncingBytes(gomock.Any(),
gomock.Any()).AnyTimes()
config.mockDirtyBcache.EXPECT().BlockSyncFinished(gomock.Any(),
gomock.Any()).AnyTimes()
config.mockDirtyBcache.EXPECT().SyncFinished(gomock.Any(), gomock.Any())
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
fileID := fakeBlockID(43)
id1 := fakeBlockID(44)
id2 := fakeBlockID(45)
id3 := fakeBlockID(46)
id4 := fakeBlockID(47)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(fileID, uid),
EntryInfo: EntryInfo{
Size: 20,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 0, 0),
makeIFP(id2, rmd, config, uid, 10, 5),
makeIFP(id3, rmd, config, uid, 0, 10),
makeIFP(id4, rmd, config, uid, 15, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
lState := makeFBOLockState()
getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
// fsync a, only block 2 is dirty
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{fileNode.BlockPointer}, p.Branch).AnyTimes().Return(true)
makeBlockStateDirty(config, rmd, p, fileNode.BlockPointer)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
ptrMatcher{fileNode.BlockPointer}, p.Branch).AnyTimes().Return(fileBlock, nil)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{node.BlockPointer}, p.Branch).AnyTimes().Return(true)
makeBlockStateDirty(config, rmd, p, node.BlockPointer)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
ptrMatcher{node.BlockPointer}, p.Branch).
AnyTimes().Return(rootBlock, nil)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
ptrMatcher{fileBlock.IPtrs[1].BlockPointer}, p.Branch).Return(nil,
NoSuchBlockError{fileBlock.IPtrs[1].BlockPointer.ID})
config.mockBcache.EXPECT().Get(ptrMatcher{fileBlock.IPtrs[1].BlockPointer}).
Return(block2, nil)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{fileBlock.IPtrs[1].BlockPointer},
p.Branch).AnyTimes().Return(false)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(),
ptrMatcher{fileBlock.IPtrs[3].BlockPointer}, p.Branch).Return(nil,
NoSuchBlockError{fileBlock.IPtrs[3].BlockPointer.ID})
config.mockBcache.EXPECT().Get(ptrMatcher{fileBlock.IPtrs[3].BlockPointer}).
Return(block4, nil)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{fileBlock.IPtrs[3].BlockPointer},
p.Branch).Return(false)
// no matching pointers
config.mockBcache.EXPECT().CheckForKnownPtr(gomock.Any(), gomock.Any()).
AnyTimes().Return(BlockPointer{}, nil)
// the split is in the middle
pad1 := 14
expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[0].BlockPointer,
block1, int64(-1), pad1, false)
// this causes block 2 to be copied from (copy whole block)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), block2.Contents, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block.Contents, data...)
}).Return(int64(5))
// now block 2 is empty, and should be deleted
// block 3 is dirty too, just copy part of block 4
pad3 := 10
split4At := int64(3)
pad4 := 15
expectSyncDirtyBlock(config, rmd, p, fileBlock.IPtrs[2].BlockPointer,
block3, int64(-1), pad3, false)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), block4.Contents, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block.Contents, data[:3]...)
}).Return(split4At)
var newBlock4 *FileBlock
config.mockDirtyBcache.EXPECT().Put(gomock.Any(),
fileBlock.IPtrs[3].BlockPointer, p.Branch, gomock.Any()).
Do(func(id TlfID, ptr BlockPointer, branch BranchName, block Block) {
newBlock4 = block.(*FileBlock)
// now block 4 is dirty, but it's the end of the line,
// so nothing else to do
expectSyncDirtyBlock(config, rmd, p, ptr, newBlock4, int64(-1),
pad4, true)
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(),
ptrMatcher{ptr}, branch).AnyTimes().Return(false)
}).Return(nil)
// The parent is dirtied too since the pointers changed
config.mockDirtyBcache.EXPECT().Put(gomock.Any(), fileNode.BlockPointer,
p.Branch, gomock.Any()).AnyTimes().Return(nil)
// sync block
refBytes := uint64((len(block1.Contents) + pad1) +
(len(block3.Contents) + pad3) +
(len(block4.Contents) - int(split4At) + pad4))
unrefBytes := uint64(10 + 15) // id2 and id4
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 2)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false, 0,
refBytes, unrefBytes, &newRmd, blocks)
putAndCleanAnyBlock(config, p)
newID1 := fakeBlockIDAdd(id1, 100)
newID3 := fakeBlockIDAdd(id3, 100)
newID4 := fakeBlockIDAdd(id4, 100)
if err := config.KBFSOps().Sync(ctx, n); err != nil {
t.Errorf("Got unexpected error on sync: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if len(fileBlock.IPtrs) != 3 {
t.Errorf("Wrong number of indirect pointers: %d", len(fileBlock.IPtrs))
} else if fileBlock.IPtrs[0].ID != newID1 {
t.Errorf("Indirect pointer id1 wrong: %v", fileBlock.IPtrs[0].ID)
} else if fileBlock.IPtrs[0].EncodedSize != 19 {
t.Errorf("Indirect pointer encoded size1 wrong: %d", fileBlock.IPtrs[0].EncodedSize)
} else if fileBlock.IPtrs[0].Off != 0 {
t.Errorf("Indirect pointer off1 wrong: %d", fileBlock.IPtrs[0].Off)
} else if fileBlock.IPtrs[1].ID != newID3 {
t.Errorf("Indirect pointer id3 wrong: %v", fileBlock.IPtrs[1].ID)
} else if fileBlock.IPtrs[1].EncodedSize != 15 {
t.Errorf("Indirect pointer encoded size3 wrong: %d", fileBlock.IPtrs[1].EncodedSize)
} else if fileBlock.IPtrs[1].Off != 10 {
t.Errorf("Indirect pointer off3 wrong: %d", fileBlock.IPtrs[1].Off)
} else if fileBlock.IPtrs[2].ID != newID4 {
t.Errorf("Indirect pointer id4 wrong: %v", fileBlock.IPtrs[2].ID)
} else if fileBlock.IPtrs[2].EncodedSize != 17 {
t.Errorf("Indirect pointer encoded size4 wrong: %d", fileBlock.IPtrs[2].EncodedSize)
} else if fileBlock.IPtrs[2].Off != 18 {
t.Errorf("Indirect pointer off4 wrong: %d", fileBlock.IPtrs[2].Off)
} else if !bytes.Equal([]byte{5, 4, 3, 2, 1, 10, 9, 8, 7, 6},
block1.Contents) {
t.Errorf("Block 1 has the wrong data: %v", block1.Contents)
} else if !bytes.Equal(
[]byte{15, 14, 13, 12, 11, 20, 19, 18}, block3.Contents) {
t.Errorf("Block 3 has the wrong data: %v", block3.Contents)
} else if !bytes.Equal([]byte{17, 16}, newBlock4.Contents) {
t.Errorf("Block 4 has the wrong data: %v", newBlock4.Contents)
} else {
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
Exec, "", false)
}
}
func TestSyncDirtyWithBlockChangePointerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
aID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
aBlock := NewFileBlock().(*FileBlock)
aBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
lState := makeFBOLockState()
getOrCreateSyncInfo(ops, lState, rootBlock.Children["a"])
// fsync a
config.DirtyBlockCache().Put(id, aNode.BlockPointer, p.Branch, aBlock)
makeBlockStateDirty(config, rmd, p, aNode.BlockPointer)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// override the AnyTimes expect call done by default in expectSyncBlock()
config.mockBsplit.EXPECT().ShouldEmbedBlockChanges(gomock.Any()).
AnyTimes().Return(false)
// sync block
refBytes := uint64(1) // 1 new block changes block
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 2)
expectedPath, lastCall := expectSyncBlock(t, config, nil, uid, id, "", p,
rmd, false, 0, refBytes, 0, &newRmd, blocks)
// expected calls for block changes block
changeBlockID := fakeBlockID(253)
changePlainSize := 1
changeBuf := []byte{253}
changeReadyBlockData := ReadyBlockData{
buf: changeBuf,
}
lastCall = config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{rmd},
gomock.Any()).Return(changeBlockID, changePlainSize,
changeReadyBlockData, nil).After(lastCall)
config.mockBserv.EXPECT().Put(gomock.Any(), rmd.TlfID(), changeBlockID,
gomock.Any(), changeReadyBlockData.buf,
changeReadyBlockData.serverHalf).Return(nil)
// For now, fake the amount copied by using a large number, since
// we don't have easy access here to the actual encoded data. The
// exact return value doesn't matter as long as it's large enough.
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), gomock.Any(), int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(100 * 1024 * 1024))
if err := config.KBFSOps().Sync(ctx, n); err != nil {
t.Errorf("Got unexpected error on sync: %v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if newRmd.data.cachedChanges.Info.ID != changeBlockID {
t.Errorf("Got unexpected changeBlocks pointer: %v vs %v",
newRmd.data.cachedChanges.Info.ID, changeBlockID)
} else {
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
Exec, "", false)
}
checkBlockCache(t, config, id, append(blocks, rootID, changeBlockID), nil)
}
func TestKBFSOpsStatRootSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := fakeBlockID(42)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
n := nodeFromPath(t, ops, p)
_, err := config.KBFSOps().Stat(ctx, n)
if err != nil {
t.Errorf("Error on Stat: %v", err)
}
}
func TestKBFSOpsFailingRootOps(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
u := h.FirstResolvedWriter()
rootID := fakeBlockID(42)
rmd.data.Dir.BlockPointer = makeBP(rootID, rmd, config, u)
node := pathNode{rmd.data.Dir.BlockPointer, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
n := nodeFromPath(t, ops, p)
// TODO: Make sure Read, Write, and Truncate fail also with
// InvalidPathError{}.
err := config.KBFSOps().SetEx(ctx, n, true)
if _, ok := err.(InvalidParentPathError); !ok {
t.Errorf("Unexpected error on SetEx: %v", err)
}
err = config.KBFSOps().SetMtime(ctx, n, &time.Time{})
if _, ok := err.(InvalidParentPathError); !ok {
t.Errorf("Unexpected error on SetMtime: %v", err)
}
// TODO: Sync succeeds, but it should fail. Fix this!
}
type testBGObserver struct {
c chan<- struct{}
}
func (t *testBGObserver) LocalChange(ctx context.Context, node Node,
write WriteRange) {
// ignore
}
func (t *testBGObserver) BatchChanges(ctx context.Context,
changes []NodeChange) {
t.c <- struct{}{}
}
func (t *testBGObserver) TlfHandleChange(ctx context.Context,
newHandle *TlfHandle) {
return
}
// Tests that the background flusher will sync a dirty file if the
// application does not.
func TestKBFSOpsBackgroundFlush(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
// Make sure all MDs get different MD IDs, as otherwise
	// setHeadLocked will panic.
injectShimCrypto(config)
rootID := fakeBlockID(42)
rmd.data.Dir.ID = rootID
fileID := fakeBlockID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
t.Errorf("Got error on write: %v", err)
}
// expect a sync to happen in the background
var newRmd ImmutableRootMetadata
blocks := make([]BlockID, 2)
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false, 0, 0, 0,
&newRmd, blocks)
c := make(chan struct{})
observer := &testBGObserver{c}
config.Notifier().RegisterForChanges([]FolderBranch{{id, MasterBranch}},
observer)
// start the background flusher
go ops.backgroundFlusher(1 * time.Millisecond)
// Make sure we get the notification
<-c
// Make sure we get a sync even if we overwrite (not extend) the file
data[1] = 0
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(len(data)))
// expect another sync to happen in the background
var newRmd2 ImmutableRootMetadata
blocks = make([]BlockID, 2)
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false, 0, 0, 0,
&newRmd2, blocks)
if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
t.Errorf("Got error on write: %v", err)
}
<-c
}
func TestKBFSOpsWriteRenameStat(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Write to it.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %v", err)
}
// Stat it.
ei, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %v", err)
}
if ei.Size != 1 {
t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
}
// Rename it.
err = kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b")
if err != nil {
t.Fatalf("Couldn't rename; %v", err)
}
// Stat it again.
newEi, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %v", err)
}
if ei != newEi {
t.Errorf("Entry info unexpectedly changed from %+v to %+v", ei, newEi)
}
}
func TestKBFSOpsWriteRenameGetDirChildren(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Write to it.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %v", err)
}
// Stat it.
ei, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %v", err)
}
if ei.Size != 1 {
t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
}
// Rename it.
err = kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b")
if err != nil {
t.Fatalf("Couldn't rename; %v", err)
}
// Get the stats via GetDirChildren.
eis, err := kbfsOps.GetDirChildren(ctx, rootNode)
if err != nil {
t.Fatalf("Couldn't stat file: %v", err)
}
if ei != eis["b"] {
t.Errorf("Entry info unexpectedly changed from %+v to %+v",
ei, eis["b"])
}
}
func TestKBFSOpsCreateFileWithArchivedBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Remove the file, which will archive the block
err = kbfsOps.RemoveEntry(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove file: %v", err)
}
// Wait for the archiving to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server")
}
// Create a second file, which will use the same initial block ID
// from the cache, even though it's been archived, and will be
// forced to try again.
_, _, err = kbfsOps.CreateFile(ctx, rootNode, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create second file: %v", err)
}
}
func TestKBFSOpsMultiBlockSyncWithArchivedBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// make blocks small
blockSize := int64(5)
config.BlockSplitter().(*BlockSplitterSimple).maxSize = blockSize
// create a file.
rootNode := GetRootNodeOrBust(t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Write a few blocks
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't sync file: %v", err)
}
// Now overwrite those blocks to archive them
newData := []byte{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
err = kbfsOps.Write(ctx, fileNode, newData, 0)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't sync file: %v", err)
}
// Wait for the archiving to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server")
}
// Now write the original first block, which has been archived,
// and make sure it works.
err = kbfsOps.Write(ctx, fileNode, data[0:blockSize], 0)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't sync file: %v", err)
}
}
type corruptBlockServer struct {
BlockServer
}
func (cbs corruptBlockServer) Get(
ctx context.Context, tlfID TlfID, id BlockID, context BlockContext) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
data, keyServerHalf, err := cbs.BlockServer.Get(ctx, tlfID, id, context)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return append(data, 0), keyServerHalf, nil
}
func TestKBFSOpsFailToReadUnverifiableBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.SetBlockServer(&corruptBlockServer{
BlockServer: config.BlockServer(),
})
// create a file.
rootNode := GetRootNodeOrBust(t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Read using a different "device"
config2 := ConfigAsUser(config, "test_user")
defer CheckConfigAndShutdown(t, config2)
// Shutdown the mdserver explicitly before the state checker tries to run
defer config2.MDServer().Shutdown()
rootNode2 := GetRootNodeOrBust(t, config2, "test_user", false)
// Lookup the file, which should fail on block ID verification
kbfsOps2 := config2.KBFSOps()
_, _, err = kbfsOps2.Lookup(ctx, rootNode2, "a")
if _, ok := err.(kbfshash.HashMismatchError); !ok {
t.Fatalf("Could unexpectedly lookup the file: %v", err)
}
}
// Test that the size of a single empty block doesn't change. If this
// test ever fails, consult max or strib before merging.
func TestKBFSOpsEmptyTlfSize(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// Create a TLF.
rootNode := GetRootNodeOrBust(t, config, "test_user", false)
status, _, err := config.KBFSOps().FolderStatus(ctx,
rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't get folder status: %v", err)
}
if status.DiskUsage != 313 {
t.Fatalf("Disk usage of an empty TLF is no longer 313. " +
"Talk to max or strib about why this matters.")
}
}
type cryptoFixedTlf struct {
Crypto
tlf TlfID
}
func (c cryptoFixedTlf) MakeRandomTlfID(isPublic bool) (TlfID, error) {
return c.tlf, nil
}
// TestKBFSOpsMaliciousMDServerRange tries to trick KBFSOps into
// accepting bad MDs.
func TestKBFSOpsMaliciousMDServerRange(t *testing.T) {
config1, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "mallory")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config1, ctx, cancel)
// Create alice's TLF.
rootNode1 := GetRootNodeOrBust(t, config1, "alice", false)
fb1 := rootNode1.GetFolderBranch()
kbfsOps1 := config1.KBFSOps()
_, _, err := kbfsOps1.CreateFile(ctx, rootNode1, "dummy.txt", false, NoExcl)
require.NoError(t, err)
// Create mallory's fake TLF using the same TLF ID as alice's.
config2 := ConfigAsUser(config1, "mallory")
crypto2 := cryptoFixedTlf{config2.Crypto(), fb1.Tlf}
config2.SetCrypto(crypto2)
mdserver2, err := NewMDServerMemory(mdServerLocalConfigAdapter{config2})
require.NoError(t, err)
config2.MDServer().Shutdown()
config2.SetMDServer(mdserver2)
config2.SetMDCache(NewMDCacheStandard(1))
rootNode2 := GetRootNodeOrBust(t, config2, "alice,mallory", false)
require.Equal(t, fb1.Tlf, rootNode2.GetFolderBranch().Tlf)
kbfsOps2 := config2.KBFSOps()
// Add some operations to get mallory's TLF to have a higher
// MetadataVersion.
_, _, err = kbfsOps2.CreateFile(
ctx, rootNode2, "dummy.txt", false, NoExcl)
require.NoError(t, err)
err = kbfsOps2.RemoveEntry(ctx, rootNode2, "dummy.txt")
require.NoError(t, err)
// Now route alice's TLF to mallory's MD server.
config1.SetMDServer(mdserver2.copy(mdServerLocalConfigAdapter{config1}))
// Simulate the server triggering alice to update.
config1.SetKeyCache(NewKeyCacheStandard(1))
err = kbfsOps1.SyncFromServerForTesting(ctx, fb1)
// TODO: We can actually fake out the PrevRoot pointer, too
// and then we'll be caught by the handle check. But when we
// have MDOps do the handle check, that'll trigger first.
require.IsType(t, MDPrevRootMismatch{}, err)
}
| 1 | 14,127 | Won't this happen automatically when we flip the default version in the config? | keybase-kbfs | go |
@@ -312,7 +312,7 @@ func TestLocalExec(t *testing.T) {
err = app.Exec("web", true, "pwd")
assert.NoError(err)
out := stdout()
- assert.Contains(out, "/var/www/html/docroot")
+ assert.Contains(out, "/var/www/html")
stdout = testcommon.CaptureStdOut()
switch app.GetType() { | 1 | package platform
import (
"fmt"
"path/filepath"
"testing"
"time"
"os"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/testcommon"
"github.com/drud/ddev/pkg/util"
"github.com/stretchr/testify/assert"
)
var (
TestSites = []testcommon.TestSite{
{
Name: "TestMainPkgDrupal8",
SourceURL: "https://github.com/drud/drupal8/archive/v0.6.0.tar.gz",
ArchiveInternalExtractionPath: "drupal8-0.6.0/",
FilesTarballURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/files.tar.gz",
FilesZipballURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/files.zip",
DBTarURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/db.tar.gz",
DBZipURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/db.zip",
FullSiteTarballURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/site.tar.gz",
},
{
Name: "TestMainPkgWordpress",
SourceURL: "https://github.com/drud/wordpress/archive/v0.4.0.tar.gz",
ArchiveInternalExtractionPath: "wordpress-0.4.0/",
FilesTarballURL: "https://github.com/drud/wordpress/releases/download/v0.4.0/files.tar.gz",
DBTarURL: "https://github.com/drud/wordpress/releases/download/v0.4.0/db.tar.gz",
},
{
Name: "TestMainPkgDrupalKickstart",
SourceURL: "https://github.com/drud/drupal-kickstart/archive/v0.4.0.tar.gz",
ArchiveInternalExtractionPath: "drupal-kickstart-0.4.0/",
FilesTarballURL: "https://github.com/drud/drupal-kickstart/releases/download/v0.4.0/files.tar.gz",
DBTarURL: "https://github.com/drud/drupal-kickstart/releases/download/v0.4.0/db.tar.gz",
FullSiteTarballURL: "https://github.com/drud/drupal-kickstart/releases/download/v0.4.0/site.tar.gz",
},
}
)
func TestMain(m *testing.M) {
if len(GetApps()) > 0 {
log.Fatalf("Local plugin tests require no sites running. You have %v site(s) running.", len(GetApps()))
}
for i := range TestSites {
err := TestSites[i].Prepare()
if err != nil {
log.Fatalf("Prepare() failed on TestSite.Prepare(), err=%v", err)
}
}
log.Debugln("Running tests.")
testRun := m.Run()
for i := range TestSites {
TestSites[i].Cleanup()
}
os.Exit(testRun)
}
// TestLocalSetup reduces the TestSite list on shorter test runs.
func TestLocalSetup(t *testing.T) {
// Allow tests to run in "short" mode, which will only test a single site. This keeps test runtimes low.
// We would much prefer to do this in TestMain, but the Short() flag is not yet available at that point.
if testing.Short() {
TestSites = []testcommon.TestSite{TestSites[0]}
}
}
// TestLocalStart tests the functionality that is called when "ddev start" is executed
func TestLocalStart(t *testing.T) {
// ensure we have docker network
client := dockerutil.GetDockerClient()
err := dockerutil.EnsureNetwork(client, dockerutil.NetName)
if err != nil {
log.Fatal(err)
}
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalStart", site.Name))
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
err = app.Start()
assert.NoError(err)
err = app.Wait("web")
assert.NoError(err)
// ensure docker-compose.yaml exists inside .ddev site folder
composeFile := fileutil.FileExists(app.DockerComposeYAMLPath())
assert.True(composeFile)
for _, containerType := range [3]string{"web", "db", "dba"} {
containerName, err := constructContainerName(containerType, app)
assert.NoError(err)
check, err := testcommon.ContainerCheck(containerName, "running")
assert.NoError(err)
assert.True(check, containerType, "container is running")
}
runTime()
cleanup()
}
// try to start a site of same name at different path
another := TestSites[0]
err = another.Prepare()
if err != nil {
assert.FailNow("TestLocalStart: Prepare() failed on another.Prepare(), err=%v", err)
return
}
err = app.Init(another.Dir)
assert.Error(err)
assert.Contains(err.Error(), fmt.Sprintf("container in running state already exists for %s that was created at %s", TestSites[0].Name, TestSites[0].Dir))
another.Cleanup()
}
// TestGetApps tests the GetApps function to ensure it accurately returns a list of running applications.
func TestGetApps(t *testing.T) {
assert := assert.New(t)
apps := GetApps()
assert.Equal(len(apps["local"]), len(TestSites))
for _, site := range TestSites {
var found bool
for _, siteInList := range apps["local"] {
if site.Name == siteInList.GetName() {
found = true
break
}
}
assert.True(found, "Found site %s in list", site.Name)
}
}
// TestLocalImportDB tests the functionality that is called when "ddev import-db" is executed
func TestLocalImportDB(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
testDir, _ := os.Getwd()
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalImportDB", site.Name))
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
// Test simple db loads.
for _, file := range []string{"users.sql", "users.sql.gz", "users.sql.tar", "users.sql.tar.gz", "users.sql.tgz", "users.sql.zip"} {
path := filepath.Join(testDir, "testdata", file)
err = app.ImportDB(path, "")
assert.NoError(err, "Failed to app.ImportDB path: %s err: %v", path, err)
}
if site.DBTarURL != "" {
err = app.Exec("db", true, "mysql", "-e", "DROP DATABASE db;")
assert.NoError(err)
err = app.Exec("db", true, "mysql", "information_schema", "-e", "CREATE DATABASE db;")
assert.NoError(err)
dbPath := filepath.Join(testcommon.CreateTmpDir("local-db"), "db.tar.gz")
err := util.DownloadFile(dbPath, site.DBTarURL)
assert.NoError(err)
err = app.ImportDB(dbPath, "")
assert.NoError(err)
stdout := testcommon.CaptureStdOut()
err = app.Exec("db", true, "mysql", "-e", "SHOW TABLES;")
assert.NoError(err)
out := stdout()
assert.Contains(string(out), "Tables_in_db")
assert.False(strings.Contains(string(out), "Empty set"))
err = os.Remove(dbPath)
assert.NoError(err)
}
if site.DBZipURL != "" {
err = app.Exec("db", true, "mysql", "-e", "DROP DATABASE db;")
assert.NoError(err)
err = app.Exec("db", true, "mysql", "information_schema", "-e", "CREATE DATABASE db;")
assert.NoError(err)
dbZipPath := filepath.Join(testcommon.CreateTmpDir("local-db-zip"), "db.zip")
err = util.DownloadFile(dbZipPath, site.DBZipURL)
assert.NoError(err)
err = app.ImportDB(dbZipPath, "")
assert.NoError(err)
stdout := testcommon.CaptureStdOut()
err = app.Exec("db", true, "mysql", "-e", "SHOW TABLES;")
assert.NoError(err)
out := stdout()
assert.Contains(string(out), "Tables_in_db")
assert.False(strings.Contains(string(out), "Empty set"))
err = os.Remove(dbZipPath)
assert.NoError(err)
}
if site.FullSiteTarballURL != "" {
err = app.Exec("db", true, "mysql", "-e", "DROP DATABASE db;")
assert.NoError(err)
err = app.Exec("db", true, "mysql", "information_schema", "-e", "CREATE DATABASE db;")
assert.NoError(err)
siteTarPath := filepath.Join(testcommon.CreateTmpDir("local-site-tar"), "site.tar.gz")
err = util.DownloadFile(siteTarPath, site.FullSiteTarballURL)
assert.NoError(err)
err = app.ImportDB(siteTarPath, "data.sql")
assert.NoError(err)
err = os.Remove(siteTarPath)
assert.NoError(err)
}
runTime()
cleanup()
}
}
// TestLocalImportFiles tests the functionality that is called when "ddev import-files" is executed
func TestLocalImportFiles(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalImportFiles", site.Name))
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
if site.FilesTarballURL != "" {
filePath := filepath.Join(testcommon.CreateTmpDir("local-tarball-files"), "files.tar.gz")
err := util.DownloadFile(filePath, site.FilesTarballURL)
assert.NoError(err)
err = app.ImportFiles(filePath, "")
assert.NoError(err)
err = os.Remove(filePath)
assert.NoError(err)
}
if site.FilesZipballURL != "" {
filePath := filepath.Join(testcommon.CreateTmpDir("local-zipball-files"), "files.zip")
err := util.DownloadFile(filePath, site.FilesZipballURL)
assert.NoError(err)
err = app.ImportFiles(filePath, "")
assert.NoError(err)
err = os.Remove(filePath)
assert.NoError(err)
}
if site.FullSiteTarballURL != "" {
siteTarPath := filepath.Join(testcommon.CreateTmpDir("local-site-tar"), "site.tar.gz")
err = util.DownloadFile(siteTarPath, site.FullSiteTarballURL)
assert.NoError(err)
err = app.ImportFiles(siteTarPath, "docroot/sites/default/files")
assert.NoError(err)
err = os.Remove(siteTarPath)
assert.NoError(err)
}
runTime()
cleanup()
}
}
// TestLocalExec tests the execution of commands inside a docker container of a site.
func TestLocalExec(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalExec", site.Name))
err := app.Init(site.Dir)
assert.NoError(err)
stdout := testcommon.CaptureStdOut()
err = app.Exec("web", true, "pwd")
assert.NoError(err)
out := stdout()
assert.Contains(out, "/var/www/html/docroot")
stdout = testcommon.CaptureStdOut()
switch app.GetType() {
case "drupal7":
fallthrough
case "drupal8":
err := app.Exec("web", true, "drush", "status")
assert.NoError(err)
case "wordpress":
err = app.Exec("web", true, "wp", "--info")
assert.NoError(err)
default:
}
out = stdout()
assert.Contains(string(out), "/etc/php/7.0/cli/php.ini")
runTime()
cleanup()
}
}
// TestLocalLogs tests the container log output functionality.
func TestLocalLogs(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalLogs", site.Name))
err := app.Init(site.Dir)
assert.NoError(err)
stdout := testcommon.CaptureStdOut()
err = app.Logs("web", false, false, "")
assert.NoError(err)
out := stdout()
assert.Contains(out, "Server started")
stdout = testcommon.CaptureStdOut()
err = app.Logs("db", false, false, "")
assert.NoError(err)
out = stdout()
assert.Contains(out, "Database initialized")
stdout = testcommon.CaptureStdOut()
err = app.Logs("db", false, false, "2")
assert.NoError(err)
out = stdout()
assert.Contains(out, "MySQL init process done. Ready for start up.")
assert.False(strings.Contains(out, "Database initialized"))
runTime()
cleanup()
}
}
// TestLocalStop tests the functionality that is called when "ddev stop" is executed
func TestLocalStop(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalStop", site.Name))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
err = app.Stop()
assert.NoError(err)
for _, containerType := range [3]string{"web", "db", "dba"} {
containerName, err := constructContainerName(containerType, app)
assert.NoError(err)
check, err := testcommon.ContainerCheck(containerName, "exited")
assert.NoError(err)
assert.True(check, containerType, "container has exited")
}
runTime()
cleanup()
}
}
// TestDescribeStopped tests that the describe command works properly on a stopped site.
func TestDescribeStopped(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
out, err := app.Describe()
assert.NoError(err)
assert.Contains(out, SiteStopped, "Output did not include the word stopped when describing a stopped site.")
cleanup()
}
}
// TestLocalRemove tests the functionality that is called when "ddev rm" is executed
func TestLocalRemove(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
// start the previously stopped containers -
// stopped/removed have the same state
err = app.Start()
assert.NoError(err)
err = app.Wait("web")
assert.NoError(err)
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalRemove", site.Name))
err = app.Down()
assert.NoError(err)
for _, containerType := range [3]string{"web", "db", "dba"} {
_, err := constructContainerName(containerType, app)
assert.Error(err, "Received error on containerName search: ", err)
}
runTime()
cleanup()
}
}
// TestCleanupWithoutCompose ensures app containers can be properly cleaned up without a docker-compose config file present.
func TestCleanupWithoutCompose(t *testing.T) {
assert := assert.New(t)
site := TestSites[0]
revertDir := site.Chdir()
app, err := GetPluginApp("local")
assert.NoError(err)
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
// Start a site so we have something to cleanup
err = app.Start()
assert.NoError(err)
err = app.Wait("web")
assert.NoError(err)
// Call the Cleanup command()
err = Cleanup(app)
assert.NoError(err)
for _, containerType := range [3]string{"web", "db", "dba"} {
_, err := constructContainerName(containerType, app)
assert.Error(err)
}
revertDir()
}
// TestGetAppsEmpty ensures that GetApps returns an empty list when no applications are running.
func TestGetAppsEmpty(t *testing.T) {
assert := assert.New(t)
apps := GetApps()
assert.Equal(len(apps["local"]), 0)
}
// TestRouterNotRunning ensures the router is shut down after all sites are stopped.
func TestRouterNotRunning(t *testing.T) {
assert := assert.New(t)
containers, err := dockerutil.GetDockerContainers(false)
assert.NoError(err)
for _, container := range containers {
assert.NotEqual(dockerutil.ContainerName(container), "ddev-router", "Failed to find ddev-router container running")
}
}
// constructContainerName builds a container name given the type (web/db/dba) and the app
func constructContainerName(containerType string, app App) (string, error) {
container, err := app.FindContainerByType(containerType)
if err != nil {
return "", err
}
name := dockerutil.ContainerName(container)
return name, nil
}
| 1 | 11,402 | This would also change back to /var/www/html/docroot if we go that way. | drud-ddev | go |
@@ -0,0 +1,7 @@
+import { getImplicitRole } from '../commons/aria';
+
+function hasImplicitChromiumRoleMatches(node, virtualNode) {
+ return getImplicitRole(virtualNode) !== null;
+}
+
+export default hasImplicitChromiumRoleMatches; | 1 | 1 | 16,569 | This still needs to use the new option. We'll need tests for it too. | dequelabs-axe-core | js |
|
@@ -0,0 +1,16 @@
+#appModules/egui.py
+#A part of NonVisual Desktop Access (NVDA)
+#Copyright (C) 2020 Pavol Kecskemety <[email protected]>
+#This file is covered by the GNU General Public License.
+#See the file COPYING for more details.
+
+import appModuleHandler
+
+class AppModule(appModuleHandler.AppModule):
+
+ def event_NVDAObject_init(self, obj):
+ obj.description = None
+ obj.shouldAllowIAccessibleFocusEvent=True
+
+ if obj.name == obj.value:
+ obj.value = None | 1 | 1 | 28,823 | We are no longer including file name in the source files. | nvaccess-nvda | py |
|
@@ -4,6 +4,9 @@ class ApplicationController < ActionController::Base
# Look for template overrides before rendering
before_filter :prepend_view_paths
+ # Set current user (see set_current_user)
+ before_filter :set_current_user
+
include GlobalHelpers
include Pundit
helper_method GlobalHelpers.instance_methods | 1 | class ApplicationController < ActionController::Base
protect_from_forgery with: :exception
# Look for template overrides before rendering
before_filter :prepend_view_paths
include GlobalHelpers
include Pundit
helper_method GlobalHelpers.instance_methods
rescue_from Pundit::NotAuthorizedError, with: :user_not_authorized
def user_not_authorized
if user_signed_in?
redirect_to plans_url, notice: _('You are not authorized to perform this action.')
else
redirect_to root_url, alert: _('You need to sign in or sign up before continuing.')
end
end
before_filter :set_gettext_locale
after_filter :store_location
# Sets FastGettext locale for every request made
def set_gettext_locale
FastGettext.locale = session[:locale] || FastGettext.default_locale
end
# PATCH /locale/:locale REST method
def set_locale_session
if FastGettext.default_available_locales.include?(params[:locale])
session[:locale] = params[:locale]
end
redirect_to(request.referer || root_path) #redirects the user to URL where she/he was when the request to this resource was made or root if none is encountered
end
def store_location
# store last url - this is needed for post-login redirect to whatever the user last visited.
unless ["/users/sign_in",
"/users/sign_up",
"/users/password",
"/users/invitation/accept",
].any? { |ur| request.fullpath.include?(ur) } \
or request.xhr? # don't store ajax calls
session[:previous_url] = request.fullpath
end
end
def after_sign_in_path_for(resource)
session[:previous_url] || root_path
end
def after_sign_up_path_for(resource)
session[:previous_url] || root_path
end
def after_sign_in_error_path_for(resource)
session[:previous_url] || root_path
end
def after_sign_up_error_path_for(resource)
session[:previous_url] || root_path
end
def authenticate_admin!
# currently if admin has any super-admin task, they can view the super-admin
redirect_to root_path unless user_signed_in? && (current_user.can_add_orgs? || current_user.can_change_org? || current_user.can_super_admin?)
end
def failed_create_error(obj, obj_name)
"#{_('Could not create your %{o}.') % {o: obj_name}} #{errors_to_s(obj)}"
end
def failed_update_error(obj, obj_name)
"#{_('Could not update your %{o}.') % {o: obj_name}} #{errors_to_s(obj)}"
end
def failed_destroy_error(obj, obj_name)
"#{_('Could not delete the %{o}.') % {o: obj_name}} #{errors_to_s(obj)}"
end
private
# Override rails default render action to look for a branded version of a
# template instead of using the default one. If no override exists, the
# default version in ./app/views/[:controller]/[:action] will be used
#
# The path in the app/views/branded/ directory must match the the file it is
# replacing. For example:
# app/views/branded/layouts/_header.html.erb -> app/views/layouts/_header.html.erb
def prepend_view_paths
prepend_view_path "app/views/branded"
end
def errors_to_s(obj)
if obj.errors.count > 0
msg = "<br />"
obj.errors.each do |e,m|
if m.include?('empty') || m.include?('blank')
msg += "#{_(e)} - #{_(m)}<br />"
else
msg += "'#{obj[e]}' - #{_(m)}<br />"
end
end
msg
end
end
##
# Sign out of Shibboleth SP local session too.
# -------------------------------------------------------------
def after_sign_out_path_for(resource_or_scope)
if Rails.application.config.shibboleth_enabled
return Rails.application.config.shibboleth_logout_url + root_url
super
else
super
end
end
# -------------------------------------------------------------
end
| 1 | 17,609 | no need for this. Devise provides us with `current_user` and `user_signed_in?` helpers. | DMPRoadmap-roadmap | rb |
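To illustrate the Devise helpers this review comment mentions, here is a minimal hypothetical sketch (the controller and action names are assumed, not taken from the DMPRoadmap codebase). Devise mixes current_user and user_signed_in? into every controller, so a custom set_current_user before_filter is redundant:

# Hypothetical controller, for illustration only.
class SketchController < ApplicationController
  def show
    if user_signed_in?
      # current_user is the User record Devise authenticated for this request.
      @owner = current_user
    else
      redirect_to root_url, alert: 'You need to sign in or sign up before continuing.'
    end
  end
end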
@@ -169,4 +169,8 @@ public class EthProtocol implements SubProtocol {
public static final int V65 = 65;
public static final int V66 = 66;
}
+
+ public static boolean isEth66Compatible(final Capability capability) {
+ return capability.getName().equals(NAME) && capability.getVersion() >= ETH66.getVersion();
+ }
} | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth;
import static java.util.stream.Collectors.toUnmodifiableList;
import org.hyperledger.besu.ethereum.eth.messages.EthPV62;
import org.hyperledger.besu.ethereum.eth.messages.EthPV63;
import org.hyperledger.besu.ethereum.eth.messages.EthPV65;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.Capability;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.SubProtocol;
import java.util.List;
import java.util.Set;
import java.util.stream.Stream;
/**
* Eth protocol messages as defined in
 * https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol#new-model-syncing-pv62
*/
public class EthProtocol implements SubProtocol {
public static final String NAME = "eth";
public static final Capability ETH62 = Capability.create(NAME, EthVersion.V62);
public static final Capability ETH63 = Capability.create(NAME, EthVersion.V63);
public static final Capability ETH64 = Capability.create(NAME, EthVersion.V64);
public static final Capability ETH65 = Capability.create(NAME, EthVersion.V65);
public static final Capability ETH66 = Capability.create(NAME, EthVersion.V66);
private static final EthProtocol INSTANCE = new EthProtocol();
private static final List<Integer> eth62Messages =
List.of(
EthPV62.STATUS,
EthPV62.NEW_BLOCK_HASHES,
EthPV62.TRANSACTIONS,
EthPV62.GET_BLOCK_HEADERS,
EthPV62.BLOCK_HEADERS,
EthPV62.GET_BLOCK_BODIES,
EthPV62.BLOCK_BODIES,
EthPV62.NEW_BLOCK);
private static final List<Integer> eth63Messages =
Stream.concat(
eth62Messages.stream(),
Stream.of(
EthPV63.GET_NODE_DATA, EthPV63.NODE_DATA, EthPV63.GET_RECEIPTS, EthPV63.RECEIPTS))
.collect(toUnmodifiableList());
private static final List<Integer> eth65Messages =
Stream.concat(
eth63Messages.stream(),
Stream.of(
EthPV65.NEW_POOLED_TRANSACTION_HASHES,
EthPV65.GET_POOLED_TRANSACTIONS,
EthPV65.POOLED_TRANSACTIONS))
.collect(toUnmodifiableList());
public static boolean requestIdCompatible(final int code) {
return Set.of(
EthPV62.GET_BLOCK_HEADERS,
EthPV62.BLOCK_HEADERS,
EthPV62.GET_BLOCK_BODIES,
EthPV62.BLOCK_BODIES,
EthPV65.GET_POOLED_TRANSACTIONS,
EthPV65.POOLED_TRANSACTIONS,
EthPV63.GET_NODE_DATA,
EthPV63.NODE_DATA,
EthPV63.GET_RECEIPTS,
EthPV63.RECEIPTS)
.contains(code);
}
@Override
public String getName() {
return NAME;
}
@Override
public int messageSpace(final int protocolVersion) {
switch (protocolVersion) {
case EthVersion.V62:
return 8;
case EthVersion.V63:
case EthVersion.V64:
case EthVersion.V65:
case EthVersion.V66:
// same number of messages in each range, eth65 defines messages in the middle of the
// range defined by eth63 and eth64 defines no new ranges.
return 17;
default:
return 0;
}
}
@Override
public boolean isValidMessageCode(final int protocolVersion, final int code) {
switch (protocolVersion) {
case EthVersion.V62:
return eth62Messages.contains(code);
case EthVersion.V63:
case EthVersion.V64:
return eth63Messages.contains(code);
case EthVersion.V65:
case EthVersion.V66:
return eth65Messages.contains(code);
default:
return false;
}
}
@Override
public String messageName(final int protocolVersion, final int code) {
switch (code) {
case EthPV62.STATUS:
return "Status";
case EthPV62.NEW_BLOCK_HASHES:
return "NewBlockHashes";
case EthPV62.TRANSACTIONS:
return "Transactions";
case EthPV62.GET_BLOCK_HEADERS:
return "GetBlockHeaders";
case EthPV62.BLOCK_HEADERS:
return "BlockHeaders";
case EthPV62.GET_BLOCK_BODIES:
return "GetBlockBodies";
case EthPV62.BLOCK_BODIES:
return "BlockBodies";
case EthPV62.NEW_BLOCK:
return "NewBlock";
case EthPV65.NEW_POOLED_TRANSACTION_HASHES:
return "NewPooledTransactionHashes";
case EthPV65.GET_POOLED_TRANSACTIONS:
return "GetPooledTransactions";
case EthPV65.POOLED_TRANSACTIONS:
return "PooledTransactions";
case EthPV63.GET_NODE_DATA:
return "GetNodeData";
case EthPV63.NODE_DATA:
return "NodeData";
case EthPV63.GET_RECEIPTS:
return "GetReceipts";
case EthPV63.RECEIPTS:
return "Receipts";
default:
return INVALID_MESSAGE_NAME;
}
}
public static EthProtocol get() {
return INSTANCE;
}
public static class EthVersion {
public static final int V62 = 62;
public static final int V63 = 63;
public static final int V64 = 64;
public static final int V65 = 65;
public static final int V66 = 66;
}
}
| 1 | 25,902 | Would `Object.equals(capability.getName(), NAME)` be safer, as you wouldn't have to do null checks? | hyperledger-besu | java |
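A minimal sketch of the null-safe comparison suggested in this review comment; `Object.equals` presumably refers to java.util.Objects.equals, and the class below is a hypothetical standalone example, not Besu code:

import java.util.Objects;

// Hypothetical demonstration class, not part of Besu.
public class NullSafeEqualsSketch {
  static final String NAME = "eth";

  // Objects.equals(a, b) returns false instead of throwing when a is null,
  // so a null capability name cannot cause a NullPointerException the way
  // capability.getName().equals(NAME) could.
  static boolean isEthCapability(final String capabilityName) {
    return Objects.equals(capabilityName, NAME);
  }

  public static void main(final String[] args) {
    System.out.println(isEthCapability("eth")); // true
    System.out.println(isEthCapability(null));  // false, no NullPointerException
  }
}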
@@ -193,6 +193,7 @@ RSpec.describe RSpec::Core::Example, :parent_metadata => 'sample' do
expect(RSpec::Matchers).not_to receive(:generated_description)
example_group.example { assert 5 == 5 }
example_group.run
+ RSpec::Mocks.space.reset_all
end
it "uses the file and line number" do | 1 | require 'pp'
require 'stringio'
RSpec.describe RSpec::Core::Example, :parent_metadata => 'sample' do
let(:example_group) do
RSpec.describe('group description')
end
let(:example_instance) do
example_group.example('example description') { }
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
example = example_group.example('example description', *args)
example.metadata
end
end
it "can be pretty printed" do
expect { ignoring_warnings { pp example_instance }}.to output(/RSpec::Core::Example/).to_stdout
end
describe "#exception" do
it "supplies the first exception raised, if any" do
RSpec.configuration.output_stream = StringIO.new
example = example_group.example { raise "first" }
example_group.after { raise "second" }
example_group.run
expect(example.exception.message).to eq("first")
end
it "returns nil if there is no exception" do
example = example_group.example('example') { }
example_group.run
expect(example.exception).to be_nil
end
end
describe "when there is an explicit description" do
context "when RSpec.configuration.format_docstrings is set to a block" do
it "formats the description using the block" do
RSpec.configuration.format_docstrings { |s| s.strip }
example = example_group.example(' an example with whitespace ') {}
example_group.run
expect(example.description).to eql('an example with whitespace')
end
end
end
describe "when there is no explicit description" do
def expect_with(*frameworks)
if frameworks.include?(:stdlib)
example_group.class_exec do
def assert(val)
raise "Expected #{val} to be true" unless val
end
end
end
end
context "when RSpec.configuration.format_docstrings is set to a block" do
it "formats the description using the block" do
RSpec.configuration.format_docstrings { |s| s.upcase }
example_group.example { }
example_group.run
pattern = /EXAMPLE AT #{relative_path(__FILE__).upcase}:#{__LINE__ - 2}/
expect(example_group.examples.first.description).to match(pattern)
end
end
context "when `expect_with :rspec` is configured" do
before(:each) { expect_with :rspec }
it "uses the matcher-generated description" do
example_group.example { expect(5).to eq(5) }
example_group.run
expect(example_group.examples.first.description).to eq("should eq 5")
end
it "uses the matcher-generated description in the full description" do
example_group.example { expect(5).to eq(5) }
example_group.run
expect(example_group.examples.first.full_description).to eq("group description should eq 5")
end
it "uses the file and line number if there is no matcher-generated description" do
example = example_group.example {}
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 2}/)
end
it "uses the file and line number if there is an error before the matcher" do
example = example_group.example { expect(5).to eq(5) }
example_group.before { raise }
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 3}/)
end
context "if the example is pending" do
it "still uses the matcher-generated description if a matcher ran" do
example = example_group.example { pending; expect(4).to eq(5) }
example_group.run
expect(example.description).to eq("should eq 5")
end
it "uses the file and line number of the example if no matcher ran" do
example = example_group.example { pending; fail }
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 2}/)
end
end
context "when an `after(:example)` hook raises an error" do
it 'still assigns the description' do
ex = nil
RSpec.describe do
ex = example { expect(2).to eq(2) }
after { raise "boom" }
end.run
expect(ex.description).to eq("should eq 2")
end
end
context "when the matcher's `description` method raises an error" do
description_line = __LINE__ + 3
RSpec::Matchers.define :matcher_with_failing_description do
match { true }
description { raise ArgumentError, "boom" }
end
it 'allows the example to pass and surfaces the failing description in the example description' do
ex = nil
RSpec.describe do
ex = example { expect(2).to matcher_with_failing_description }
end.run
expect(ex).to pass.and have_attributes(:description => a_string_including(
"example at #{ex.location}",
"ArgumentError",
"boom",
"#{__FILE__}:#{description_line}"
))
end
end
context "when an `after(:example)` hook has an expectation" do
it "assigns the description based on the example's last expectation, ignoring the `after` expectation since it can apply to many examples" do
ex = nil
RSpec.describe do
ex = example { expect(nil).to be_nil }
after { expect(true).to eq(true) }
end.run
expect(ex).to pass.and have_attributes(:description => "should be nil")
end
end
end
context "when `expect_with :rspec, :stdlib` is configured" do
before(:each) { expect_with :rspec, :stdlib }
it "uses the matcher-generated description" do
example_group.example { expect(5).to eq(5) }
example_group.run
expect(example_group.examples.first.description).to eq("should eq 5")
end
it "uses the file and line number if there is no matcher-generated description" do
example = example_group.example {}
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 2}/)
end
it "uses the file and line number if there is an error before the matcher" do
example = example_group.example { expect(5).to eq(5) }
example_group.before { raise }
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 3}/)
end
end
context "when `expect_with :stdlib` is configured" do
before(:each) { expect_with :stdlib }
it "does not attempt to get the generated description from RSpec::Matchers" do
expect(RSpec::Matchers).not_to receive(:generated_description)
example_group.example { assert 5 == 5 }
example_group.run
end
it "uses the file and line number" do
example = example_group.example { assert 5 == 5 }
example_group.run
expect(example.description).to match(/example at #{relative_path(__FILE__)}:#{__LINE__ - 2}/)
end
end
end
describe "#described_class" do
it "returns the class (if any) of the outermost example group" do
expect(described_class).to eq(RSpec::Core::Example)
end
end
describe "accessing metadata within a running example" do
it "has a reference to itself when running" do |ex|
expect(ex.description).to eq("has a reference to itself when running")
end
it "can access the example group's top level metadata as if it were its own" do |ex|
expect(ex.example_group.metadata).to include(:parent_metadata => 'sample')
expect(ex.metadata).to include(:parent_metadata => 'sample')
end
end
describe "accessing options within a running example" do
it "can look up option values by key", :demo => :data do |ex|
expect(ex.metadata[:demo]).to eq(:data)
end
end
describe "#run" do
it "sets its reference to the example group instance to nil" do
group = RSpec.describe do
example('example') { expect(1).to eq(1) }
end
group.run
expect(group.examples.first.instance_variable_get("@example_group_instance")).to be_nil
end
it "generates a description before tearing down mocks in case a mock object is used in the description" do
group = RSpec.describe do
example { test = double('Test'); expect(test).to eq test }
end
expect(RSpec::Matchers).to receive(:generated_description).and_call_original.ordered
expect(RSpec::Mocks).to receive(:teardown).and_call_original.ordered
group.run
end
it "runs after(:each) when the example passes" do
after_run = false
group = RSpec.describe do
after(:each) { after_run = true }
example('example') { expect(1).to eq(1) }
end
group.run
expect(after_run).to be_truthy, "expected after(:each) to be run"
end
it "runs after(:each) when the example fails" do
after_run = false
group = RSpec.describe do
after(:each) { after_run = true }
example('example') { expect(1).to eq(2) }
end
group.run
expect(after_run).to be_truthy, "expected after(:each) to be run"
end
it "runs after(:each) when the example raises an Exception" do
after_run = false
group = RSpec.describe do
after(:each) { after_run = true }
example('example') { raise "this error" }
end
group.run
expect(after_run).to be_truthy, "expected after(:each) to be run"
end
context "with an after(:each) that raises" do
it "runs subsequent after(:each)'s" do
after_run = false
group = RSpec.describe do
after(:each) { after_run = true }
after(:each) { raise "FOO" }
example('example') { expect(1).to eq(1) }
end
group.run
expect(after_run).to be_truthy, "expected after(:each) to be run"
end
it "stores the exception" do
group = RSpec.describe
group.after(:each) { raise "FOO" }
example = group.example('example') { expect(1).to eq(1) }
group.run
expect(example.execution_result.exception.message).to eq("FOO")
end
end
it "wraps before/after(:each) inside around" do
results = []
group = RSpec.describe do
around(:each) do |e|
results << "around (before)"
e.run
results << "around (after)"
end
before(:each) { results << "before" }
after(:each) { results << "after" }
example { results << "example" }
end
group.run
expect(results).to eq([
"around (before)",
"before",
"example",
"after",
"around (after)"
])
end
context "clearing ivars" do
it "sets ivars to nil to prep them for GC" do
group = RSpec.describe do
before(:all) { @before_all = :before_all }
before(:each) { @before_each = :before_each }
after(:each) { @after_each = :after_each }
after(:all) { @after_all = :after_all }
end
group.example("does something") do
expect(@before_all).to eq(:before_all)
expect(@before_each).to eq(:before_each)
end
expect(group.run(double.as_null_object)).to be_truthy
group.new do |example|
%w[@before_all @before_each @after_each @after_all].each do |ivar|
expect(example.instance_variable_get(ivar)).to be_nil
end
end
end
it "does not impact the before_all_ivars which are copied to each example" do
group = RSpec.describe do
before(:all) { @before_all = "abc" }
example("first") { expect(@before_all).not_to be_nil }
example("second") { expect(@before_all).not_to be_nil }
end
expect(group.run).to be_truthy
end
end
context 'when the example raises an error' do
def run_and_capture_reported_message(group)
reported_msg = nil
# We can't use should_receive(:message).with(/.../) here,
# because if that fails, it would fail within our example-under-test,
        # and since there are already two errors, it would just be reported again.
allow(RSpec.configuration.reporter).to receive(:message) { |msg| reported_msg = msg }
group.run
reported_msg
end
it "prints any around hook errors rather than silencing them" do
group = RSpec.describe do
around(:each) { |e| e.run; raise "around" }
example("e") { raise "example" }
end
message = run_and_capture_reported_message(group)
expect(message).to match(/An error occurred in an `around.* hook/i)
end
it "prints any after hook errors rather than silencing them" do
group = RSpec.describe do
after(:each) { raise "after" }
example("e") { raise "example" }
end
message = run_and_capture_reported_message(group)
expect(message).to match(/An error occurred in an after.* hook/i)
end
it "does not print mock expectation errors" do
group = RSpec.describe do
example do
foo = double
expect(foo).to receive(:bar)
raise "boom"
end
end
message = run_and_capture_reported_message(group)
expect(message).to be_nil
end
it "leaves a raised exception unmodified (GH-1103)" do
# set the backtrace, otherwise MRI will build a whole new object,
# and thus mess with our expectations. Rubinius and JRuby are not
# affected.
exception = StandardError.new
exception.set_backtrace([])
group = RSpec.describe do
example { raise exception.freeze }
end
group.run
actual = group.examples.first.execution_result.exception
expect(actual.__id__).to eq(exception.__id__)
end
end
context "with --dry-run" do
before { RSpec.configuration.dry_run = true }
it "does not execute any examples or hooks" do
executed = []
RSpec.configure do |c|
c.before(:each) { executed << :before_each_config }
c.before(:all) { executed << :before_all_config }
c.after(:each) { executed << :after_each_config }
c.after(:all) { executed << :after_all_config }
c.around(:each) { |ex| executed << :around_each_config; ex.run }
end
group = RSpec.describe do
before(:all) { executed << :before_all }
before(:each) { executed << :before_each }
after(:all) { executed << :after_all }
after(:each) { executed << :after_each }
around(:each) { |ex| executed << :around_each; ex.run }
example { executed << :example }
context "nested" do
before(:all) { executed << :nested_before_all }
before(:each) { executed << :nested_before_each }
after(:all) { executed << :nested_after_all }
after(:each) { executed << :nested_after_each }
around(:each) { |ex| executed << :nested_around_each; ex.run }
example { executed << :nested_example }
end
end
group.run
expect(executed).to eq([])
end
end
end
describe "#pending" do
def expect_pending_result(example)
expect(example).to be_pending
expect(example.execution_result.status).to eq(:pending)
expect(example.execution_result.pending_message).to be
end
context "in the example" do
it "sets the example to pending" do
group = RSpec.describe do
example { pending; fail }
end
group.run
expect_pending_result(group.examples.first)
end
it "allows post-example processing in around hooks (see https://github.com/rspec/rspec-core/issues/322)" do
blah = nil
group = RSpec.describe do
around do |example|
example.run
blah = :success
end
example { pending }
end
group.run
expect(blah).to be(:success)
end
it 'sets the backtrace to the example definition so it can be located by the user' do
file = RSpec::Core::Metadata.relative_path(__FILE__)
expected = [file, __LINE__ + 2].map(&:to_s)
group = RSpec.describe do
example {
pending
}
end
group.run
actual = group.examples.first.exception.backtrace.first.split(':')[0..1]
expect(actual).to eq(expected)
end
end
context "in before(:each)" do
it "sets each example to pending" do
group = RSpec.describe do
before(:each) { pending }
example { fail }
example { fail }
end
group.run
expect_pending_result(group.examples.first)
expect_pending_result(group.examples.last)
end
it 'sets example to pending when failure occurs in before(:each)' do
group = RSpec.describe do
before(:each) { pending; fail }
example {}
end
group.run
expect_pending_result(group.examples.first)
end
end
context "in before(:all)" do
it "is forbidden" do
group = RSpec.describe do
before(:all) { pending }
example { fail }
example { fail }
end
group.run
expect(group.examples.first.exception).to be
expect(group.examples.first.exception.message).to \
match(/may not be used outside of examples/)
end
it "fails with an ArgumentError if a block is provided" do
group = RSpec.describe('group') do
before(:all) do
pending { :no_op }
end
example { fail }
end
example = group.examples.first
group.run
expect(example).to fail_with ArgumentError
expect(example.exception.message).to match(
/Passing a block within an example is now deprecated./
)
end
end
context "in around(:each)" do
it "sets the example to pending" do
group = RSpec.describe do
around(:each) { pending }
example { fail }
end
group.run
expect_pending_result(group.examples.first)
end
it 'sets example to pending when failure occurs in around(:each)' do
group = RSpec.describe do
around(:each) { pending; fail }
example {}
end
group.run
expect_pending_result(group.examples.first)
end
end
context "in after(:each)" do
it "sets each example to pending" do
group = RSpec.describe do
after(:each) { pending; fail }
example { }
example { }
end
group.run
expect_pending_result(group.examples.first)
expect_pending_result(group.examples.last)
end
end
end
describe "#skip" do
context "in the example" do
it "sets the example to skipped" do
group = RSpec.describe do
example { skip }
end
group.run
expect(group.examples.first).to be_skipped
end
it "allows post-example processing in around hooks (see https://github.com/rspec/rspec-core/issues/322)" do
blah = nil
group = RSpec.describe do
around do |example|
example.run
blah = :success
end
example { skip }
end
group.run
expect(blah).to be(:success)
end
context "with a message" do
it "sets the example to skipped with the provided message" do
group = RSpec.describe do
example { skip "lorem ipsum" }
end
group.run
expect(group.examples.first).to be_skipped_with("lorem ipsum")
end
end
end
context "in before(:each)" do
it "sets each example to skipped" do
group = RSpec.describe do
before(:each) { skip }
example {}
example {}
end
group.run
expect(group.examples.first).to be_skipped
expect(group.examples.last).to be_skipped
end
end
context "in before(:all)" do
it "sets each example to skipped" do
group = RSpec.describe do
before(:all) { skip("not done"); fail }
example {}
example {}
end
group.run
expect(group.examples.first).to be_skipped_with("not done")
expect(group.examples.last).to be_skipped_with("not done")
end
end
context "in around(:each)" do
it "sets the example to skipped" do
group = RSpec.describe do
around(:each) { skip }
example {}
end
group.run
expect(group.examples.first).to be_skipped
end
end
end
describe "timing" do
it "uses RSpec::Core::Time as to not be affected by changes to time in examples" do
reporter = double(:reporter).as_null_object
group = RSpec.describe
example = group.example
example.__send__ :start, reporter
allow(Time).to receive_messages(:now => Time.utc(2012, 10, 1))
example.__send__ :finish, reporter
expect(example.execution_result.run_time).to be < 0.2
end
end
it "does not interfere with per-example randomness when running examples in a random order" do
values = []
RSpec.configuration.order = :random
RSpec.describe do
# The bug was only triggered when the examples
# were in nested contexts; see https://github.com/rspec/rspec-core/pull/837
context { example { values << rand } }
context { example { values << rand } }
end.run
expect(values.uniq.count).to eq(2)
end
describe "optional block argument" do
it "contains the example" do |ex|
expect(ex).to be_an(RSpec::Core::Example)
expect(ex.description).to match(/contains the example/)
end
end
describe "setting the current example" do
it "sets RSpec.current_example to the example that is currently running" do
group = RSpec.describe("an example group")
current_examples = []
example1 = group.example("example 1") { current_examples << RSpec.current_example }
example2 = group.example("example 2") { current_examples << RSpec.current_example }
group.run
expect(current_examples).to eq([example1, example2])
end
end
describe "mock framework integration" do
it 'verifies mock expectations after each example' do
ex = nil
RSpec.describe do
let(:dbl) { double }
ex = example do
expect(dbl).to receive(:foo)
end
end.run
expect(ex).to fail_with(RSpec::Mocks::MockExpectationError)
end
it 'allows `after(:example)` hooks to satisfy mock expectations, since examples are not complete until their `after` hooks run' do
ex = nil
RSpec.describe do
let(:dbl) { double }
ex = example do
expect(dbl).to receive(:foo)
end
after { dbl.foo }
end.run
expect(ex).to pass
end
end
describe "exposing the examples reporter" do
it "returns a null reporter when the example hasnt run yet" do
example = RSpec.describe.example
expect(example.reporter).to be RSpec::Core::NullReporter
end
it "returns the reporter used to run the example when executed" do
reporter = double(:reporter).as_null_object
group = RSpec.describe
example = group.example
example.run group.new, reporter
expect(example.reporter).to be reporter
end
end
end
| 1 | 14,734 | Hmm, I wonder if we should revert #1862 instead? BTW, what failure do you get w/o this line? | rspec-rspec-core | rb |
@@ -428,6 +428,9 @@ func (cg *configGenerator) convertSlackConfig(ctx context.Context, in monitoring
return nil, errors.Errorf("failed to get key %q from secret %q", in.APIURL.Key, in.APIURL.Name)
}
out.APIURL = strings.TrimSpace(url)
+ if _, err := ValidateURL(out.APIURL); err != nil {
+ return nil, errors.Wrapf(err, "invalid 'apiURL' %s in slack config", out.APIURL)
+ }
}
var actions []slackAction | 1 | // Copyright 2020 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alertmanager
import (
"context"
"fmt"
"net"
"path"
"sort"
"strings"
"time"
"github.com/blang/semver/v4"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
"github.com/prometheus-operator/prometheus-operator/pkg/assets"
"github.com/prometheus-operator/prometheus-operator/pkg/operator"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/alertmanager/timeinterval"
"gopkg.in/yaml.v2"
"k8s.io/apimachinery/pkg/types"
)
const inhibitRuleNamespaceKey = "namespace"
func loadCfg(s string) (*alertmanagerConfig, error) {
// Run upstream Load function to get any validation checks that it runs.
_, err := config.Load(s)
if err != nil {
return nil, err
}
cfg := &alertmanagerConfig{}
err = yaml.UnmarshalStrict([]byte(s), cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
func (c alertmanagerConfig) String() string {
b, err := yaml.Marshal(c)
if err != nil {
return fmt.Sprintf("<error creating config string: %s>", err)
}
return string(b)
}
type configGenerator struct {
logger log.Logger
amVersion semver.Version
store *assets.Store
}
func newConfigGenerator(logger log.Logger, amVersion semver.Version, store *assets.Store) *configGenerator {
cg := &configGenerator{
logger: logger,
amVersion: amVersion,
store: store,
}
return cg
}
// validateConfigInputs runs extra validation on the AlertManager fields which can't be done at the CRD schema validation level.
func validateConfigInputs(am *monitoringv1.Alertmanager) error {
if am.Spec.Retention != "" {
if err := operator.ValidateDurationField(am.Spec.Retention); err != nil {
return errors.Wrap(err, "invalid retention value specified")
}
}
if am.Spec.ClusterGossipInterval != "" {
if err := operator.ValidateDurationField(am.Spec.ClusterGossipInterval); err != nil {
return errors.Wrap(err, "invalid clusterGossipInterval value specified")
}
}
if am.Spec.ClusterPushpullInterval != "" {
if err := operator.ValidateDurationField(am.Spec.ClusterPushpullInterval); err != nil {
return errors.Wrap(err, "invalid clusterPushpullInterval value specified")
}
}
if am.Spec.ClusterPeerTimeout != "" {
if err := operator.ValidateDurationField(am.Spec.ClusterPeerTimeout); err != nil {
return errors.Wrap(err, "invalid clusterPeerTimeout value specified")
}
}
return nil
}
func (cg *configGenerator) generateConfig(
ctx context.Context,
baseConfig alertmanagerConfig,
amConfigs map[string]*monitoringv1alpha1.AlertmanagerConfig,
) ([]byte, error) {
// amConfigIdentifiers is a sorted slice of keys from
// amConfigs map, used to always generate the config in the
// same order.
amConfigIdentifiers := make([]string, len(amConfigs))
i := 0
for k := range amConfigs {
amConfigIdentifiers[i] = k
i++
}
sort.Strings(amConfigIdentifiers)
subRoutes := make([]*route, 0, len(amConfigs))
for _, amConfigIdentifier := range amConfigIdentifiers {
crKey := types.NamespacedName{
Name: amConfigs[amConfigIdentifier].Name,
Namespace: amConfigs[amConfigIdentifier].Namespace,
}
// Add inhibitRules to baseConfig.InhibitRules.
for _, inhibitRule := range amConfigs[amConfigIdentifier].Spec.InhibitRules {
baseConfig.InhibitRules = append(baseConfig.InhibitRules, cg.convertInhibitRule(&inhibitRule, crKey))
}
// Skip early if there's no route definition.
if amConfigs[amConfigIdentifier].Spec.Route == nil {
continue
}
subRoutes = append(subRoutes,
cg.enforceNamespaceForRoute(
cg.convertRoute(
amConfigs[amConfigIdentifier].Spec.Route, crKey),
amConfigs[amConfigIdentifier].Namespace,
),
)
for _, receiver := range amConfigs[amConfigIdentifier].Spec.Receivers {
receivers, err := cg.convertReceiver(ctx, &receiver, crKey)
if err != nil {
return nil, errors.Wrapf(err, "AlertmanagerConfig %s", crKey.String())
}
baseConfig.Receivers = append(baseConfig.Receivers, receivers)
}
for _, muteTimeInterval := range amConfigs[amConfigIdentifier].Spec.MuteTimeIntervals {
mti, err := convertMuteTimeInterval(&muteTimeInterval, crKey)
if err != nil {
return nil, errors.Wrapf(err, "AlertmanagerConfig %s", crKey.String())
}
baseConfig.MuteTimeIntervals = append(baseConfig.MuteTimeIntervals, mti)
}
}
// For alerts to be processed by the AlertmanagerConfig routes, they need
// to appear before the routes defined in the main configuration.
// Because all first-level AlertmanagerConfig routes have "continue: true",
// alerts will fall through.
baseConfig.Route.Routes = append(subRoutes, baseConfig.Route.Routes...)
generatedConf := &baseConfig
if err := generatedConf.sanitize(cg.amVersion, cg.logger); err != nil {
return nil, err
}
return yaml.Marshal(generatedConf)
}
// enforceNamespaceForRoute modifies the route configuration to match alerts
// originating only from the given namespace.
func (cg *configGenerator) enforceNamespaceForRoute(r *route, namespace string) *route {
matchersV2Allowed := cg.amVersion.GTE(semver.MustParse("0.22.0"))
// Routes created from AlertmanagerConfig resources should only match
// alerts that come from the same namespace.
if matchersV2Allowed {
r.Matchers = append(r.Matchers, monitoringv1alpha1.Matcher{
Name: "namespace",
Value: namespace,
MatchType: monitoringv1alpha1.MatchEqual,
}.String())
} else {
r.Match["namespace"] = namespace
}
// Alerts should still be evaluated by the following routes.
r.Continue = true
return r
}
func (cg *configGenerator) convertRoute(in *monitoringv1alpha1.Route, crKey types.NamespacedName) *route {
var matchers []string
// deprecated
match := map[string]string{}
matchRE := map[string]string{}
for _, matcher := range in.Matchers {
// prefer matchers to deprecated config
if matcher.MatchType != "" {
matchers = append(matchers, matcher.String())
continue
}
if matcher.Regex {
matchRE[matcher.Name] = matcher.Value
} else {
match[matcher.Name] = matcher.Value
}
}
var routes []*route
if len(in.Routes) > 0 {
routes = make([]*route, len(in.Routes))
children, err := in.ChildRoutes()
if err != nil {
// The controller should already have checked that ChildRoutes()
// doesn't return an error when selecting AlertmanagerConfig CRDs.
// If there's an error here, we have a serious bug in the code.
panic(err)
}
for i := range children {
routes[i] = cg.convertRoute(&children[i], crKey)
}
}
receiver := makeNamespacedString(in.Receiver, crKey)
var prefixedMuteTimeIntervals []string
if len(in.MuteTimeIntervals) > 0 {
for _, mti := range in.MuteTimeIntervals {
prefixedMuteTimeIntervals = append(prefixedMuteTimeIntervals, makeNamespacedString(mti, crKey))
}
}
return &route{
Receiver: receiver,
GroupByStr: in.GroupBy,
GroupWait: in.GroupWait,
GroupInterval: in.GroupInterval,
RepeatInterval: in.RepeatInterval,
Continue: in.Continue,
Match: match,
MatchRE: matchRE,
Matchers: matchers,
Routes: routes,
MuteTimeIntervals: prefixedMuteTimeIntervals,
}
}
// convertReceiver converts a monitoringv1alpha1.Receiver to an alertmanager.receiver
func (cg *configGenerator) convertReceiver(ctx context.Context, in *monitoringv1alpha1.Receiver, crKey types.NamespacedName) (*receiver, error) {
var pagerdutyConfigs []*pagerdutyConfig
if l := len(in.PagerDutyConfigs); l > 0 {
pagerdutyConfigs = make([]*pagerdutyConfig, l)
for i := range in.PagerDutyConfigs {
receiver, err := cg.convertPagerdutyConfig(ctx, in.PagerDutyConfigs[i], crKey)
if err != nil {
return nil, errors.Wrapf(err, "PagerDutyConfig[%d]", i)
}
pagerdutyConfigs[i] = receiver
}
}
var slackConfigs []*slackConfig
if l := len(in.SlackConfigs); l > 0 {
slackConfigs = make([]*slackConfig, l)
for i := range in.SlackConfigs {
receiver, err := cg.convertSlackConfig(ctx, in.SlackConfigs[i], crKey)
if err != nil {
return nil, errors.Wrapf(err, "SlackConfig[%d]", i)
}
slackConfigs[i] = receiver
}
}
var webhookConfigs []*webhookConfig
if l := len(in.WebhookConfigs); l > 0 {
webhookConfigs = make([]*webhookConfig, l)
for i := range in.WebhookConfigs {
receiver, err := cg.convertWebhookConfig(ctx, in.WebhookConfigs[i], crKey)
if err != nil {
return nil, errors.Wrapf(err, "WebhookConfig[%d]", i)
}
webhookConfigs[i] = receiver
}
}
var opsgenieConfigs []*opsgenieConfig
if l := len(in.OpsGenieConfigs); l > 0 {
opsgenieConfigs = make([]*opsgenieConfig, l)
for i := range in.OpsGenieConfigs {
receiver, err := cg.convertOpsgenieConfig(ctx, in.OpsGenieConfigs[i], crKey)
if err != nil {
return nil, errors.Wrapf(err, "OpsGenieConfigs[%d]", i)
}
opsgenieConfigs[i] = receiver
}
}
var weChatConfigs []*weChatConfig
if l := len(in.WeChatConfigs); l > 0 {
weChatConfigs = make([]*weChatConfig, l)
for i := range in.WeChatConfigs {
receiver, err := cg.convertWeChatConfig(ctx, in.WeChatConfigs[i], crKey)
if err != nil {
return nil, errors.Wrapf(err, "WeChatConfig[%d]", i)
}
weChatConfigs[i] = receiver
}
}
var emailConfigs []*emailConfig
if l := len(in.EmailConfigs); l > 0 {
emailConfigs = make([]*emailConfig, l)
for i := range in.EmailConfigs {
receiver, err := cg.convertEmailConfig(ctx, in.EmailConfigs[i], crKey)
if err != nil {
return nil, errors.Wrapf(err, "EmailConfig[%d]", i)
}
emailConfigs[i] = receiver
}
}
var victorOpsConfigs []*victorOpsConfig
if l := len(in.VictorOpsConfigs); l > 0 {
victorOpsConfigs = make([]*victorOpsConfig, l)
for i := range in.VictorOpsConfigs {
receiver, err := cg.convertVictorOpsConfig(ctx, in.VictorOpsConfigs[i], crKey)
if err != nil {
return nil, errors.Wrapf(err, "VictorOpsConfig[%d]", i)
}
victorOpsConfigs[i] = receiver
}
}
var pushoverConfigs []*pushoverConfig
if l := len(in.PushoverConfigs); l > 0 {
pushoverConfigs = make([]*pushoverConfig, l)
for i := range in.PushoverConfigs {
receiver, err := cg.convertPushoverConfig(ctx, in.PushoverConfigs[i], crKey)
if err != nil {
return nil, errors.Wrapf(err, "PushoverConfig[%d]", i)
}
pushoverConfigs[i] = receiver
}
}
return &receiver{
Name: makeNamespacedString(in.Name, crKey),
OpsgenieConfigs: opsgenieConfigs,
PagerdutyConfigs: pagerdutyConfigs,
SlackConfigs: slackConfigs,
WebhookConfigs: webhookConfigs,
WeChatConfigs: weChatConfigs,
EmailConfigs: emailConfigs,
VictorOpsConfigs: victorOpsConfigs,
PushoverConfigs: pushoverConfigs,
}, nil
}
func (cg *configGenerator) convertWebhookConfig(ctx context.Context, in monitoringv1alpha1.WebhookConfig, crKey types.NamespacedName) (*webhookConfig, error) {
out := &webhookConfig{
VSendResolved: in.SendResolved,
}
if in.URLSecret != nil {
url, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.URLSecret)
if err != nil {
return nil, errors.Errorf("failed to get key %q from secret %q", in.URLSecret.Key, in.URLSecret.Name)
}
out.URL = strings.TrimSpace(url)
} else if in.URL != nil {
out.URL = *in.URL
}
if in.HTTPConfig != nil {
httpConfig, err := cg.convertHTTPConfig(ctx, *in.HTTPConfig, crKey)
if err != nil {
return nil, err
}
out.HTTPConfig = httpConfig
}
if in.MaxAlerts > 0 {
out.MaxAlerts = in.MaxAlerts
}
return out, nil
}
func (cg *configGenerator) convertSlackConfig(ctx context.Context, in monitoringv1alpha1.SlackConfig, crKey types.NamespacedName) (*slackConfig, error) {
out := &slackConfig{
VSendResolved: in.SendResolved,
Channel: in.Channel,
Username: in.Username,
Color: in.Color,
Title: in.Title,
TitleLink: in.TitleLink,
Pretext: in.Pretext,
Text: in.Text,
ShortFields: in.ShortFields,
Footer: in.Footer,
Fallback: in.Fallback,
CallbackID: in.CallbackID,
IconEmoji: in.IconEmoji,
IconURL: in.IconURL,
ImageURL: in.ImageURL,
ThumbURL: in.ThumbURL,
LinkNames: in.LinkNames,
MrkdwnIn: in.MrkdwnIn,
}
if in.APIURL != nil {
url, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.APIURL)
if err != nil {
return nil, errors.Errorf("failed to get key %q from secret %q", in.APIURL.Key, in.APIURL.Name)
}
out.APIURL = strings.TrimSpace(url)
}
var actions []slackAction
if l := len(in.Actions); l > 0 {
actions = make([]slackAction, l)
for i, a := range in.Actions {
			action := slackAction{
Type: a.Type,
Text: a.Text,
URL: a.URL,
Style: a.Style,
Name: a.Name,
Value: a.Value,
}
if a.ConfirmField != nil {
action.ConfirmField = &slackConfirmationField{
Text: a.ConfirmField.Text,
Title: a.ConfirmField.Title,
OkText: a.ConfirmField.OkText,
DismissText: a.ConfirmField.DismissText,
}
}
actions[i] = action
}
out.Actions = actions
}
if l := len(in.Fields); l > 0 {
		fields := make([]slackField, l)
		for i, f := range in.Fields {
			field := slackField{
				Title: f.Title,
				Value: f.Value,
			}
if f.Short != nil {
field.Short = *f.Short
}
fields[i] = field
}
out.Fields = fields
}
if in.HTTPConfig != nil {
httpConfig, err := cg.convertHTTPConfig(ctx, *in.HTTPConfig, crKey)
if err != nil {
return nil, err
}
out.HTTPConfig = httpConfig
}
return out, nil
}
func (cg *configGenerator) convertPagerdutyConfig(ctx context.Context, in monitoringv1alpha1.PagerDutyConfig, crKey types.NamespacedName) (*pagerdutyConfig, error) {
out := &pagerdutyConfig{
VSendResolved: in.SendResolved,
Class: in.Class,
Client: in.Client,
ClientURL: in.ClientURL,
Component: in.Component,
Description: in.Description,
Group: in.Group,
Severity: in.Severity,
URL: in.URL,
}
if in.RoutingKey != nil {
routingKey, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.RoutingKey)
if err != nil {
return nil, errors.Errorf("failed to get routing key %q from secret %q", in.RoutingKey.Key, in.RoutingKey.Name)
}
out.RoutingKey = routingKey
}
if in.ServiceKey != nil {
serviceKey, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.ServiceKey)
if err != nil {
return nil, errors.Errorf("failed to get service key %q from secret %q", in.ServiceKey.Key, in.ServiceKey.Name)
}
out.ServiceKey = serviceKey
}
var details map[string]string
if l := len(in.Details); l > 0 {
details = make(map[string]string, l)
for _, d := range in.Details {
details[d.Key] = d.Value
}
}
out.Details = details
var linkConfigs []pagerdutyLink
if l := len(in.PagerDutyLinkConfigs); l > 0 {
linkConfigs = make([]pagerdutyLink, l)
for i, lc := range in.PagerDutyLinkConfigs {
linkConfigs[i] = pagerdutyLink{
Href: lc.Href,
Text: lc.Text,
}
}
}
out.Links = linkConfigs
var imageConfig []pagerdutyImage
if l := len(in.PagerDutyImageConfigs); l > 0 {
imageConfig = make([]pagerdutyImage, l)
for i, ic := range in.PagerDutyImageConfigs {
imageConfig[i] = pagerdutyImage{
Src: ic.Src,
Alt: ic.Alt,
Href: ic.Href,
}
}
}
out.Images = imageConfig
if in.HTTPConfig != nil {
httpConfig, err := cg.convertHTTPConfig(ctx, *in.HTTPConfig, crKey)
if err != nil {
return nil, err
}
out.HTTPConfig = httpConfig
}
return out, nil
}
func (cg *configGenerator) convertOpsgenieConfig(ctx context.Context, in monitoringv1alpha1.OpsGenieConfig, crKey types.NamespacedName) (*opsgenieConfig, error) {
out := &opsgenieConfig{
VSendResolved: in.SendResolved,
APIURL: in.APIURL,
Message: in.Message,
Description: in.Description,
Source: in.Source,
Tags: in.Tags,
Note: in.Note,
Priority: in.Priority,
}
if in.APIKey != nil {
apiKey, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.APIKey)
if err != nil {
return nil, errors.Errorf("failed to get api key %q from secret %q", in.APIKey.Key, in.APIKey.Name)
}
out.APIKey = apiKey
}
var details map[string]string
if l := len(in.Details); l > 0 {
details = make(map[string]string, l)
for _, d := range in.Details {
details[d.Key] = d.Value
}
}
out.Details = details
var responders []opsgenieResponder
if l := len(in.Responders); l > 0 {
responders = make([]opsgenieResponder, 0, l)
for _, r := range in.Responders {
			responder := opsgenieResponder{
ID: r.ID,
Name: r.Name,
Username: r.Username,
Type: r.Type,
}
responders = append(responders, responder)
}
}
out.Responders = responders
if in.HTTPConfig != nil {
httpConfig, err := cg.convertHTTPConfig(ctx, *in.HTTPConfig, crKey)
if err != nil {
return nil, err
}
out.HTTPConfig = httpConfig
}
return out, nil
}
func (cg *configGenerator) convertWeChatConfig(ctx context.Context, in monitoringv1alpha1.WeChatConfig, crKey types.NamespacedName) (*weChatConfig, error) {
out := &weChatConfig{
VSendResolved: in.SendResolved,
APIURL: in.APIURL,
CorpID: in.CorpID,
AgentID: in.AgentID,
ToUser: in.ToUser,
ToParty: in.ToParty,
ToTag: in.ToTag,
Message: in.Message,
MessageType: in.MessageType,
}
if in.APISecret != nil {
apiSecret, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.APISecret)
if err != nil {
return nil, errors.Errorf("failed to get secret %q", in.APISecret)
}
out.APISecret = apiSecret
}
if in.HTTPConfig != nil {
httpConfig, err := cg.convertHTTPConfig(ctx, *in.HTTPConfig, crKey)
if err != nil {
return nil, err
}
out.HTTPConfig = httpConfig
}
return out, nil
}
func (cg *configGenerator) convertEmailConfig(ctx context.Context, in monitoringv1alpha1.EmailConfig, crKey types.NamespacedName) (*emailConfig, error) {
out := &emailConfig{
VSendResolved: in.SendResolved,
To: in.To,
From: in.From,
Hello: in.Hello,
AuthUsername: in.AuthUsername,
AuthIdentity: in.AuthIdentity,
HTML: in.HTML,
Text: in.Text,
RequireTLS: in.RequireTLS,
}
if in.Smarthost != "" {
out.Smarthost.Host, out.Smarthost.Port, _ = net.SplitHostPort(in.Smarthost)
}
if in.AuthPassword != nil {
authPassword, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.AuthPassword)
if err != nil {
return nil, errors.Errorf("failed to get secret %q", in.AuthPassword)
}
out.AuthPassword = authPassword
}
if in.AuthSecret != nil {
authSecret, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.AuthSecret)
if err != nil {
return nil, errors.Errorf("failed to get secret %q", in.AuthSecret)
}
out.AuthSecret = authSecret
}
if l := len(in.Headers); l > 0 {
headers := make(map[string]string, l)
for _, d := range in.Headers {
headers[strings.Title(d.Key)] = d.Value
}
out.Headers = headers
}
if in.TLSConfig != nil {
out.TLSConfig = cg.convertTLSConfig(ctx, in.TLSConfig, crKey)
}
return out, nil
}
func (cg *configGenerator) convertVictorOpsConfig(ctx context.Context, in monitoringv1alpha1.VictorOpsConfig, crKey types.NamespacedName) (*victorOpsConfig, error) {
out := &victorOpsConfig{
VSendResolved: in.SendResolved,
APIURL: in.APIURL,
RoutingKey: in.RoutingKey,
MessageType: in.MessageType,
EntityDisplayName: in.EntityDisplayName,
StateMessage: in.StateMessage,
MonitoringTool: in.MonitoringTool,
}
if in.APIKey != nil {
apiKey, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.APIKey)
if err != nil {
return nil, errors.Errorf("failed to get secret %q", in.APIKey)
}
out.APIKey = apiKey
}
var customFields map[string]string
if l := len(in.CustomFields); l > 0 {
// from https://github.com/prometheus/alertmanager/blob/a7f9fdadbecbb7e692d2cd8d3334e3d6de1602e1/config/notifiers.go#L497
reservedFields := map[string]struct{}{
"routing_key": {},
"message_type": {},
"state_message": {},
"entity_display_name": {},
"monitoring_tool": {},
"entity_id": {},
"entity_state": {},
}
customFields = make(map[string]string, l)
for _, d := range in.CustomFields {
if _, ok := reservedFields[d.Key]; ok {
return nil, errors.Errorf("VictorOps config contains custom field %s which cannot be used as it conflicts with the fixed/static fields", d.Key)
}
customFields[d.Key] = d.Value
}
}
out.CustomFields = customFields
if in.HTTPConfig != nil {
httpConfig, err := cg.convertHTTPConfig(ctx, *in.HTTPConfig, crKey)
if err != nil {
return nil, err
}
out.HTTPConfig = httpConfig
}
return out, nil
}
func (cg *configGenerator) convertPushoverConfig(ctx context.Context, in monitoringv1alpha1.PushoverConfig, crKey types.NamespacedName) (*pushoverConfig, error) {
out := &pushoverConfig{
VSendResolved: in.SendResolved,
Title: in.Title,
Message: in.Message,
URL: in.URL,
URLTitle: in.URLTitle,
Priority: in.Priority,
HTML: in.HTML,
}
{
userKey, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.UserKey)
if err != nil {
return nil, errors.Errorf("failed to get secret %q", in.UserKey)
}
if userKey == "" {
return nil, errors.Errorf("mandatory field %q is empty", "userKey")
}
out.UserKey = userKey
}
{
token, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.Token)
if err != nil {
return nil, errors.Errorf("failed to get secret %q", in.Token)
}
if token == "" {
return nil, errors.Errorf("mandatory field %q is empty", "token")
}
out.Token = token
}
{
if in.Retry != "" {
retry, _ := time.ParseDuration(in.Retry)
out.Retry = duration(retry)
}
if in.Expire != "" {
expire, _ := time.ParseDuration(in.Expire)
out.Expire = duration(expire)
}
}
if in.HTTPConfig != nil {
httpConfig, err := cg.convertHTTPConfig(ctx, *in.HTTPConfig, crKey)
if err != nil {
return nil, err
}
out.HTTPConfig = httpConfig
}
return out, nil
}
func (cg *configGenerator) convertInhibitRule(in *monitoringv1alpha1.InhibitRule, crKey types.NamespacedName) *inhibitRule {
matchersV2Allowed := cg.amVersion.GTE(semver.MustParse("0.22.0"))
var sourceMatchers []string
var targetMatchers []string
v2NamespaceMatcher := monitoringv1alpha1.Matcher{
Name: inhibitRuleNamespaceKey,
Value: crKey.Namespace,
MatchType: monitoringv1alpha1.MatchEqual,
}.String()
	// todo (pgough) the following config options are deprecated and can be
	// removed once the supported version matrix reaches >= 0.22.0
sourceMatch := map[string]string{}
sourceMatchRE := map[string]string{}
targetMatch := map[string]string{}
targetMatchRE := map[string]string{}
for _, sm := range in.SourceMatch {
// prefer matchers to deprecated syntax
if sm.MatchType != "" {
sourceMatchers = append(sourceMatchers, sm.String())
continue
}
if matchersV2Allowed {
if sm.Regex {
sourceMatchers = append(sourceMatchers, inhibitRuleRegexToV2(sm.Name, sm.Value))
} else {
sourceMatchers = append(sourceMatchers, inhibitRuleToV2(sm.Name, sm.Value))
}
continue
}
if sm.Regex {
sourceMatchRE[sm.Name] = sm.Value
} else {
sourceMatch[sm.Name] = sm.Value
}
}
delete(sourceMatchRE, inhibitRuleNamespaceKey)
if matchersV2Allowed {
if !contains(v2NamespaceMatcher, sourceMatchers) {
sourceMatchers = append(sourceMatchers, v2NamespaceMatcher)
}
delete(sourceMatch, inhibitRuleNamespaceKey)
} else {
sourceMatch[inhibitRuleNamespaceKey] = crKey.Namespace
}
for _, tm := range in.TargetMatch {
// prefer matchers to deprecated config
if tm.MatchType != "" {
targetMatchers = append(targetMatchers, tm.String())
continue
}
if matchersV2Allowed {
if tm.Regex {
targetMatchers = append(targetMatchers, inhibitRuleRegexToV2(tm.Name, tm.Value))
} else {
targetMatchers = append(targetMatchers, inhibitRuleToV2(tm.Name, tm.Value))
}
continue
}
if tm.Regex {
targetMatchRE[tm.Name] = tm.Value
} else {
targetMatch[tm.Name] = tm.Value
}
}
delete(targetMatchRE, inhibitRuleNamespaceKey)
if matchersV2Allowed {
if !contains(v2NamespaceMatcher, targetMatchers) {
targetMatchers = append(targetMatchers, v2NamespaceMatcher)
}
delete(targetMatch, inhibitRuleNamespaceKey)
} else {
targetMatch[inhibitRuleNamespaceKey] = crKey.Namespace
}
return &inhibitRule{
SourceMatch: sourceMatch,
SourceMatchRE: sourceMatchRE,
SourceMatchers: sourceMatchers,
TargetMatch: targetMatch,
TargetMatchRE: targetMatchRE,
TargetMatchers: targetMatchers,
Equal: in.Equal,
}
}
func convertMuteTimeInterval(in *monitoringv1alpha1.MuteTimeInterval, crKey types.NamespacedName) (*muteTimeInterval, error) {
muteTimeInterval := &muteTimeInterval{}
for _, timeInterval := range in.TimeIntervals {
ti := timeinterval.TimeInterval{}
for _, time := range timeInterval.Times {
parsedTime, err := time.Parse()
if err != nil {
return nil, err
}
ti.Times = append(ti.Times, timeinterval.TimeRange{
StartMinute: parsedTime.Start,
EndMinute: parsedTime.End,
})
}
for _, wd := range timeInterval.Weekdays {
parsedWeekday, err := wd.Parse()
if err != nil {
return nil, err
}
ti.Weekdays = append(ti.Weekdays, timeinterval.WeekdayRange{
InclusiveRange: timeinterval.InclusiveRange{
Begin: parsedWeekday.Start,
End: parsedWeekday.End,
},
})
}
for _, dom := range timeInterval.DaysOfMonth {
ti.DaysOfMonth = append(ti.DaysOfMonth, timeinterval.DayOfMonthRange{
InclusiveRange: timeinterval.InclusiveRange{
Begin: dom.Start,
End: dom.End,
},
})
}
for _, month := range timeInterval.Months {
parsedMonth, err := month.Parse()
if err != nil {
return nil, err
}
ti.Months = append(ti.Months, timeinterval.MonthRange{
InclusiveRange: timeinterval.InclusiveRange{
Begin: parsedMonth.Start,
End: parsedMonth.End,
},
})
}
for _, year := range timeInterval.Years {
parsedYear, err := year.Parse()
if err != nil {
return nil, err
}
ti.Years = append(ti.Years, timeinterval.YearRange{
InclusiveRange: timeinterval.InclusiveRange{
Begin: parsedYear.Start,
End: parsedYear.End,
},
})
}
muteTimeInterval.Name = makeNamespacedString(in.Name, crKey)
muteTimeInterval.TimeIntervals = append(muteTimeInterval.TimeIntervals, ti)
}
return muteTimeInterval, nil
}
func makeNamespacedString(in string, crKey types.NamespacedName) string {
if in == "" {
return ""
}
return crKey.Namespace + "-" + crKey.Name + "-" + in
}
func (cg *configGenerator) convertHTTPConfig(ctx context.Context, in monitoringv1alpha1.HTTPConfig, crKey types.NamespacedName) (*httpClientConfig, error) {
out := &httpClientConfig{
ProxyURL: in.ProxyURL,
}
if in.BasicAuth != nil {
username, err := cg.store.GetSecretKey(ctx, crKey.Namespace, in.BasicAuth.Username)
if err != nil {
return nil, errors.Errorf("failed to get BasicAuth username key %q from secret %q", in.BasicAuth.Username.Key, in.BasicAuth.Username.Name)
}
password, err := cg.store.GetSecretKey(ctx, crKey.Namespace, in.BasicAuth.Password)
if err != nil {
return nil, errors.Errorf("failed to get BasicAuth password key %q from secret %q", in.BasicAuth.Password.Key, in.BasicAuth.Password.Name)
}
if username != "" || password != "" {
out.BasicAuth = &basicAuth{Username: username, Password: password}
}
}
if in.Authorization != nil {
credentials, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.Authorization.Credentials)
if err != nil {
return nil, errors.Errorf("failed to get Authorization credentials key %q from secret %q", in.Authorization.Credentials.Key, in.Authorization.Credentials.Name)
}
if credentials != "" {
authorizationType := in.Authorization.Type
if authorizationType == "" {
authorizationType = "Bearer"
}
out.Authorization = &authorization{Type: authorizationType, Credentials: credentials}
}
}
if in.TLSConfig != nil {
out.TLSConfig = cg.convertTLSConfig(ctx, in.TLSConfig, crKey)
}
if in.BearerTokenSecret != nil {
bearerToken, err := cg.store.GetSecretKey(ctx, crKey.Namespace, *in.BearerTokenSecret)
if err != nil {
return nil, errors.Errorf("failed to get bearer token key %q from secret %q", in.BearerTokenSecret.Key, in.BearerTokenSecret.Name)
}
out.BearerToken = bearerToken
}
return out, nil
}
func (cg *configGenerator) convertTLSConfig(ctx context.Context, in *monitoringv1.SafeTLSConfig, crKey types.NamespacedName) tlsConfig {
out := tlsConfig{
ServerName: in.ServerName,
InsecureSkipVerify: in.InsecureSkipVerify,
}
if in.CA != (monitoringv1.SecretOrConfigMap{}) {
out.CAFile = path.Join(tlsAssetsDir, assets.TLSAssetKeyFromSelector(crKey.Namespace, in.CA).String())
}
if in.Cert != (monitoringv1.SecretOrConfigMap{}) {
out.CertFile = path.Join(tlsAssetsDir, assets.TLSAssetKeyFromSelector(crKey.Namespace, in.Cert).String())
}
if in.KeySecret != nil {
out.KeyFile = path.Join(tlsAssetsDir, assets.TLSAssetKeyFromSecretSelector(crKey.Namespace, in.KeySecret).String())
}
return out
}
// sanitize the config against a specific AlertManager version
// types may be sanitized in one of two ways:
// 1. stripping the unsupported config and logging a warning
// 2. returning an error, which ensures that the config will not be reconciled - this will be logged by a calling function
func (c *alertmanagerConfig) sanitize(amVersion semver.Version, logger log.Logger) error {
if c == nil {
return nil
}
c.Global.sanitize(amVersion, logger)
for _, receiver := range c.Receivers {
receiver.sanitize(amVersion, logger)
}
for i, rule := range c.InhibitRules {
if err := rule.sanitize(amVersion, logger); err != nil {
return errors.Wrapf(err, "inhibit_rules[%d]", i)
}
}
if len(c.MuteTimeIntervals) > 0 && !amVersion.GTE(semver.MustParse("0.22.0")) {
// mute time intervals are unsupported < 0.22.0, and we already log the situation
// when handling the routes so just set to nil
c.MuteTimeIntervals = nil
}
return c.Route.sanitize(amVersion, logger)
}
// sanitize globalConfig
func (gc *globalConfig) sanitize(amVersion semver.Version, logger log.Logger) {
if gc == nil {
return
}
if gc.HTTPConfig != nil {
gc.HTTPConfig.sanitize(amVersion, logger)
}
// We need to sanitize the config for slack globally
// As of v0.22.0 AlertManager config supports passing URL via file name
fileURLAllowed := amVersion.GTE(semver.MustParse("0.22.0"))
if gc.SlackAPIURLFile != "" {
if gc.SlackAPIURL != nil {
msg := "'slack_api_url' and 'slack_api_url_file' are mutually exclusive - 'slack_api_url' has taken precedence"
level.Warn(logger).Log("msg", msg)
gc.SlackAPIURLFile = ""
}
if !fileURLAllowed {
msg := "'slack_api_url_file' supported in AlertManager >= 0.22.0 only - dropping field from provided config"
level.Warn(logger).Log("msg", msg, "current_version", amVersion.String())
gc.SlackAPIURLFile = ""
}
}
}
// sanitize httpClientConfig
func (hc *httpClientConfig) sanitize(amVersion semver.Version, logger log.Logger) {
if hc == nil {
return
}
// we don't need to do any sanitization in this case and return early
if hc.Authorization == nil {
return
}
if hc.BasicAuth != nil {
msg := "'basicAuth' and 'authorization' are mutually exclusive, 'basicAuth' has taken precedence"
level.Warn(logger).Log("msg", msg)
hc.Authorization = nil
}
	// we could have returned here, but it's useful to log and bubble up the warning
if httpAuthzAllowed := amVersion.GTE(semver.MustParse("0.22.0")); !httpAuthzAllowed {
msg := "'authorization' set in 'http_config' but supported in AlertManager >= 0.22.0 only - dropping field from provided config"
level.Warn(logger).Log("msg", msg, "current_version", amVersion.String())
hc.Authorization = nil
}
}
// sanitize the receiver
func (r *receiver) sanitize(amVersion semver.Version, logger log.Logger) {
if r == nil {
return
}
withLogger := log.With(logger, "receiver", r.Name)
for _, conf := range r.OpsgenieConfigs {
conf.sanitize(amVersion, withLogger)
}
for _, conf := range r.PagerdutyConfigs {
conf.sanitize(amVersion, withLogger)
}
for _, conf := range r.PushoverConfigs {
conf.sanitize(amVersion, withLogger)
}
for _, conf := range r.SlackConfigs {
conf.sanitize(amVersion, withLogger)
}
for _, conf := range r.VictorOpsConfigs {
conf.sanitize(amVersion, withLogger)
}
for _, conf := range r.WebhookConfigs {
conf.sanitize(amVersion, withLogger)
}
for _, conf := range r.WeChatConfigs {
conf.sanitize(amVersion, withLogger)
}
}
func (ogc *opsgenieConfig) sanitize(amVersion semver.Version, logger log.Logger) {
ogc.HTTPConfig.sanitize(amVersion, logger)
}
func (pdc *pagerdutyConfig) sanitize(amVersion semver.Version, logger log.Logger) {
pdc.HTTPConfig.sanitize(amVersion, logger)
}
func (poc *pushoverConfig) sanitize(amVersion semver.Version, logger log.Logger) {
poc.HTTPConfig.sanitize(amVersion, logger)
}
func (sc *slackConfig) sanitize(amVersion semver.Version, logger log.Logger) {
sc.HTTPConfig.sanitize(amVersion, logger)
if sc.APIURLFile == "" {
return
}
// We need to sanitize the config for slack receivers
// As of v0.22.0 AlertManager config supports passing URL via file name
fileURLAllowed := amVersion.GTE(semver.MustParse("0.22.0"))
if sc.APIURL != "" {
msg := "'api_url' and 'api_url_file' are mutually exclusive for slack receiver config - 'api_url' has taken precedence"
level.Warn(logger).Log("msg", msg)
sc.APIURLFile = ""
}
if !fileURLAllowed {
msg := "'api_url_file' supported in AlertManager >= 0.22.0 only - dropping field from provided config"
level.Warn(logger).Log("msg", msg, "current_version", amVersion.String())
sc.APIURLFile = ""
}
}
func (voc *victorOpsConfig) sanitize(amVersion semver.Version, logger log.Logger) {
voc.HTTPConfig.sanitize(amVersion, logger)
}
func (whc *webhookConfig) sanitize(amVersion semver.Version, logger log.Logger) {
whc.HTTPConfig.sanitize(amVersion, logger)
}
func (wcc *weChatConfig) sanitize(amVersion semver.Version, logger log.Logger) {
wcc.HTTPConfig.sanitize(amVersion, logger)
}
func (ir *inhibitRule) sanitize(amVersion semver.Version, logger log.Logger) error {
matchersV2Allowed := amVersion.GTE(semver.MustParse("0.22.0"))
if !matchersV2Allowed {
// check if rule has provided invalid syntax and error if true
if checkNotEmptyStrSlice(ir.SourceMatchers, ir.TargetMatchers) {
msg := fmt.Sprintf(`target_matchers and source_matchers matching is supported in Alertmanager >= 0.22.0 only (target_matchers=%v, source_matchers=%v)`, ir.TargetMatchers, ir.SourceMatchers)
return errors.New(msg)
}
return nil
}
// we log a warning if the rule continues to use deprecated values in addition
// to the namespace label we have injected - but we won't convert these
if checkNotEmptyMap(ir.SourceMatch, ir.TargetMatch, ir.SourceMatchRE, ir.TargetMatchRE) {
msg := "inhibit rule is using a deprecated match syntax which will be removed in future versions"
level.Warn(logger).Log("msg", msg, "source_match", ir.SourceMatch, "target_match", ir.TargetMatch, "source_match_re", ir.SourceMatchRE, "target_match_re", ir.TargetMatchRE)
}
// ensure empty data structures are assigned nil so their yaml output is sanitized
ir.TargetMatch = convertMapToNilIfEmpty(ir.TargetMatch)
ir.TargetMatchRE = convertMapToNilIfEmpty(ir.TargetMatchRE)
ir.SourceMatch = convertMapToNilIfEmpty(ir.SourceMatch)
ir.SourceMatchRE = convertMapToNilIfEmpty(ir.SourceMatchRE)
ir.TargetMatchers = convertSliceToNilIfEmpty(ir.TargetMatchers)
ir.SourceMatchers = convertSliceToNilIfEmpty(ir.SourceMatchers)
ir.Equal = convertSliceToNilIfEmpty(ir.Equal)
return nil
}
// sanitize a route and all its child routes.
// Warns if the config is using deprecated syntax against a later version.
// Returns an error if the config could potentially break routing logic
func (r *route) sanitize(amVersion semver.Version, logger log.Logger) error {
if r == nil {
return nil
}
matchersV2Allowed := amVersion.GTE(semver.MustParse("0.22.0"))
muteTimeIntervalsAllowed := matchersV2Allowed
withLogger := log.With(logger, "receiver", r.Receiver)
if !matchersV2Allowed && checkNotEmptyStrSlice(r.Matchers) {
return fmt.Errorf(`invalid syntax in route config for 'matchers' comparison based matching is supported in Alertmanager >= 0.22.0 only (matchers=%v)`, r.Matchers)
}
if matchersV2Allowed && checkNotEmptyMap(r.Match, r.MatchRE) {
msg := "'matchers' field is using a deprecated syntax which will be removed in future versions"
level.Warn(withLogger).Log("msg", msg, "match", r.Match, "match_re", r.MatchRE)
}
if !muteTimeIntervalsAllowed {
msg := "named mute time intervals in route is supported in Alertmanager >= 0.22.0 only - dropping config"
level.Warn(withLogger).Log("msg", msg, "mute_time_intervals", r.MuteTimeIntervals)
r.MuteTimeIntervals = nil
}
for i, child := range r.Routes {
if err := child.sanitize(amVersion, logger); err != nil {
return errors.Wrapf(err, "route[%d]", i)
}
}
// Set to nil if empty so that it doesn't show up in the resulting yaml.
r.Match = convertMapToNilIfEmpty(r.Match)
r.MatchRE = convertMapToNilIfEmpty(r.MatchRE)
r.Matchers = convertSliceToNilIfEmpty(r.Matchers)
return nil
}
func checkNotEmptyMap(in ...map[string]string) bool {
for _, input := range in {
if len(input) > 0 {
return true
}
}
return false
}
func checkNotEmptyStrSlice(in ...[]string) bool {
for _, input := range in {
if len(input) > 0 {
return true
}
}
return false
}
func convertMapToNilIfEmpty(in map[string]string) map[string]string {
if len(in) > 0 {
return in
}
return nil
}
func convertSliceToNilIfEmpty(in []string) []string {
if len(in) > 0 {
return in
}
return nil
}
// contains will return true if any slice value with all whitespace removed
// is equal to the provided value with all whitespace removed
func contains(value string, in []string) bool {
for _, str := range in {
if strings.ReplaceAll(value, " ", "") == strings.ReplaceAll(str, " ", "") {
return true
}
}
return false
}
func inhibitRuleToV2(name, value string) string {
return monitoringv1alpha1.Matcher{
Name: name,
Value: value,
MatchType: monitoringv1alpha1.MatchEqual,
}.String()
}
func inhibitRuleRegexToV2(name, value string) string {
return monitoringv1alpha1.Matcher{
Name: name,
Value: value,
MatchType: monitoringv1alpha1.MatchRegexp,
}.String()
}
func checkIsV2Matcher(in ...[]monitoringv1alpha1.Matcher) bool {
for _, input := range in {
for _, matcher := range input {
if matcher.MatchType != "" {
return true
}
}
}
return false
}
| 1 | 17,233 | since there are several places where we do 1) get secret key ref and 2) validate URL, maybe it's worth having a common method? it could also trim spaces as done here (but not at the other places currently). | prometheus-operator-prometheus-operator | go |
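Sketching the reviewer's suggestion above: the secret lookup, whitespace trimming, and URL validation that the patch performs inline for Slack could live in one shared helper on configGenerator. The following is a minimal sketch, not the repository's actual API — the method name getValidatedURLFromSecret and its field argument are invented for illustration, it assumes the package-level ValidateURL helper referenced by the patch, and it would need corev1 "k8s.io/api/core/v1" imported for the selector type.

// getValidatedURLFromSecret is a hypothetical shared helper: it fetches the
// value stored under a secret key reference, trims surrounding whitespace,
// and validates the result as a URL, so every receiver conversion treats
// URLs from secrets the same way.
func (cg *configGenerator) getValidatedURLFromSecret(ctx context.Context, namespace, field string, sel corev1.SecretKeySelector) (string, error) {
	url, err := cg.store.GetSecretKey(ctx, namespace, sel)
	if err != nil {
		return "", errors.Errorf("failed to get key %q from secret %q", sel.Key, sel.Name)
	}
	// Trim spaces everywhere, as the Slack path already does.
	url = strings.TrimSpace(url)
	// ValidateURL is the validation helper the patch above calls inline.
	if _, err := ValidateURL(url); err != nil {
		return "", errors.Wrapf(err, "invalid %q: %s", field, url)
	}
	return url, nil
}

A call site such as the Slack conversion would then collapse to a single line, e.g. out.APIURL, err = cg.getValidatedURLFromSecret(ctx, crKey.Namespace, "apiURL", *in.APIURL), with the error handled as before.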
@@ -21,11 +21,11 @@ import (
func TestExtendPartialURL(t *testing.T) {
want := "projects/foo/zones/bar/disks/baz"
- if s := extendPartialURL("zones/bar/disks/baz", "foo"); s != want {
+ if s := normalizeToPartialURL("zones/bar/disks/baz", "foo"); s != want {
t.Errorf("got: %q, want: %q", s, want)
}
- if s := extendPartialURL("projects/foo/zones/bar/disks/baz", "gaz"); s != want {
+ if s := normalizeToPartialURL("projects/foo/zones/bar/disks/baz", "gaz"); s != want {
t.Errorf("got: %q, want %q", s, want)
}
} | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"context"
"testing"
)
func TestExtendPartialURL(t *testing.T) {
want := "projects/foo/zones/bar/disks/baz"
if s := extendPartialURL("zones/bar/disks/baz", "foo"); s != want {
t.Errorf("got: %q, want: %q", s, want)
}
if s := extendPartialURL("projects/foo/zones/bar/disks/baz", "gaz"); s != want {
t.Errorf("got: %q, want %q", s, want)
}
}
func TestResourcePopulate(t *testing.T) {
w := testWorkflow()
s, _ := w.NewStep("foo")
name := "name"
genName := w.genName(name)
tests := []struct {
desc string
r, wantR Resource
zone, wantName, wantZone string
wantErr bool
}{
{"defaults case", Resource{}, Resource{daisyName: name, RealName: genName, Project: w.Project}, "", genName, w.Zone, false},
{"nondefaults case", Resource{Project: "pfoo"}, Resource{daisyName: name, RealName: genName, Project: "pfoo"}, "zfoo", genName, "zfoo", false},
{"ExactName case", Resource{ExactName: true}, Resource{daisyName: name, RealName: name, Project: w.Project, ExactName: true}, "", name, w.Zone, false},
{"RealName case", Resource{RealName: "foo"}, Resource{daisyName: name, RealName: "foo", Project: w.Project}, "", "foo", w.Zone, false},
{"RealName and ExactName error case", Resource{RealName: "foo", ExactName: true}, Resource{}, "", "", "", true},
}
for _, tt := range tests {
gotName, gotZone, err := tt.r.populateWithZone(context.Background(), s, name, tt.zone)
if tt.wantErr && err == nil {
t.Errorf("%s: should have returned an error but didn't", tt.desc)
} else if !tt.wantErr && err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
} else if err == nil {
if diffRes := diff(tt.r, tt.wantR, 0); diffRes != "" {
t.Errorf("%s: populated Resource does not match expectation: (-got +want)\n%s", tt.desc, diffRes)
}
if gotName != tt.wantName {
t.Errorf("%s: name population wrong; got: %q, want: %q", tt.desc, gotName, tt.wantName)
}
if gotZone != tt.wantZone {
t.Errorf("%s: zone population wrong; got: %q, want: %q", tt.desc, gotZone, tt.wantZone)
}
}
}
}
func TestResourceNameHelper(t *testing.T) {
w := testWorkflow()
want := w.genName("foo")
got := resourceNameHelper("foo", w, false)
if got != want {
t.Errorf("%q != %q", got, want)
}
want = "foo"
got = resourceNameHelper("foo", w, true)
if got != want {
t.Errorf("%q != %q", got, want)
}
}
func TestResourceValidate(t *testing.T) {
w := testWorkflow()
s, _ := w.NewStep("foo")
tests := []struct {
desc string
r Resource
wantErr bool
}{
{"good case", Resource{RealName: "good", Project: testProject}, false},
{"bad name case", Resource{RealName: "bad!", Project: testProject}, true},
{"bad project case", Resource{RealName: "good", Project: "bad!"}, true},
{"project DNE case", Resource{RealName: "good", Project: DNE}, true},
}
for _, tt := range tests {
err := tt.r.validate(context.Background(), s, "prefix")
if tt.wantErr && err == nil {
t.Errorf("%s: should have returned an error but didn't", tt.desc)
} else if !tt.wantErr && err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
}
func TestResourceValidateWithZone(t *testing.T) {
w := testWorkflow()
s, _ := w.NewStep("foo")
tests := []struct {
desc, zone string
wantErr bool
}{
{"good case", testZone, false},
{"bad zone case", "bad!", true},
{"zone DNE case", DNE, true},
}
for _, tt := range tests {
r := Resource{RealName: "goodname", Project: w.Project}
err := r.validateWithZone(context.Background(), s, tt.zone, "prefix")
if tt.wantErr && err == nil {
t.Errorf("%s: should have returned an error but didn't", tt.desc)
} else if !tt.wantErr && err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
}
| 1 | 9,917 | TestNormalize... I would also split them | GoogleCloudPlatform-compute-image-tools | go |
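A hedged reading of "split them": give each case of the test above its own subtest so a failure pinpoints the input. This is only a sketch against the renamed normalizeToPartialURL from the diff; it assumes nothing beyond the standard testing package.

```go
func TestNormalizeToPartialURL(t *testing.T) {
	const want = "projects/foo/zones/bar/disks/baz"
	cases := []struct {
		name, in, project string
	}{
		{"extends a zone-relative path", "zones/bar/disks/baz", "foo"},
		{"keeps an existing project prefix", "projects/foo/zones/bar/disks/baz", "gaz"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if s := normalizeToPartialURL(tc.in, tc.project); s != want {
				t.Errorf("got: %q, want: %q", s, want)
			}
		})
	}
}
```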
@@ -49,7 +49,7 @@ def readObjects(obj):
_startGenerator(readObjectsHelper_generator(obj))
def generateObjectSubtreeSpeech(obj,indexGen):
- index=indexGen.next()
+ index=next(indexGen)
speech.speakObject(obj,reason=controlTypes.REASON_SAYALL,index=index)
yield obj,index
child=obj.simpleFirstChild | 1 | #sayAllHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2012 NVDA Contributors
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import itertools
import queueHandler
import config
import speech
import textInfos
import globalVars
import api
import tones
import time
import controlTypes
CURSOR_CARET=0
CURSOR_REVIEW=1
_generatorID = None
lastSayAllMode=None
def _startGenerator(generator):
global _generatorID
stop()
_generatorID = queueHandler.registerGeneratorObject(generator)
def stop():
"""Stop say all if a say all is in progress.
"""
global _generatorID
if _generatorID is None:
return
queueHandler.cancelGeneratorObject(_generatorID)
_generatorID = None
def isRunning():
"""Determine whether say all is currently running.
@return: C{True} if say all is currently running, C{False} if not.
@rtype: bool
@note: If say all completes and there is no call to L{stop} (which is called from L{speech.cancelSpeech}), this will incorrectly return C{True}.
This should not matter, but is worth noting nevertheless.
"""
global _generatorID
return _generatorID is not None
def readObjects(obj):
_startGenerator(readObjectsHelper_generator(obj))
def generateObjectSubtreeSpeech(obj,indexGen):
index=indexGen.next()
speech.speakObject(obj,reason=controlTypes.REASON_SAYALL,index=index)
yield obj,index
child=obj.simpleFirstChild
while child:
childSpeech=generateObjectSubtreeSpeech(child,indexGen)
for r in childSpeech:
yield r
child=child.simpleNext
def readObjectsHelper_generator(obj):
lastSentIndex=0
lastReceivedIndex=0
speechGen=generateObjectSubtreeSpeech(obj,itertools.count())
objIndexMap={}
keepReading=True
while True:
# lastReceivedIndex might be None if other speech was interspersed with this say all.
# In this case, we want to send more text in case this was the last chunk spoken.
if lastReceivedIndex is None or (lastSentIndex-lastReceivedIndex)<=1:
if keepReading:
try:
o,lastSentIndex=speechGen.next()
except StopIteration:
keepReading=False
continue
objIndexMap[lastSentIndex]=o
receivedIndex=speech.getLastSpeechIndex()
if receivedIndex!=lastReceivedIndex and (lastReceivedIndex!=0 or receivedIndex!=None):
lastReceivedIndex=receivedIndex
lastReceivedObj=objIndexMap.get(lastReceivedIndex)
if lastReceivedObj is not None:
api.setNavigatorObject(lastReceivedObj, isFocus=lastSayAllMode==CURSOR_CARET)
#Clear old objects from the map
for i in objIndexMap.keys():
if i<=lastReceivedIndex:
del objIndexMap[i]
while speech.isPaused:
yield
yield
def readText(cursor):
global lastSayAllMode
lastSayAllMode=cursor
_startGenerator(readTextHelper_generator(cursor))
def readTextHelper_generator(cursor):
if cursor==CURSOR_CARET:
try:
reader=api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
except (NotImplementedError, RuntimeError):
return
else:
reader=api.getReviewPosition()
lastSentIndex=0
lastReceivedIndex=0
cursorIndexMap={}
keepReading=True
speakTextInfoState=speech.SpeakTextInfoState(reader.obj)
with SayAllProfileTrigger():
while True:
if not reader.obj:
# The object died, so we should too.
return
# lastReceivedIndex might be None if other speech was interspersed with this say all.
# In this case, we want to send more text in case this was the last chunk spoken.
if lastReceivedIndex is None or (lastSentIndex-lastReceivedIndex)<=10:
if keepReading:
bookmark=reader.bookmark
index=lastSentIndex+1
delta=reader.move(textInfos.UNIT_READINGCHUNK,1,endPoint="end")
if delta<=0:
speech.speakWithoutPauses(None)
keepReading=False
continue
speech.speakTextInfo(reader,unit=textInfos.UNIT_READINGCHUNK,reason=controlTypes.REASON_SAYALL,index=index,useCache=speakTextInfoState)
lastSentIndex=index
cursorIndexMap[index]=(bookmark,speakTextInfoState.copy())
try:
reader.collapse(end=True)
except RuntimeError: #MS Word when range covers end of document
# Word specific: without this exception to indicate that further collapsing is not possible, say-all could enter an infinite loop.
speech.speakWithoutPauses(None)
keepReading=False
else:
# We'll wait for speech to catch up a bit before sending more text.
if speech.speakWithoutPauses.lastSentIndex is None or (lastSentIndex-speech.speakWithoutPauses.lastSentIndex)>=10:
# There is a large chunk of pending speech
# Force speakWithoutPauses to send text to the synth so we can move on.
speech.speakWithoutPauses(None)
receivedIndex=speech.getLastSpeechIndex()
if receivedIndex!=lastReceivedIndex and (lastReceivedIndex!=0 or receivedIndex!=None):
lastReceivedIndex=receivedIndex
bookmark,state=cursorIndexMap.get(receivedIndex,(None,None))
if state:
state.updateObj()
if bookmark is not None:
updater=reader.obj.makeTextInfo(bookmark)
if cursor==CURSOR_CARET:
updater.updateCaret()
if cursor!=CURSOR_CARET or config.conf["reviewCursor"]["followCaret"]:
api.setReviewPosition(updater, isCaret=cursor==CURSOR_CARET)
elif not keepReading and lastReceivedIndex==lastSentIndex:
# All text has been sent to the synth.
# Turn the page and start again if the object supports it.
if isinstance(reader.obj,textInfos.DocumentWithPageTurns):
try:
reader.obj.turnPage()
except RuntimeError:
break
else:
reader=reader.obj.makeTextInfo(textInfos.POSITION_FIRST)
keepReading=True
else:
break
while speech.isPaused:
yield
yield
# Wait until the synth has actually finished speaking.
# Otherwise, if there is a triggered profile with a different synth,
# we will switch too early and truncate speech (even up to several lines).
# Send another index and wait for it.
index=lastSentIndex+1
speech.speak([speech.IndexCommand(index)])
while speech.getLastSpeechIndex()<index:
yield
yield
# Some synths say they've handled the index slightly sooner than they actually have,
# so wait a bit longer.
for i in xrange(30):
yield
class SayAllProfileTrigger(config.ProfileTrigger):
"""A configuration profile trigger for when say all is in progress.
"""
spec = "sayAll"
| 1 | 25,074 | All changes to this file are going to conflict with #7599. Please revert these as well. They will be addressed during the Python 3 transition. | nvaccess-nvda | py |
@@ -89,8 +89,10 @@ func (l *twoRandomChoicesList) Remove(peer peer.StatusPeer, _ peer.Identifier, p
}
func (l *twoRandomChoicesList) Choose(_ *transport.Request) peer.StatusPeer {
- l.m.RLock()
- defer l.m.RUnlock()
+	// Usage of a write lock because l.random.Intn is not thread safe
+ // see: https://golang.org/pkg/math/rand/
+ l.m.Lock()
+ defer l.m.Unlock()
numSubs := len(l.subscribers)
if numSubs == 0 { | 1 | // Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tworandomchoices
import (
"math/rand"
"sync"
"time"
"go.uber.org/atomic"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/peer/abstractlist"
)
type twoRandomChoicesList struct {
subscribers []*subscriber
random *rand.Rand
m sync.RWMutex
}
// Option configures the peer list implementation constructor.
type Option interface {
apply(*options)
}
type options struct{}
// NewImplementation creates a new two-random-choices
// abstractlist.Implementation.
//
// Use this constructor instead of NewList, when wanting to do custom peer
// connection management.
func NewImplementation(opts ...Option) abstractlist.Implementation {
return newTwoRandomChoicesList(10, rand.NewSource(time.Now().UnixNano()))
}
func newTwoRandomChoicesList(cap int, source rand.Source) *twoRandomChoicesList {
return &twoRandomChoicesList{
subscribers: make([]*subscriber, 0, cap),
random: rand.New(source),
}
}
func (l *twoRandomChoicesList) Add(peer peer.StatusPeer, _ peer.Identifier) abstractlist.Subscriber {
l.m.Lock()
defer l.m.Unlock()
index := len(l.subscribers)
l.subscribers = append(l.subscribers, &subscriber{
index: index,
peer: peer,
})
return l.subscribers[index]
}
func (l *twoRandomChoicesList) Remove(peer peer.StatusPeer, _ peer.Identifier, ps abstractlist.Subscriber) {
l.m.Lock()
defer l.m.Unlock()
sub, ok := ps.(*subscriber)
if !ok || len(l.subscribers) == 0 {
return
}
index := sub.index
last := len(l.subscribers) - 1
l.subscribers[index] = l.subscribers[last]
l.subscribers[index].index = index
l.subscribers = l.subscribers[0:last]
}
func (l *twoRandomChoicesList) Choose(_ *transport.Request) peer.StatusPeer {
l.m.RLock()
defer l.m.RUnlock()
numSubs := len(l.subscribers)
if numSubs == 0 {
return nil
}
if numSubs == 1 {
return l.subscribers[0].peer
}
i := l.random.Intn(numSubs)
j := i + 1 + l.random.Intn(numSubs-1)
if j >= numSubs {
j -= numSubs
}
if l.subscribers[i].pending.Load() > l.subscribers[j].pending.Load() {
i = j
}
return l.subscribers[i].peer
}
type subscriber struct {
index int
peer peer.StatusPeer
pending atomic.Int32
}
var _ abstractlist.Subscriber = (*subscriber)(nil)
func (s *subscriber) UpdatePendingRequestCount(pendingRequestCount int) {
s.pending.Store(int32(pendingRequestCount))
}
| 1 | 19,619 | Just a thought: use a spinlock (busy wait with compare-and-swap) to generate the 2 random numbers inside the RLock. We know collisions are rare, so we don't really need that "large" write lock, only a lock specifically for "rand". Or at least use a separate Lock (Mutex) just for "rand" (wrapped in a method). A Mutex will use some sort of spinlock as well, but it may use a linked list to make it fairer. | yarpc-yarpc-go | go
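One way to act on this comment, sketched under the assumption that only the *rand.Rand needs serializing: wrap the generator in its own small mutex so Choose can keep the cheaper RLock for the subscribers slice. Illustrative only, not the yarpc implementation; the package name is hypothetical.

```go
package randutil

import (
	"math/rand"
	"sync"
)

// lockedRand guards a non-thread-safe *rand.Rand with its own mutex,
// independent of whatever lock protects the peer list.
type lockedRand struct {
	mu sync.Mutex
	r  *rand.Rand
}

// Intn serializes access to the underlying generator only.
func (lr *lockedRand) Intn(n int) int {
	lr.mu.Lock()
	defer lr.mu.Unlock()
	return lr.r.Intn(n)
}
```

Choose would then take l.m.RLock() as before and call lr.Intn for the two draws; whether the extra mutex actually beats one plain write lock is a benchmarking question, as the comment suggests.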
@@ -2,6 +2,11 @@
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import re
+import sys
+
+from astroid.__pkginfo__ import version as astroid_version
+
+from pylint.__pkginfo__ import version as pylint_version
# Allow stopping after the first semicolon/hash encountered,
# so that an option can be continued with the reasons | 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import re
# Allow stopping after the first semicolon/hash encountered,
# so that an option can be continued with the reasons
# why it is active or disabled.
OPTION_RGX = re.compile(r"\s*#.*\bpylint:\s*([^;#]+)[;#]{0,1}")
PY_EXTS = (".py", ".pyc", ".pyo", ".pyw", ".so", ".dll")
MSG_STATE_CONFIDENCE = 2
_MSG_ORDER = "EWRCIF"
MSG_STATE_SCOPE_CONFIG = 0
MSG_STATE_SCOPE_MODULE = 1
# The line/node distinction does not apply to fatal errors and reports.
_SCOPE_EXEMPT = "FR"
MSG_TYPES = {
"I": "info",
"C": "convention",
"R": "refactor",
"W": "warning",
"E": "error",
"F": "fatal",
}
MSG_TYPES_LONG = {v: k for k, v in MSG_TYPES.items()}
MSG_TYPES_STATUS = {"I": 0, "C": 16, "R": 8, "W": 4, "E": 2, "F": 1}
# You probably don't want to change the MAIN_CHECKER_NAME
# This would affect rcfile generation and retro-compatibility
# on all projects using [MASTER] in their rcfile.
MAIN_CHECKER_NAME = "master"
class WarningScope:
LINE = "line-based-msg"
NODE = "node-based-msg"
| 1 | 11,953 | Can we grab it directly from `__pkginfo__` as that is the source of truth for the version? | PyCQA-pylint | py |
@@ -19,6 +19,7 @@ package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
+
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
import java.net.URI;
public class GridData implements DataFetcher {
private final Distributor distributor;
private final URI publicUri;
public GridData(Distributor distributor, URI publicUri) {
this.distributor = Require.nonNull("Distributor", distributor);
this.publicUri = Require.nonNull("Grid's public URI", publicUri);
}
@Override
public Object get(DataFetchingEnvironment environment) {
return new Grid(distributor, publicUri);
}
}
| 1 | 17,791 | We can revert this to reduce the diff of the PR. | SeleniumHQ-selenium | java |
@@ -94,7 +94,7 @@ class BazelBuildFileView {
goPkg = goPkg.replaceFirst("cloud\\/", "");
String goImport = "";
- if (isCloud) {
+ if (isCloud || protoPkg.contains("cloud")) {
goImport = "cloud.google.com/go/";
goPkg = goPkg.replaceFirst("v(.+);", "apiv$1;");
} else { | 1 | package com.google.api.codegen.bazel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;
class BazelBuildFileView {
private static final Pattern LABEL_NAME = Pattern.compile(":\\w+$");
private final Map<String, String> tokens = new HashMap<>();
BazelBuildFileView(ApiVersionedDir bp) {
if (bp.getProtoPackage() == null) {
return;
}
tokens.put("name", bp.getName());
tokens.put("assembly_name", bp.getAssemblyName());
tokens.put("proto_srcs", joinSetWithIndentation(bp.getProtos()));
tokens.put("version", bp.getVersion());
tokens.put("package", bp.getProtoPackage());
String packPrefix = bp.getProtoPackage().replace(".", "/") + '/';
Set<String> actualImports = new TreeSet<>();
for (String imp : bp.getImports()) {
if (imp.startsWith(packPrefix) && imp.indexOf('/', packPrefix.length()) == -1) {
// Ignore imports from same package, as all protos in same package are put in same
// proto_library target.
continue;
}
String actualImport = imp.replace(".proto", "_proto");
if (actualImport.startsWith("google/protobuf/")) {
actualImport = actualImport.replace("google/protobuf/", "@com_google_protobuf//:");
} else {
actualImport = convertPathToLabel("", actualImport);
}
actualImports.add(actualImport);
}
tokens.put("proto_deps", joinSetWithIndentation(actualImports));
tokens.put("go_proto_importpath", bp.getLangProtoPackages().get("go").split(";")[0]);
tokens.put("go_proto_deps", joinSetWithIndentation(mapGoProtoDeps(actualImports)));
if (bp.getGapicYamlPath() == null) {
return;
}
String serviceConfigJson = bp.getServiceConfigJsonPath();
if (serviceConfigJson == null) {
serviceConfigJson = "";
}
tokens.put("grpc_service_config", convertPathToLabel(bp.getProtoPackage(), serviceConfigJson));
tokens.put("gapic_yaml", convertPathToLabel(bp.getProtoPackage(), bp.getGapicYamlPath()));
tokens.put("service_yaml", convertPathToLabel(bp.getProtoPackage(), bp.getServiceYamlPath()));
Set<String> javaTests = new TreeSet<>();
for (String service : bp.getServices()) {
String javaPackage = bp.getLangGapicPackages().get("java");
if (javaPackage == null) {
continue;
}
String actualService =
bp.getLangGapicNameOverrides()
.get("java")
.getOrDefault(bp.getProtoPackage() + "." + service, service);
if (actualService.startsWith("IAM")) {
actualService = actualService.replaceAll("^IAM", "Iam");
}
javaTests.add(javaPackage + "." + actualService + "ClientTest");
}
tokens.put("java_tests", joinSetWithIndentation(javaTests));
tokens.put("java_gapic_deps", joinSetWithIndentationNl(mapJavaGapicDeps(actualImports)));
tokens.put(
"java_gapic_test_deps", joinSetWithIndentationNl(mapJavaGapicTestDeps(actualImports)));
// Construct GAPIC import path & package name based on go_package proto option
String goImport =
assembleGoImportPath(
bp.getCloudScope(), bp.getProtoPackage(), bp.getLangProtoPackages().get("go"));
tokens.put("go_gapic_importpath", goImport);
tokens.put("go_gapic_test_importpath", goImport.split(";")[0]);
tokens.put("go_gapic_deps", joinSetWithIndentationNl(mapGoGapicDeps(actualImports)));
}
private String assembleGoImportPath(boolean isCloud, String protoPkg, String goPkg) {
goPkg = goPkg.replaceFirst("google\\.golang\\.org\\/genproto\\/googleapis\\/", "");
goPkg = goPkg.replaceFirst("cloud\\/", "");
String goImport = "";
if (isCloud) {
goImport = "cloud.google.com/go/";
goPkg = goPkg.replaceFirst("v(.+);", "apiv$1;");
} else {
goImport = "google.golang.org/";
String pkgName = goPkg.split(";")[1];
// use the proto package path for a non-Cloud Go import path
// example: google.golang.org/google/ads/googleads/v3/services;services
goPkg = protoPkg.replaceAll("\\.", "\\/");
goPkg += ";" + pkgName;
}
return goImport + goPkg;
}
private String convertPathToLabel(String pkg, String path) {
if (path == null) {
return path;
}
if (!path.contains("/")) {
return path;
}
String[] pkgTokens = pkg.isEmpty() ? new String[0] : pkg.split("\\.");
String[] pathTokens = path.split("/");
// Find pkgTokens suffix & pathTokens prefix intersection
int index = 0;
for (; index < pkgTokens.length && index < pathTokens.length; index++) {
if (!pathTokens[index].equals(pkgTokens[pkgTokens.length - index - 1])) {
break;
}
}
List<String> tokens = new ArrayList<>();
for (int i = 0; i < pkgTokens.length - index; i++) {
tokens.add(pkgTokens[i]);
}
for (int i = index; i < pathTokens.length; i++) {
tokens.add(pathTokens[i]);
}
StringBuilder sb = new StringBuilder("/");
for (String token : tokens) {
sb.append('/').append(token);
}
int lastSlashIndex = sb.lastIndexOf("/");
sb.replace(lastSlashIndex, lastSlashIndex + 1, ":");
return sb.toString();
}
private String joinSetWithIndentation(Set<String> set) {
return set.isEmpty() ? "" : '"' + String.join("\",\n \"", set) + "\",";
}
private String joinSetWithIndentationNl(Set<String> set) {
String rv = joinSetWithIndentation(set);
return rv.isEmpty() ? rv : "\n " + rv;
}
private String replaceLabelName(String labelPathAndName, String newLabelName) {
return LABEL_NAME.matcher(labelPathAndName).replaceAll(newLabelName);
}
private Set<String> mapJavaGapicDeps(Set<String> protoImports) {
Set<String> javaImports = new TreeSet<>();
for (String protoImport : protoImports) {
if (protoImport.endsWith(":iam_policy_proto") || protoImport.endsWith(":policy_proto")) {
javaImports.add(replaceLabelName(protoImport, ":iam_java_proto"));
} else if (protoImport.endsWith(":service_proto")) {
javaImports.add(replaceLabelName(protoImport, ":api_java_proto"));
}
}
return javaImports;
}
private Set<String> mapJavaGapicTestDeps(Set<String> protoImports) {
Set<String> javaImports = new TreeSet<>();
for (String protoImport : protoImports) {
if (protoImport.endsWith(":iam_policy_proto") || protoImport.endsWith(":policy_proto")) {
javaImports.add(replaceLabelName(protoImport, ":iam_java_grpc"));
}
}
return javaImports;
}
private Set<String> mapGoProtoDeps(Set<String> protoImports) {
Set<String> goImports = new TreeSet<>();
for (String protoImport : protoImports) {
if (protoImport.startsWith("@com_google_protobuf//")) {
continue;
}
if (protoImport.endsWith(":resource_proto")
|| protoImport.endsWith(":client_proto")
|| protoImport.endsWith(":field_behavior_proto")
|| protoImport.endsWith(":http_proto")) {
goImports.add(replaceLabelName(protoImport, ":annotations_go_proto"));
} else if (protoImport.endsWith(":operations_proto")) {
goImports.add(replaceLabelName(protoImport, ":longrunning_go_proto"));
} else if (protoImport.endsWith(":iam_policy_proto")
|| protoImport.endsWith(":policy_proto")) {
goImports.add(replaceLabelName(protoImport, ":iam_go_proto"));
} else if (protoImport.endsWith(":config_change_proto")) {
goImports.add(replaceLabelName(protoImport, ":configchange_go_proto"));
} else if (protoImport.endsWith(":service_proto") || protoImport.endsWith(":quota_proto")) {
goImports.add(replaceLabelName(protoImport, ":serviceconfig_go_proto"));
} else if (protoImport.endsWith(":postal_address_proto")) {
goImports.add(replaceLabelName(protoImport, ":postaladdress_go_proto"));
} else if (protoImport.endsWith(":monitored_resource_proto")) {
goImports.add(replaceLabelName(protoImport, ":monitoredres_go_proto"));
} else if (protoImport.endsWith(":launch_stage_proto")) {
goImports.add(replaceLabelName(protoImport, ":api_go_proto"));
} else {
goImports.add(protoImport.replaceAll("_proto$", "_go_proto"));
}
}
return goImports;
}
private Set<String> mapGoGapicDeps(Set<String> protoImports) {
Set<String> goImports = new TreeSet<>();
for (String protoImport : protoImports) {
if (protoImport.startsWith("@com_google_protobuf//")) {
if (protoImport.endsWith(":duration_proto")) {
goImports.add("@io_bazel_rules_go//proto/wkt:duration_go_proto");
}
continue;
}
if (protoImport.endsWith(":operations_proto")) {
goImports.add(replaceLabelName(protoImport, ":longrunning_go_gapic"));
goImports.add(replaceLabelName(protoImport, ":longrunning_go_proto"));
goImports.add("@com_google_cloud_go//longrunning:go_default_library");
for (String pi : protoImports) {
if (pi.startsWith("@com_google_protobuf//")) {
if (pi.endsWith(":struct_proto")) {
goImports.add("@io_bazel_rules_go//proto/wkt:struct_go_proto");
} else if (pi.endsWith(":any_proto")) {
goImports.add("@io_bazel_rules_go//proto/wkt:any_go_proto");
}
}
}
} else if (protoImport.endsWith(":iam_policy_proto")
|| protoImport.endsWith(":policy_proto")) {
goImports.add(replaceLabelName(protoImport, ":iam_go_proto"));
} else if (protoImport.endsWith(":service_proto")) {
goImports.add(replaceLabelName(protoImport, ":serviceconfig_go_proto"));
}
}
return goImports;
}
Map<String, String> getTokens() {
return Collections.unmodifiableMap(this.tokens);
}
}
| 1 | 30,596 | This looks weird. `isCloud` should define whether it is a cloud or not. Here it does so only partially, and it can be overridden by the protoPkg value (which is also an argument to this function). Please make sure that isCloud completely defines the cloud thing (i.e. it can be as straightforward as moving `protoPkg.contains("cloud")` from here to the place where `assembleGoImportPath` is called). | googleapis-gapic-generator | java
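The reviewer's refactor, sketched in Go for brevity even though the code under review is Java: fold both signals into one predicate computed at the call site, so the boolean handed to assembleGoImportPath fully determines the branch. The helper name isCloudPackage is invented.

```go
package main

import (
	"fmt"
	"strings"
)

// isCloudPackage decides "cloud or not" in exactly one place, instead of
// letting the callee second-guess its isCloud argument with a protoPkg check.
func isCloudPackage(cloudScope bool, protoPkg string) bool {
	return cloudScope || strings.Contains(protoPkg, "cloud")
}

func main() {
	// Call-site shape: assembleGoImportPath(isCloudPackage(scope, pkg), pkg, goPkg)
	fmt.Println(isCloudPackage(false, "google.cloud.talent.v4beta1"))
}
```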
@@ -1693,7 +1693,9 @@ Ex_Lob_Error ExLob::readDataToLocalFile(char *fileName, Int64 offset, Int64 siz
// open the target file for writing
int filePerms = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
int openFlags = O_RDWR ; // O_DIRECT needs mem alignment
- if ((LobTgtFileFlags)fileflags == Lob_Append_Or_Error )
+ if (((LobTgtFileFlags)fileflags == Lob_Append_Or_Error ) ||
+ ((LobTgtFileFlags)fileflags == Lob_Error_Or_Create ) ||
+ ((LobTgtFileFlags)fileflags == Lob_Append_Or_Create))
openFlags |= O_APPEND;
else
openFlags |= O_TRUNC; | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: ex_lob.C
* Description: class to store and retrieve LOB data.
*
*
* Created: 10/29/2012
* Language: C++
*
*
*
*
*****************************************************************************
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <malloc.h>
#include <string>
#include <errno.h>
#include <signal.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/time.h>
#define SQ_USE_HDFS 1
#ifdef SQ_USE_HDFS
#include "hdfs.h"
#include "jni.h"
#endif
#include "ExpLOBstats.h"
#include "ExpLOBaccess.h"
#include "ExpLOBinterface.h"
#include "NAVersionedObject.h"
#include "ComQueue.h"
#include "NAMemory.h"
#include <seabed/ms.h>
#include <../../sqf/src/seabed/src/trans.h>
#include <seabed/fserr.h>
#include <curl/curl.h>
extern int ms_transid_get(bool pv_supp,
bool pv_trace,
MS_Mon_Transid_Type *pp_transid,
MS_Mon_Transseq_Type *pp_startid);
extern int ms_transid_reinstate(MS_Mon_Transid_Type, MS_Mon_Transseq_Type);
// short LobServerFNum;
SB_Phandle_Type serverPhandle;
ExLob::ExLob() :
storage_(Lob_Invalid_Storage),
dir_(string()),
lobGlobalHeap_(NULL),
// fdDesc_(-1),
fdDesc_(NULL),
fs_(NULL),
fdData_(NULL),
openFlags_(0)
{
lobDataFile_[0] = '\0';
lobDescFile_[0] = '\0';
}
ExLob::~ExLob()
{
if (fdData_) {
hdfsCloseFile(fs_, fdData_);
fdData_ = NULL;
}
if (fdDesc_) {
hdfsCloseFile(fs_, fdDesc_);
fdDesc_ = NULL;
}
/*
Commenting this out. It is causing cores during hive access.
Note: not calling hdfsDisconnect will cause a leak that needs to be
fixed at a different place
if (fs_){
hdfsDisconnect(fs_);
fs_=NULL;
}*/
}
Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode,
char *dir,
LobsStorage storage,
char *hdfsServer, Int64 hdfsPort,
int bufferSize , short replication ,
int blockSize, Int64 lobMaxSize, ExLobGlobals *lobGlobals)
{
int openFlags;
mode_t filePerms = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
struct timespec startTime;
struct timespec endTime;
Int64 secs, nsecs, totalnsecs;
if (dir)
{
if (dir_.empty())
{
dir_ = string(dir);
}
snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s/%s", dir_.c_str(), lobFile);
snprintf(lobDescFile_, MAX_LOB_FILE_NAME_LEN, "%s/%s.desc", dir_.c_str(), lobFile);
}
else
{
snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s", lobFile);
snprintf(lobDescFile_, MAX_LOB_FILE_NAME_LEN, "%s.desc", lobFile);
}
if (storage_ != Lob_Invalid_Storage)
{
return LOB_INIT_ERROR;
} else
{
storage_ = storage;
}
stats_.init();
hdfsServer_ = hdfsServer;
hdfsPort_ = hdfsPort;
clock_gettime(CLOCK_MONOTONIC, &startTime);
if (lobGlobals->getHdfsFs() == NULL)
{
fs_ = hdfsConnect(hdfsServer_, hdfsPort_);
if (fs_ == NULL)
return LOB_HDFS_CONNECT_ERROR;
lobGlobals->setHdfsFs(fs_);
}
else
{
fs_ = lobGlobals->getHdfsFs();
}
clock_gettime(CLOCK_MONOTONIC, &endTime);
secs = endTime.tv_sec - startTime.tv_sec;
nsecs = endTime.tv_nsec - startTime.tv_nsec;
if (nsecs < 0)
{
secs--;
nsecs += NUM_NSECS_IN_SEC;
}
totalnsecs = (secs * NUM_NSECS_IN_SEC) + nsecs;
stats_.hdfsConnectionTime += totalnsecs;
if (mode == EX_LOB_CREATE)
{
// check if file is already created
hdfsFileInfo *fInfo = hdfsGetPathInfo(fs_, lobDataFile_);
if (fInfo != NULL)
{
hdfsFreeFileInfo(fInfo, 1);
return LOB_DATA_FILE_CREATE_ERROR;
}
openFlags = O_WRONLY | O_CREAT;
fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags, bufferSize, replication, blockSize);
if (!fdData_)
{
return LOB_DATA_FILE_CREATE_ERROR;
}
hdfsCloseFile(fs_, fdData_);
fdData_ = NULL;
if (!lobGlobals->isHive())
{
//Create the desc header file that holds info about the
//lob data file offsets etc.
fdDesc_ = hdfsOpenFile(fs_, lobDescFile_, O_WRONLY, bufferSize, replication, blockSize);
if (!fdDesc_)
{
return LOB_DESC_FILE_CREATE_ERROR;
}
//write empty header info into it.
ExLobDescHeader header(lobMaxSize);
Int64 numWritten = 0;
numWritten = hdfsWrite(fs_, fdDesc_, (void *)&header, sizeof(ExLobDescHeader));
if (numWritten <=0)
return LOB_DATA_WRITE_ERROR;
if (hdfsFlush(fs_, fdDesc_))
return LOB_DATA_FLUSH_ERROR;
hdfsCloseFile(fs_, fdDesc_);
fdDesc_ = NULL;
}
}
lobGlobalHeap_ = lobGlobals->getHeap();
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::fetchCursor()
{
Ex_Lob_Error err;
request_.setType(Lob_Req_Fetch_Cursor);
err = request_.send();
if (err != LOB_OPER_OK) {
return err;
}
err = request_.getError();
return err;
}
Ex_Lob_Error ExLob::delDesc(Int64 descNum)
{
Ex_Lob_Error err;
request_.setType(Lob_Req_Del_Desc);
request_.setDescNumIn(descNum);
err = request_.send();
if (err != LOB_OPER_OK) {
return err;
}
err = request_.getError();
return err;
}
Ex_Lob_Error ExLob::getDesc(ExLobDesc &desc)
{
Ex_Lob_Error err;
request_.setType(Lob_Req_Get_Desc);
err = request_.send();
if (err != LOB_OPER_OK) {
return err;
}
request_.getDescOut(desc);
err = request_.getError();
return err;
}
Ex_Lob_Error ExLob::putDesc(ExLobDesc &desc, Int64 descNum)
{
Ex_Lob_Error err;
request_.setType(Lob_Req_Put_Desc);
request_.setDescNumIn(descNum);
request_.putDescIn(desc);
err = request_.send();
if (err != LOB_OPER_OK) {
return err;
}
err = request_.getError();
return err;
}
Ex_Lob_Error ExLob::writeData(Int64 offset, char *data, Int32 size, Int64 &operLen)
{
Ex_Lob_Error err;
if (!fdData_ || (openFlags_ != (O_WRONLY | O_APPEND))) // file is not open for write
{
// get file info
hdfsFileInfo *fInfo = hdfsGetPathInfo(fs_, lobDataFile_);
if (fInfo == NULL) {
return LOB_DATA_FILE_NOT_FOUND_ERROR;
}
}
hdfsCloseFile(fs_, fdData_);
fdData_=NULL;
openFlags_ = O_WRONLY | O_APPEND;
fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags_, 0, 0, 0);
if (!fdData_) {
openFlags_ = -1;
return LOB_DATA_FILE_OPEN_ERROR;
}
if ((operLen = hdfsWrite(fs_, fdData_, data, size)) == -1) {
return LOB_DATA_WRITE_ERROR;
}
if (hdfsFlush(fs_, fdData_)) {
return LOB_DATA_FLUSH_ERROR;
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::writeDataSimple(char *data, Int64 size, LobsSubOper subOperation, Int64 &operLen,
int bufferSize , short replication , int blockSize)
{
Ex_Lob_Error err;
if (!fdData_ || (openFlags_ != (O_WRONLY | O_APPEND))) // file is not open for write
{
// get file info
hdfsFileInfo *fInfo = hdfsGetPathInfo(fs_, lobDataFile_);
if (fInfo == NULL) {
return LOB_DATA_FILE_NOT_FOUND_ERROR;
} else {
// file exists, check the size
if (fInfo->mSize != 0) {
hdfsFreeFileInfo(fInfo, 1);
return LOB_DATA_FILE_NOT_EMPTY_ERROR;
}
}
hdfsCloseFile(fs_, fdData_);
fdData_=NULL;
openFlags_ = O_WRONLY | O_APPEND ;
fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags_, bufferSize, replication, blockSize);
if (!fdData_) {
openFlags_ = -1;
return LOB_DATA_FILE_OPEN_ERROR;
}
}
if (hdfsWrite(fs_, fdData_, data, size) == -1) {
return LOB_DATA_WRITE_ERROR;
}
if (hdfsFlush(fs_, fdData_)) {
return LOB_DATA_FLUSH_ERROR;
}
operLen = size;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::emptyDirectory()
{
Ex_Lob_Error err;
int numExistingFiles=0;
hdfsFileInfo *fileInfos = hdfsListDirectory(fs_, lobDataFile_, &numExistingFiles);
if (fileInfos == NULL)
{
return LOB_DATA_FILE_NOT_FOUND_ERROR; // lobDataFile_ is a directory here
}
for (int i = 0; i < numExistingFiles; i++)
{
#ifdef USE_HADOOP_1
int retCode = hdfsDelete(fs_, fileInfos[i].mName);
#else
int retCode = hdfsDelete(fs_, fileInfos[i].mName, 0);
#endif
if (retCode !=0)
{
//ex_assert(retCode == 0, "delete returned error");
return LOB_DATA_FILE_DELETE_ERROR;
}
}
if (fileInfos)
{
hdfsFreeFileInfo(fileInfos, numExistingFiles);
}
return LOB_OPER_OK;
}
struct MemoryStruct {
char *memory;
size_t size;
NAHeap *heap;
};
// callback for writing from http file to memory while dynamically growing the size.
static size_t
WriteMemoryCallback(void *contents, size_t size, size_t nmemb, void *userp)
{
size_t realsize = size * nmemb;
struct MemoryStruct *mem = (struct MemoryStruct *)userp;
mem->memory = (char *)(mem->heap)->allocateMemory(mem->size + realsize + 1 );
if(mem->memory == NULL) {
/* out of memory! */
return 0;
}
memcpy(&(mem->memory[mem->size]), contents, realsize);
mem->size += realsize;
mem->memory[mem->size] = 0;
return realsize;
}
//Call back for retrieving http file header info
static size_t header_throw_away(void *ptr, size_t size, size_t nmemb, void *data)
{
/* we are not interested in the headers itself,
so we only return the size we would have saved ... */
return (size_t)(size * nmemb);
}
Ex_Lob_Error ExLob::statSourceFile(char *srcfile, Int64 &sourceEOF)
{
// check if the source file is a hdfs file or from local file system.
LobInputOutputFileType srcType = fileType(srcfile);
if (srcType == HDFS_FILE)
{
hdfsFile sourceFile = hdfsOpenFile(fs_,srcfile,O_RDONLY,0,0,0);
if (!sourceFile)
return LOB_SOURCE_FILE_OPEN_ERROR;
hdfsFileInfo *sourceFileInfo = hdfsGetPathInfo(fs_,srcfile);
// get EOD from source hdfs file.
if (sourceFileInfo)
sourceEOF = sourceFileInfo->mSize;
else
return LOB_SOURCE_FILE_OPEN_ERROR;
}
else if (srcType == LOCAL_FILE)
{
int openFlags = O_RDONLY;
int fdSrcFile = open(srcfile, openFlags);
if (fdSrcFile < 0) {
return LOB_SOURCE_FILE_OPEN_ERROR;
}
if (flock(fdSrcFile, LOCK_EX) == -1) {
return LOB_SOURCE_FILE_LOCK_ERROR;
}
struct stat statbuf;
if (stat(srcfile, &statbuf) != 0) {
return LOB_SOURCE_FILE_STAT_ERROR;
}
sourceEOF = statbuf.st_size;
flock(fdSrcFile, LOCK_UN);
close(fdSrcFile);
}
else if (srcType == CURL_FILE)
{
// This is an http/ftp file. Use curl interface to determine size
CURL *curl;
CURLcode res;
const time_t filetime = 0;
double filesize = 0;
curl = curl_easy_init();
if(curl) {
curl_easy_setopt(curl, CURLOPT_URL, srcfile);
/* find file size from header */
/* No download if the file */
curl_easy_setopt(curl, CURLOPT_NOBODY, 1L);
/* Ask for filetime */
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt(curl, CURLOPT_FILETIME, 1L);
/* No header output: TODO 14.1 http-style HEAD output for ftp */
curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,header_throw_away);
curl_easy_setopt(curl, CURLOPT_HEADER, 0L);
res = curl_easy_perform(curl);
if(CURLE_OK == res) {
res = curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &filesize);
if (res == CURLE_OK)
{
Int64 temp_fs = 0;
temp_fs = filesize;
sourceEOF = temp_fs;
}
else
return LOB_SOURCE_FILE_STAT_ERROR;
}
curl_easy_cleanup(curl);
}
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset)
{
Ex_Lob_Error lobErr = LOB_OPER_OK;
// check if the source file is a hdfs file or from local file system.
LobInputOutputFileType srcType = fileType(srcfile);
if (srcType == HDFS_FILE)
{
lobErr = readHdfsSourceFile(srcfile, fileData, size, offset);
}
else if (srcType == LOCAL_FILE)
{
lobErr = readLocalSourceFile(srcfile, fileData, size, offset);
}
else if(srcType == CURL_FILE)
{
lobErr = readExternalSourceFile((char *)srcfile, fileData, size, offset);
}
else
return LOB_SOURCE_FILE_OPEN_ERROR;
return lobErr;
}
Ex_Lob_Error ExLob::readHdfsSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset)
{
int openFlags = O_RDONLY;
hdfsFile fdSrcFile = hdfsOpenFile(fs_,srcfile, openFlags,0,0,0);
if (fdSrcFile == NULL) {
return LOB_SOURCE_FILE_OPEN_ERROR;
}
fileData = (char *) (getLobGlobalHeap())->allocateMemory(size);
if (fileData == (char *)-1) {
return LOB_SOURCE_DATA_ALLOC_ERROR;
}
if (hdfsPread(fs_,fdSrcFile, offset,fileData, size) == -1) {
hdfsCloseFile(fs_,fdSrcFile);
getLobGlobalHeap()->deallocateMemory(fileData);
fileData = NULL;
return LOB_SOURCE_FILE_READ_ERROR;
}
hdfsCloseFile(fs_,fdSrcFile);
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readLocalSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset)
{
int openFlags = O_RDONLY;
int fdSrcFile = open(srcfile, openFlags);
if (fdSrcFile < 0 ) {
return LOB_SOURCE_FILE_OPEN_ERROR;
}
if (flock(fdSrcFile, LOCK_EX) == -1) {
return LOB_SOURCE_FILE_LOCK_ERROR;
}
struct stat statbuf;
if (stat(srcfile, &statbuf) != 0) {
return LOB_SOURCE_FILE_STAT_ERROR;
}
fileData = (char *) (getLobGlobalHeap())->allocateMemory(size);
if (fileData == (char *)-1) {
return LOB_SOURCE_DATA_ALLOC_ERROR;
}
if (pread(fdSrcFile, fileData, size, offset) == -1) {
close(fdSrcFile);
getLobGlobalHeap()->deallocateMemory(fileData);
fileData = NULL;
return LOB_SOURCE_FILE_READ_ERROR;
}
flock(fdSrcFile, LOCK_UN);
close(fdSrcFile);
return LOB_OPER_OK ;
}
Ex_Lob_Error ExLob::readExternalSourceFile(char *srcfile, char *&fileData, Int32 &size,Int64 offset)
{
CURL *curl;
CURLcode res;
struct MemoryStruct chunk;
chunk.memory = (char *) (getLobGlobalHeap())->allocateMemory(size);
chunk.size = 0; /* no data at this point */
chunk.heap = getLobGlobalHeap();
curl = curl_easy_init();
if(curl) {
curl_easy_setopt(curl, CURLOPT_URL, srcfile);
/* send all data to this function */
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
/* we pass our 'chunk' struct to the callback function */
curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
res = curl_easy_perform(curl);
curl_easy_cleanup(curl);
fileData = chunk.memory;
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::writeDesc(Int64 &sourceLen, char *source, LobsSubOper subOper, Int64 &descNumOut, Int64 &operLen, Int64 lobMaxSize)
{
Ex_Lob_Error err;
Int64 dataOffset = 0;
// Calculate sourceLen for each subOper.
if (subOper == Lob_File)
{
err = statSourceFile(source, sourceLen);
if (err != LOB_OPER_OK)
return err;
}
if (sourceLen <= 0 || sourceLen > lobMaxSize)
{
return LOB_MAX_LIMIT_ERROR; //exceeded the size of the max lob size
}
err = allocateDesc((unsigned int)sourceLen, descNumOut, dataOffset, lobMaxSize);
operLen = 0;
if (err != LOB_OPER_OK)
return err;
//send a message to mxlobsrvr to insert into the descriptor tables
request_.setType(Lob_Req_Allocate_Desc);
request_.getDesc().setSize(sourceLen);
request_.setDescNumOut(descNumOut);
request_.setDataOffset(dataOffset);
err = request_.send();
if (err != LOB_OPER_OK) {
return err;
}
return err;
}
Ex_Lob_Error ExLob::writeLobData(char *source, Int64 sourceLen, LobsSubOper subOperation, Int64 tgtOffset,Int64 &operLen, Int64 lobMaxChunkMemSize)
{
Ex_Lob_Error err;
char *inputAddr = source;
Int64 readOffset = 0;
Int32 allocMemSize = 0;
Int64 inputSize = sourceLen;
Int64 writeOffset = tgtOffset;
while(inputSize > 0)
{
allocMemSize = MINOF(lobMaxChunkMemSize, inputSize);
if (subOperation == Lob_File)
{
err = readSourceFile(source, inputAddr, allocMemSize, readOffset);
if (err != LOB_OPER_OK)
return err;
}
else
{ // in memory
}
err = writeData(writeOffset, inputAddr, allocMemSize, operLen);
if (err != LOB_OPER_OK)
{
//handle errors that happen in one of the chunks.
return err;
}
if (subOperation == Lob_File) {
writeOffset = writeOffset+allocMemSize;
readOffset = readOffset+allocMemSize;
inputSize = inputSize-lobMaxChunkMemSize;
getLobGlobalHeap()->deallocateMemory(inputAddr);
}
else
{
writeOffset = writeOffset+allocMemSize;
inputSize = inputSize-lobMaxChunkMemSize;
inputAddr = inputAddr+allocMemSize;
}
}
hdfsCloseFile(fs_, fdData_);
fdData_=NULL;
return err;
}
Ex_Lob_Error ExLob::readToMem(char *memAddr, Int64 size, Int64 &operLen)
{
Ex_Lob_Error err = LOB_OPER_OK;
int cliErr;
operLen = 0;
ExLobDesc desc;
Int64 sizeToRead = 0;
err = getDesc(desc);
sizeToRead = MINOF(size,desc.getSize());
if (getRequest()->getBlackBoxLen() == -1)
sizeToRead = size;
err = readDataToMem(memAddr, desc.getOffset(),sizeToRead, operLen);
return err;
}
LobInputOutputFileType ExLob::fileType(char *ioFileName)
{
std::string fileTgt(ioFileName);
std::string hdfsDirStr("hdfs://");
std::string httpStr("http://");
std::string fileDirStr("file://");
short found = 0;
LobInputOutputFileType filetype;
bool isHdfs = FALSE;
bool isLocal = FALSE;
bool isExternal = FALSE;
bool isHdfsDir = FALSE;
bool isFileDir = FALSE;
if (((found = fileTgt.find(hdfsDirStr)) != std::string::npos) && (found == 0))
{
return HDFS_FILE;
}
else if (((found = fileTgt.find(fileDirStr)) != std::string::npos) &&(found == 0))
return LOCAL_FILE;
else if (((found = fileTgt.find(httpStr)) != std::string::npos) && (found == 0))
return CURL_FILE;
else
return LOCAL_FILE;
}
Ex_Lob_Error ExLob::readToFile(char *tgtFileName, Int64 tgtLength, Int64 &operLen, Int64 lobMaxChunkMemLen, Int32 fileflags)
{
Ex_Lob_Error err = LOB_OPER_OK;
Int64 srcOffset = 0;
Int64 srcLength = 0;
LobInputOutputFileType tgtType = fileType(tgtFileName);
ExLobDesc desc;
err = getDesc(desc);
if (err != LOB_OPER_OK)
return err;
if (getRequest()->getBlackBoxLen() == -1) // mxlobsrvr returned -1 indicating multiple chunks for this particular lob handle
{
//the data retrieval in chunks is handled in readDataToMem.
}
else if (tgtLength <=0 )
{
return LOB_SOURCE_FILE_READ_ERROR;
}
else
{
srcOffset = desc.getOffset();
}
if (tgtType == HDFS_FILE)
{
err = readDataToHdfsFile(tgtFileName, srcOffset , tgtLength,operLen, lobMaxChunkMemLen, fileflags);
if (err != LOB_OPER_OK)
return err;
}
else if(tgtType == CURL_FILE)
{
err = readDataToExternalFile(tgtFileName, srcOffset, tgtLength, operLen, lobMaxChunkMemLen, fileflags);
if (err != LOB_OPER_OK)
return err;
}
else if (tgtType == LOCAL_FILE)
{
err = readDataToLocalFile(tgtFileName,srcOffset, tgtLength,operLen, lobMaxChunkMemLen, fileflags);
if (err != LOB_OPER_OK)
return err;
}
else
return LOB_TARGET_FILE_OPEN_ERROR; //unknown format
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::append(char *data, Int64 size, LobsSubOper so, Int64 headDescNum, Int64 &operLen, Int64 lobMaxSize,Int64 lobMaxChunkMemSize)
{
Ex_Lob_Error err = LOB_OPER_OK;
Int64 dummyParam;
Int64 dataOffset=0;
Int64 sourceLen = size;
if (so == Lob_File)
{
err = statSourceFile(data, sourceLen);
if (err != LOB_OPER_OK)
return err;
}
if (sourceLen <= 0 || sourceLen > lobMaxSize)
{
return LOB_MAX_LIMIT_ERROR; //exceeded the size of the max lob size
}
err = allocateDesc((unsigned int)sourceLen, dummyParam, dataOffset, lobMaxSize);
if (err != LOB_OPER_OK)
return err;
request_.setType(Lob_Req_Append);
request_.getDesc().setSize(sourceLen);
request_.setDataOffset(dataOffset);
request_.send();
err = request_.getError();
if (err != LOB_OPER_OK) {
return err;
}
int cliErr = request_.getCliError();
if (cliErr < 0 || cliErr == 100) { // some error or EOD.
return LOB_DESC_APPEND_ERROR;
}
err = writeLobData(data, sourceLen,so,dataOffset,operLen,lobMaxChunkMemSize);
if (err != LOB_OPER_OK)
return err;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::insertData(char *data, Int64 size, LobsSubOper so,Int64 headDescNum, Int64 &operLen, Int64 lobMaxSize, Int64 lobMaxChunkMemSize)
{
Ex_Lob_Error err;
ExLobDesc desc;
operLen = 0;
// get offset and input size from desc (the one that was just inserted into the descriptor handle table)
err = getDesc(desc);
if (err != LOB_OPER_OK)
return err;
int cliErr = request_.getCliError();
if (cliErr < 0 || cliErr == 100) { // some error or EOD.
return LOB_DESC_READ_ERROR;
}
if ((data == NULL)) {
return LOB_SOURCE_DATA_ERROR;
}
char *inputAddr = data;
Int64 inputSize = desc.getSize();
Int64 tgtOffset = desc.getOffset();
err = writeLobData(inputAddr, inputSize,so, tgtOffset,
operLen,lobMaxChunkMemSize);
if (err != LOB_OPER_OK)
return err;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::update(char *data, Int64 size, LobsSubOper so,Int64 headDescNum, Int64 &operLen, Int64 lobMaxSize, Int64 lobMaxChunkMemSize)
{
Ex_Lob_Error err = LOB_OPER_OK;
Int64 dummyParam;
Int64 dataOffset = 0;
Int64 sourceLen = size;
if (so == Lob_File)
{
err = statSourceFile(data, sourceLen);
if (err != LOB_OPER_OK)
return err;
}
if (sourceLen <= 0 || sourceLen > lobMaxSize)
{
return LOB_MAX_LIMIT_ERROR; //exceeded the size of the max lob size
}
err = allocateDesc((unsigned int)sourceLen, dummyParam, dataOffset, lobMaxSize);
if (err != LOB_OPER_OK)
return err;
// send a message to mxlobsrvr to do an update into descriptor tables
request_.setType(Lob_Req_Update);
request_.getDesc().setSize(sourceLen);
request_.setDataOffset(dataOffset);
request_.send();
err = request_.getError();
if (err != LOB_OPER_OK) {
return err;
}
int cliErr = request_.getCliError();
if (cliErr < 0 || cliErr == 100) { // some error or EOD.
return LOB_DESC_UPDATE_ERROR;
}
err = writeLobData(data, sourceLen,so,dataOffset,operLen,lobMaxChunkMemSize);
if (err != LOB_OPER_OK)
return err;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::delDesc()
{
Ex_Lob_Error err;
Int64 dummyParam;
request_.setType(Lob_Req_Del_Desc);
request_.send();
err = request_.getError();
return err;
}
Ex_Lob_Error ExLob::purgeLob()
{
if (hdfsDelete(fs_, lobDataFile_, 0) != 0)
{
return LOB_DATA_FILE_DELETE_ERROR;
}
if (hdfsDelete(fs_, lobDescFile_, 0) != 0)
{
return LOB_DESC_FILE_DELETE_ERROR;
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::openCursor(char *handleIn, Int64 handleInLen)
{
Ex_Lob_Error err;
cursor_t cursor;
request_.setType(Lob_Req_Select_Cursor);
err = request_.send();
if (err != LOB_OPER_OK) {
return err;
}
err = request_.getError();
if (err != LOB_OPER_OK) {
return err;
}
cursor.bytesRead_ = -1;
cursor.descOffset_ = -1;
cursor.descSize_ = -1;
cursor.cliInterface_ = NULL; // used only in lob process
cursor.eod_ = false;
cursor.eor_ = false;
cursor.eol_ = false;
lobCursors_it it = lobCursors_.find(string(handleIn, handleInLen));
if (it == lobCursors_.end())
{
lobCursors_.insert(pair<string, cursor_t>
(string(handleIn, handleInLen), cursor));
}
else
{
it->second = cursor;
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::openDataCursor(char *file, LobsCursorType type, Int64 range, Int64 bufMaxSize,
Int64 maxBytes, Int64 waited, ExLobGlobals *lobGlobals)
{
Ex_Lob_Error err;
cursor_t cursor;
clock_gettime(CLOCK_MONOTONIC, &cursor.openTime_);
// check to see if cursor is already open.
// occurs for pre-open cases
lobCursorLock_.lock();
lobCursors_it it = lobCursors_.find(string(file, strlen(file)));
if (it != lobCursors_.end()) {
clock_gettime(CLOCK_MONOTONIC, &cursor.openTime_);
lobCursorLock_.unlock();
return LOB_OPER_OK;
}
union ranges_t {
Int64 range64;
struct {
Lng32 beginRange;
Lng32 numRanges;
}r;
} ranges;
cursor.bytesRead_ = -1;
cursor.descOffset_ = -1;
cursor.descSize_ = -1;
cursor.cliInterface_ = NULL; // used only in lob process
cursor.eod_ = false;
cursor.eor_ = false;
cursor.eol_ = false;
cursor.type_ = type;
cursor.bufMaxSize_ = bufMaxSize;
cursor.maxBytes_ = maxBytes;
cursor.prefetch_ = !waited;
cursor.bufferHits_ = 0;
cursor.bufferMisses_ = 0;
strcpy(cursor.name_, file);
cursor.currentRange_ = -1;
cursor.endRange_ = -1;
cursor.currentStartOffset_ = -1;
cursor.descOffset_ = range;
cursor.currentFd_ = NULL;
cursor.currentBytesToRead_ = -1;
cursor.currentBytesRead_ = 0;
cursor.currentEod_ = false;
lobCursors_.insert(pair<string, cursor_t>
(string(file, strlen(file)), cursor));
it = lobCursors_.find(string(file, strlen(file))); // to get the actual cursor object in the map
if (!fdData_ || (openFlags_ != O_RDONLY))
{
hdfsCloseFile(fs_, fdData_);
fdData_ = NULL;
openFlags_ = O_RDONLY;
fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags_, 0, 0, 0);
if (!fdData_) {
openFlags_ = -1;
lobCursorLock_.unlock();
return LOB_DATA_FILE_OPEN_ERROR;
}
}
if (hdfsSeek(fs_, fdData_, (it->second).descOffset_) == -1) {
lobCursorLock_.unlock();
return LOB_DATA_FILE_POSITION_ERROR;
}
// start reading in a worker thread
lobGlobals->enqueuePrefetchRequest(this, &(it->second));
lobCursorLock_.unlock();
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readCursor(char *tgt, Int64 tgtSize, char *handleIn, Int64 handleInLen, Int64 &operLen)
{
int dataOffset;
Ex_Lob_Error result;
cursor_t cursor;
lobCursors_it it = lobCursors_.find(string(handleIn, handleInLen));
if (it == lobCursors_.end())
{
return LOB_CURSOR_NOT_OPEN;
}
else
{
cursor = it->second;
}
if (cursor.eod_) {
// remove cursor from the map.
// server has already closed the cursor.
closeCursor(handleIn, handleInLen);
// indicate EOD to SQL
operLen = 0;
return LOB_OPER_OK;
}
result = readCursorData(tgt, tgtSize, cursor, operLen); // increments cursor
if (result != LOB_OPER_OK)
return result;
it->second = cursor;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readDataCursorSimple(char *file, char *tgt, Int64 tgtSize,
Int64 &operLen, ExLobGlobals *lobGlobals)
{
int dataOffset;
Ex_Lob_Error result = LOB_OPER_OK;
cursor_t *cursor;
ExLobCursor::bufferList_t::iterator c_it;
ExLobCursorBuffer *buf = NULL;
Int64 bytesToCopy = 0;
operLen = 0;
Int64 len;
char *target = tgt;
bool done = false;
struct timespec startTime;
struct timespec endTime;
lobCursorLock_.lock();
lobCursors_it it = lobCursors_.find(string(file, strlen(file)));
if (it == lobCursors_.end())
{
lobCursorLock_.unlock();
return LOB_CURSOR_NOT_OPEN;
}
else
{
cursor = &(it->second);
}
lobCursorLock_.unlock();
while ((operLen < tgtSize) && !done && !cursor->eol_)
{
lobGlobals->traceMessage("locking cursor",cursor,__LINE__);
cursor->lock_.lock();
// if no buffers to read and is eor or eod, we are done.
// else wait for prefetch thread to wake us up.
if (cursor->prefetchBufList_.size() == 0) {
if (cursor->eor_ || cursor->eod_) {
done = true;
} else {
cursor->bufferMisses_++;
lobGlobals->traceMessage("wait on condition cursor",cursor,__LINE__);
cursor->lock_.wait();
}
lobGlobals->traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
continue;
}
// a buffer is available
c_it = cursor->prefetchBufList_.begin();
buf = *c_it;
lobGlobals->traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
bytesToCopy = min(buf->bytesRemaining_, tgtSize - operLen);
memcpy(target, buf->data_ + buf->bytesUsed_, bytesToCopy);
target += bytesToCopy;
if (bytesToCopy == buf->bytesRemaining_) { // buffer is now empty
buf->bytesRemaining_ = -1;
buf->bytesUsed_ = -1;
lobGlobals->postfetchBufListLock_.lock();
lobGlobals->postfetchBufList_.push_back(buf);
lobGlobals->postfetchBufListLock_.unlock();
lobGlobals->traceMessage("locking cursor",cursor,__LINE__);
cursor->lock_.lock();
c_it = cursor->prefetchBufList_.erase(c_it);
lobGlobals->traceMessage("signal condition cursor",cursor,__LINE__);
cursor->lock_.wakeOne(); // wake up prefetch thread if it was waiting for an empty buffer.
lobGlobals->traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
} else {
buf->bytesUsed_ += bytesToCopy;
buf->bytesRemaining_ -= bytesToCopy;
}
stats_.bytesPrefetched += bytesToCopy;
operLen += bytesToCopy;
}
// update stats
stats_.bytesRead += operLen;
stats_.bytesToRead += tgtSize;
stats_.numReadReqs++;
return LOB_OPER_OK;
}
void ExLobCursor::emptyPrefetchList(ExLobGlobals *lobGlobals)
{
ExLobCursor::bufferList_t::iterator c_it;
ExLobCursorBuffer *buf = NULL;
c_it = prefetchBufList_.begin();
while (c_it != prefetchBufList_.end())
{
buf = *c_it;
lobGlobals->postfetchBufListLock_.lock();
lobGlobals->postfetchBufList_.push_back(buf);
lobGlobals->postfetchBufListLock_.unlock();
c_it = prefetchBufList_.erase(c_it);
}
}
// Seems like this is currently unused.
// closeDataCursorSimple takes care of destroying the cursor. But adding code
// similar to closeDataCursorSimple for correctness in case it is used in future
Ex_Lob_Error ExLob::deleteCursor(char *cursorName, ExLobGlobals *lobGlobals)
{
cursor_t *cursor = NULL;
lobCursorLock_.lock();
lobCursors_it it = lobCursors_.find(string(cursorName, strlen(cursorName)));
if (it != lobCursors_.end())
{
cursor = &(it->second);
lobGlobals->traceMessage("locking cursor",cursor,__LINE__);
cursor->lock_.lock();
cursor->emptyPrefetchList(lobGlobals);
lobGlobals->traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
lobCursors_.erase(it);
}
lobCursorLock_.unlock();
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::closeCursor(char *handleIn, Int64 handleInLen)
{
lobCursors_it it = lobCursors_.find(string(handleIn, handleInLen));
if (it != lobCursors_.end())
{
lobCursors_.erase(it);
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::closeDataCursorSimple(char *fileName, ExLobGlobals *lobGlobals)
{
cursor_t *cursor = NULL;
Int64 secs = 0;
Int64 nsecs = 0;
lobCursorLock_.lock();
lobCursors_it it = lobCursors_.find(string(fileName, strlen(fileName)));
if (it != lobCursors_.end())
{
cursor = &(it->second);
lobGlobals->traceMessage("locking cursor",cursor,__LINE__);
cursor->lock_.lock();
clock_gettime(CLOCK_MONOTONIC, &cursor->closeTime_);
secs = cursor->closeTime_.tv_sec - cursor->openTime_.tv_sec;
nsecs = cursor->closeTime_.tv_nsec - cursor->openTime_.tv_nsec;
if (cursor->eod_ || cursor->eor_) { // prefetch thread already done,
cursor->emptyPrefetchList(lobGlobals);
lobGlobals->traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
lobCursors_.erase(it); // so erase it here.
// no need to unlock as cursor object is gone.
} else {
cursor->eol_ = true; // prefetch thread will do the eol rituals
lobGlobals->traceMessage("signal condition cursor",cursor,__LINE__);
cursor->lock_.wakeOne(); // wakeup prefetch thread
lobGlobals->traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
}
}
lobCursorLock_.unlock();
if (nsecs < 0) {
secs--;
nsecs += NUM_NSECS_IN_SEC;
}
Int64 totalnsecs = (secs * NUM_NSECS_IN_SEC) + nsecs;
stats_.cursorElapsedTime += totalnsecs;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::print()
{
Ex_Lob_Error err;
request_.setType(Lob_Req_Print);
err = request_.send();
return err;
}
Ex_Lob_Error ExLob::doSanityChecks(char *dir, LobsStorage storage,
Int64 handleInLen, Int64 handleOutLen,
Int64 blackBoxLen)
{
#ifdef SQ_USE_HDFS
if (!fs_)
return LOB_HDFS_CONNECT_ERROR;
#else
if (fdData_ == -1)
return LOB_DATA_FILE_OPEN_ERROR;
#endif
if (dir_.compare(dir) != 0)
return LOB_DIR_NAME_ERROR;
if (storage_ != storage)
return LOB_STORAGE_TYPE_ERROR;
if (handleInLen > MAX_HANDLE_IN_LEN) {
return LOB_HANDLE_IN_LEN_ERROR;
}
if (handleOutLen > MAX_HANDLE_IN_LEN) {
return LOB_HANDLE_OUT_LEN_ERROR;
}
if (blackBoxLen > MAX_HANDLE_IN_LEN) {
return LOB_BLACK_BOX_LEN_ERROR;
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::allocateDesc(ULng32 size, Int64 &descNum, Int64 &dataOffset, Int64 lobMaxSize)
{
Ex_Lob_Error err = LOB_OPER_OK;
Lng32 retval = 0;
Int64 numRead = 0;
Int64 numWritten = 0;
// TBD need a way to lock access to this file.
Int32 openFlags = O_RDONLY ;
fdDesc_ = hdfsOpenFile(fs_, lobDescFile_, O_RDONLY, 0, 0,0);
if (!fdDesc_) {
hdfsCloseFile(fs_,fdDesc_);
fdDesc_ = NULL;
return LOB_DESC_FILE_OPEN_ERROR;
}
ExLobDescHeader header(lobMaxSize);
numRead = hdfsPread(fs_,fdDesc_, 0, (void *)&header, sizeof(ExLobDescHeader) );
if (numRead <=0)
{
return LOB_DESC_HEADER_READ_ERROR;
}
if (header.getAvailSize() >= size) {
descNum = header.getFreeDesc();
dataOffset = header.getDataOffset();
header.incFreeDesc();
header.decAvailSize(size);
header.incDataOffset(size);
hdfsCloseFile(fs_,fdDesc_);
fdDesc_ = NULL;
openFlags = O_WRONLY;
fdDesc_ = hdfsOpenFile(fs_,lobDescFile_,openFlags,0,0,0);
if (!fdDesc_) {
return LOB_DESC_FILE_OPEN_ERROR;
}
numWritten = hdfsWrite(fs_,fdDesc_, (void *)&header, sizeof(ExLobDescHeader)) ;
if (numWritten <= 0)
{
return LOB_DESC_HEADER_WRITE_ERROR;
}
}
else {
return LOB_DATA_FILE_FULL_ERROR;
}
ExLobDesc desc(dataOffset, size, descNum);
hdfsCloseFile(fs_,fdDesc_);
fdDesc_=NULL;
openFlags = O_WRONLY| O_APPEND;
fdDesc_ = hdfsOpenFile(fs_,lobDescFile_,openFlags,0,0,0);
numWritten = hdfsWrite(fs_,fdDesc_, (void *)&desc, sizeof(ExLobDesc));
if (numWritten <= 0)
{
err = LOB_DESC_WRITE_ERROR;
}
hdfsCloseFile(fs_,fdDesc_);
fdDesc_=NULL;
// TBD need a way to unlock this hdfs file.
return err;
}
///////////////////////////////////////////////////////////////////////////////
// ExLobDescHeader definitions
///////////////////////////////////////////////////////////////////////////////
ExLobDescHeader::ExLobDescHeader(unsigned int size) :
freeDesc_(0),
dataOffset_(0),
availSize_(size)
{
}
ExLobDescHeader::~ExLobDescHeader()
{
}
///////////////////////////////////////////////////////////////////////////////
// ExLobDesc definitions
///////////////////////////////////////////////////////////////////////////////
ExLobDesc::ExLobDesc(int offset, int size, int tail) :
dataOffset_(offset),
dataSize_(size),
dataState_(EX_LOB_DATA_INITIALIZING),
tail_(tail),
next_(-1),
prev_(-1),
nextFree_(-1)
{
}
ExLobDesc::~ExLobDesc()
{
}
Ex_Lob_Error ExLob::readCursorData(char *tgt, Int64 tgtSize, cursor_t &cursor, Int64 &operLen)
{
ExLobDesc desc;
Ex_Lob_Error err;
Int64 bytesAvailable = 0;
Int64 bytesToCopy = 0;
Int64 bytesRead = 0;
operLen = 0;
tOffset offset;
struct timespec startTime;
struct timespec endTime;
while ( (operLen < tgtSize) && !cursor.eod_ )
{
      if (cursor.bytesRead_ == cursor.descSize_) // time to read next chunk
{
err = fetchCursor();
if (err != LOB_OPER_OK) {
return err;
}
if (request_.getCliError() == 100) {
cursor.eod_ = true; // subsequent call will return 100 and close the cursor
continue;
} else {
cursor.descSize_ = request_.getDesc().getSize();
cursor.descOffset_ = request_.getDesc().getOffset();
cursor.bytesRead_ = 0;
}
}
bytesAvailable = cursor.descSize_ - cursor.bytesRead_;
bytesToCopy = min(bytesAvailable, tgtSize - operLen);
offset = cursor.descOffset_ + cursor.bytesRead_;
if (!fdData_ || (openFlags_ != O_RDONLY))
{
hdfsCloseFile(fs_, fdData_);
fdData_=NULL;
openFlags_ = O_RDONLY;
fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags_, 0, 0, 0);
if (!fdData_) {
openFlags_ = -1;
return LOB_DATA_FILE_OPEN_ERROR;
}
}
clock_gettime(CLOCK_MONOTONIC, &startTime);
bytesRead = hdfsPread(fs_, fdData_, offset, tgt, bytesToCopy);
clock_gettime(CLOCK_MONOTONIC, &endTime);
Int64 secs = endTime.tv_sec - startTime.tv_sec;
Int64 nsecs = endTime.tv_nsec - startTime.tv_nsec;
if (nsecs < 0) {
secs--;
nsecs += NUM_NSECS_IN_SEC;
}
Int64 totalnsecs = (secs * NUM_NSECS_IN_SEC) + nsecs;
stats_.CumulativeReadTime += totalnsecs;
if (bytesRead == -1) {
return LOB_DATA_READ_ERROR;
} else if (bytesRead == 0) {
cursor.eod_ = true;
continue;
}
cursor.bytesRead_ += bytesRead;
operLen += bytesRead;
tgt += bytesRead;
}
hdfsCloseFile(fs_, fdData_);
fdData_ = NULL;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readCursorDataSimple(char *tgt, Int64 tgtSize, cursor_t &cursor, Int64 &operLen)
{
ExLobDesc desc;
Ex_Lob_Error err;
Int64 bytesAvailable = 0;
Int64 bytesToCopy = 0;
Int64 bytesRead = 0;
operLen = 0;
tOffset offset;
struct timespec startTime;
struct timespec endTime;
bool done = false;
if (!fdData_) {
return LOB_CURSOR_NOT_OPEN_ERROR;
}
if (cursor.bytesRead_ == -1) { // starting
cursor.bytesRead_ = 0;
}
clock_gettime(CLOCK_MONOTONIC, &startTime);
while ( (operLen < tgtSize) && !done )
{
//offset = cursor.descOffset_ + cursor.bytesRead_;
bytesToCopy = tgtSize - operLen;
offset = cursor.descOffset_ + cursor.bytesRead_;
// gets chunks of 64KB. Uses readDirect internally.
// bytesRead = hdfsPread(fs_, fdData_, offset, tgt, bytesToCopy);
bytesRead = hdfsRead(fs_, fdData_, tgt, bytesToCopy);
stats_.numHdfsReqs++;
if (bytesRead == -1) {
return LOB_DATA_READ_ERROR;
} else if (bytesRead == 0) {
done = true;
}
cursor.bytesRead_ += bytesRead;
operLen += bytesRead;
tgt += bytesRead;
}
clock_gettime(CLOCK_MONOTONIC, &endTime);
Int64 secs = endTime.tv_sec - startTime.tv_sec;
Int64 nsecs = endTime.tv_nsec - startTime.tv_nsec;
if (nsecs < 0) {
secs--;
nsecs += NUM_NSECS_IN_SEC;
}
Int64 totalnsecs = (secs * NUM_NSECS_IN_SEC) + nsecs;
stats_.CumulativeReadTime += totalnsecs;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readDataToMem(char *memAddr,
Int64 offset, Int64 size, Int64 &operLen)
{
Ex_Lob_Error err = LOB_OPER_OK;
operLen = 0;
Int64 bytesRead = 0;
NABoolean multipleChunks = FALSE;
if (getRequest()->getBlackBoxLen() == -1) // mxlobsrvr returned -1 indicating multiple chunks for this particular lob handle
{
multipleChunks = TRUE;
err = openCursor(getRequest()->getHandleIn(),
getRequest()->getHandleInLen());
//now we can fetch the descriptors for each chunk
}
   if (err != LOB_OPER_OK)
     return err;
int cliErr = request_.getCliError();
if (cliErr < 0 || cliErr == 100) {
return LOB_DESC_READ_ERROR;
}
if (fdData_)// we may have a stale handle. close and open to refresh
{
hdfsCloseFile(fs_, fdData_);
fdData_=NULL;
openFlags_ = O_RDONLY;
fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags_, 0, 0, 0);
if (!fdData_) {
openFlags_ = -1;
return LOB_DATA_FILE_OPEN_ERROR;
}
}
else
{
fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags_, 0, 0, 0);
if (!fdData_) {
openFlags_ = -1;
return LOB_DATA_FILE_OPEN_ERROR;
}
}
if (!multipleChunks)
{
if ((bytesRead = hdfsPread(fs_, fdData_, offset,
memAddr, size)) == -1) {
return LOB_DATA_READ_ERROR;
}
operLen = bytesRead;
return LOB_OPER_OK;
}
else
{
//handle reading the multiple chunks like a cursor
err = readCursor(memAddr,size, getRequest()->getHandleIn(),
getRequest()->getHandleInLen(), operLen);
if (err==LOB_OPER_OK)
closeCursor(getRequest()->getHandleIn(),
getRequest()->getHandleInLen());
else
return err;
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readDataToLocalFile(char *fileName, Int64 offset, Int64 size, Int64 &writeOperLen, Int64 lobMaxChunkMemSize, Int32 fileflags)
{
Ex_Lob_Error err;
Int64 operLen = 0;
Int64 srcLen = size;
Int64 srcOffset = offset;
Int64 tgtOffset = 0;
char *lobData = 0;
Int64 chunkSize = 0;
if (srcLen <=0)
return LOB_SOURCE_DATA_ALLOC_ERROR;
  // open the target file for writing
int filePerms = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
int openFlags = O_RDWR ; // O_DIRECT needs mem alignment
if ((LobTgtFileFlags)fileflags == Lob_Append_Or_Error )
openFlags |= O_APPEND;
else
openFlags |= O_TRUNC;
int fdDestFile = open(fileName, openFlags, filePerms);
if (fdDestFile >=0 )
{
if ((LobTgtFileFlags)fileflags == Lob_Error_Or_Create)
return LOB_TARGET_FILE_EXISTS_ERROR;
}
if (fdDestFile == -1)
{
if (((LobTgtFileFlags)fileflags == Lob_Append_Or_Error) ||
((LobTgtFileFlags)fileflags == Lob_Truncate_Or_Error))
return LOB_TARGET_FILE_OPEN_ERROR;
else
{
openFlags = O_CREAT | O_RDWR ;
fdDestFile = open(fileName, openFlags, filePerms);
if (fdDestFile == -1)
return LOB_TARGET_FILE_OPEN_ERROR;
}
}
if ((srcLen < lobMaxChunkMemSize) && (getRequest()->getBlackBoxLen() != -1)) // simple single I/O case
{
lobData = (char *) (getLobGlobalHeap())->allocateMemory(srcLen);
if (lobData == NULL)
{
return LOB_SOURCE_DATA_ALLOC_ERROR;
}
err = readDataToMem(lobData, srcOffset,srcLen,operLen);
if (err != LOB_OPER_OK)
{
getLobGlobalHeap()->deallocateMemory(lobData);
return err;
}
writeOperLen += pwrite(fdDestFile, lobData, srcLen, tgtOffset) ;
if (writeOperLen <= 0)
{
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_TARGET_FILE_WRITE_ERROR;
}
getLobGlobalHeap()->deallocateMemory(lobData);
}
else // multiple chunks to read
{
err = openCursor(getRequest()->getHandleIn(),
getRequest()->getHandleInLen());
if (err != LOB_OPER_OK)
return err;
while ( srcLen > 0)
{
chunkSize = MINOF(srcLen, lobMaxChunkMemSize);
lobData = (char *) (getLobGlobalHeap())->allocateMemory(chunkSize);
if (lobData == NULL)
{
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_SOURCE_DATA_ALLOC_ERROR;
}
//handle reading the multiple chunks like a cursor
err = readCursor(lobData,chunkSize, getRequest()->getHandleIn(),
getRequest()->getHandleInLen(), operLen);
if ((err != LOB_OPER_OK) || (operLen != chunkSize))
{
getLobGlobalHeap()->deallocateMemory(lobData);
return err;
}
writeOperLen += pwrite(fdDestFile, lobData, chunkSize, tgtOffset) ;
if (writeOperLen <= 0)
{
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_TARGET_FILE_WRITE_ERROR;
}
getLobGlobalHeap()->deallocateMemory(lobData);
srcLen -= chunkSize;
tgtOffset += chunkSize;
}
closeCursor(getRequest()->getHandleIn(),
getRequest()->getHandleInLen());
}
close(fdDestFile);
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readDataToHdfsFile(char *tgtFileName, Int64 offset, Int64 size, Int64 &writeOperLen, Int64 lobMaxChunkMemLen, Int32 fileflags)
{
Ex_Lob_Error err;
Int64 operLen = 0;
Int64 srcLen = size;
Int64 srcOffset = offset;
Int64 tgtOffset = 0;
char *lobData = 0;
Int64 chunkSize = 0;
hdfsFile fdTgtFile;
// open and write to the target file
int openFlags = O_WRONLY;
if ((LobTgtFileFlags)fileflags == Lob_Append_Or_Error )
openFlags |= O_APPEND;
//hdfsFile fdTgtFile = hdfsOpenFile(fs_,tgtFileName, openFlags, 0,0,0);
if (hdfsExists(fs_,tgtFileName) == 0)
{
if ((LobTgtFileFlags)fileflags == Lob_Error_Or_Create)
return LOB_TARGET_FILE_EXISTS_ERROR;
else
{
openFlags = O_WRONLY ;
fdTgtFile = hdfsOpenFile(fs_, tgtFileName, openFlags, 0,0,0);
if (fdTgtFile == NULL)
return LOB_TARGET_FILE_OPEN_ERROR;
}
}
else
{
if (((LobTgtFileFlags)fileflags == Lob_Append_Or_Error) ||
((LobTgtFileFlags)fileflags == Lob_Truncate_Or_Error))
return LOB_TARGET_FILE_OPEN_ERROR;
else
{
openFlags = O_WRONLY ;
fdTgtFile = hdfsOpenFile(fs_, tgtFileName, openFlags, 0,0,0);
if (fdTgtFile == NULL)
return LOB_TARGET_FILE_OPEN_ERROR;
}
}
if ((srcLen < lobMaxChunkMemLen) && (getRequest()->getBlackBoxLen() != -1)) // simple single I/O case
{
lobData = (char *) (getLobGlobalHeap())->allocateMemory(srcLen);
if (lobData == NULL)
{
return LOB_SOURCE_DATA_ALLOC_ERROR;
}
err = readDataToMem(lobData, srcOffset,srcLen,operLen);
if (err != LOB_OPER_OK)
{
getLobGlobalHeap()->deallocateMemory(lobData);
return err;
}
writeOperLen += hdfsWrite(fs_,fdTgtFile,lobData, srcLen);
if (writeOperLen <= 0)
{
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_TARGET_FILE_WRITE_ERROR;
}
if (hdfsFlush(fs_, fdTgtFile))
{
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_DATA_FLUSH_ERROR;
}
getLobGlobalHeap()->deallocateMemory(lobData);
}
else
{// multiple chunks to read
err = openCursor(getRequest()->getHandleIn(),
getRequest()->getHandleInLen());
if (err != LOB_OPER_OK)
return err;
while ( srcLen > 0)
{
chunkSize = MINOF(srcLen, lobMaxChunkMemLen);
lobData = (char *) (getLobGlobalHeap())->allocateMemory(chunkSize);
if (lobData == NULL)
{
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_SOURCE_DATA_ALLOC_ERROR;
}
//handle reading the multiple chunks like a cursor
err = readCursor(lobData,chunkSize, getRequest()->getHandleIn(),
getRequest()->getHandleInLen(), operLen);
if ((err != LOB_OPER_OK) || (operLen != chunkSize))
{
getLobGlobalHeap()->deallocateMemory(lobData);
return err;
}
writeOperLen += hdfsWrite(fs_,fdTgtFile,lobData, chunkSize);
if (writeOperLen <= 0)
{
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_TARGET_FILE_WRITE_ERROR;
}
if (hdfsFlush(fs_, fdTgtFile))
{
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_DATA_FLUSH_ERROR;
}
getLobGlobalHeap()->deallocateMemory(lobData);
srcLen -= chunkSize;
}
closeCursor(getRequest()->getHandleIn(),
getRequest()->getHandleInLen());
}
hdfsCloseFile(fs_, fdTgtFile);
fdTgtFile=NULL;
hdfsCloseFile(fs_,fdData_);
fdData_=NULL;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readDataToExternalFile(char *tgtFileName, Int64 offset, Int64 size, Int64 &operLen,Int64 lobMaxChunkMemLen,Int32 fileflags)
{
//TBD
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::closeFile()
{
if (fdData_)
{
hdfsCloseFile(fs_, fdData_);
fdData_ = NULL;
}
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::readStats(char *statsBuffer)
{
memcpy(statsBuffer, (char *)&stats_, sizeof(stats_));
return LOB_OPER_OK;
}
Ex_Lob_Error ExLob::initStats()
{
stats_.init();
return LOB_OPER_OK;
}
void ExLobGlobals::traceMessage(const char *logMessage, ExLobCursor *cursor,
int line)
{
if ( threadTraceFile_ && logMessage)
{
fprintf(threadTraceFile_,
"Thread: 0x%lx Line: %d %s 0x%lx\n" ,
(unsigned long)pthread_self(), line, logMessage,
(unsigned long) cursor);
fflush(threadTraceFile_);
}
}
Ex_Lob_Error ExLobGlobals::performRequest(ExLobHdfsRequest *request)
{
Ex_Lob_Error err = LOB_OPER_OK;
ExLob *lobPtr;
ExLobCursorBuffer *buf;
ExLobCursor *cursor;
Int64 size;
NABoolean seenEOR = false;
NABoolean seenEOD = false;
ExLobCursor::bufferList_t::iterator c_it;
Int64 totalBufSize;
switch (request->reqType_)
{
case Lob_Hdfs_Cursor_Prefetch :
lobPtr = request->lobPtr_;
cursor = request->cursor_;
traceMessage("locking cursor",cursor,__LINE__);
cursor->lock_.lock();
while (!cursor->eod_ && !cursor->eor_ && !cursor->eol_)
{
postfetchBufListLock_.lock();
c_it = postfetchBufList_.begin();
if (c_it != postfetchBufList_.end()) {
buf = *c_it;
postfetchBufList_.erase(c_it);
postfetchBufListLock_.unlock();
traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
} else {
postfetchBufListLock_.unlock();
// there are no empty buffers.
// if prefetch list already has the max, wait for one to free up.
totalBufSize = cursor->prefetchBufList_.size() * cursor->bufMaxSize_;
if (totalBufSize > LOB_CURSOR_PREFETCH_BYTES_MAX) {
traceMessage("wait on condition cursor",cursor,__LINE__);
cursor->lock_.wait();
char buffer2[2048];
sprintf(buffer2, "cursor->eod_ %d cursor->eor_ %d "
"cursor->eol_ %d", cursor->eod_,
cursor->eor_, cursor->eol_);
traceMessage(buffer2, cursor, __LINE__);
continue;
}
// create a new buffer
traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
buf = new (getHeap()) ExLobCursorBuffer();
buf->data_ = (char *) (getHeap())->allocateMemory( cursor->bufMaxSize_);
lobPtr->stats_.buffersUsed++;
}
size = min(cursor->bufMaxSize_, (cursor->maxBytes_ - cursor->bytesRead_ + (16 * 1024)));
if (buf->data_) {
lobPtr->readCursorDataSimple(buf->data_, size, *cursor, buf->bytesRemaining_);
buf->bytesUsed_ = 0;
traceMessage("locking cursor",cursor,__LINE__);
cursor->lock_.lock();
if (size < (cursor->bufMaxSize_)) {
cursor->eor_ = true;
seenEOR = true;
}
if (buf->bytesRemaining_) {
cursor->prefetchBufList_.push_back(buf);
traceMessage("signal condition cursor",cursor,__LINE__);
cursor->lock_.wakeOne();
traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
} else {
cursor->eod_ = true;
seenEOD = true;
traceMessage("signal condition cursor",cursor,__LINE__);
cursor->lock_.wakeOne();
traceMessage("unlocking cursor",cursor,__LINE__);
cursor->lock_.unlock();
postfetchBufListLock_.lock();
postfetchBufList_.push_back(buf);
postfetchBufListLock_.unlock();
}
      } else {
        assert(!"data_ is null"); // a bare string literal is always true; negate it so the assert can fire
      }
// Important! Break and do not access cursor object if we have reached
// end of data or range.
// The main thread could have destroyed the cursor
// in ::closeDataCursorSimple
if (seenEOD || seenEOR)
{
char buffer2[2048];
sprintf(buffer2, "seenEOD %d seenEOR %d",
seenEOD, seenEOR);
traceMessage(buffer2, cursor, __LINE__);
break;
}
traceMessage("locking cursor",cursor,__LINE__);
cursor->lock_.lock();
} // while
if (!seenEOD && !seenEOR)
{
traceMessage("locking cursor",cursor,__LINE__);
cursor->lock_.unlock();
if (cursor->eol_) { // never reaches here ??
lobPtr->deleteCursor(cursor->name_, this);
}
}
processPreOpens();
break;
default:
request->error_ = LOB_HDFS_REQUEST_UNKNOWN;
}
return LOB_OPER_OK;
}
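// Flow summary (annotation, not in the original source): the prefetch worker
// recycles buffers between two lists guarded by separate locks --
//   postfetchBufList_ : empty buffers, presumably returned by the consumer
//   prefetchBufList_  : filled buffers waiting to be consumed
// eod_/eor_ terminate the loop from the producer side; eol_, set by
// closeDataCursorSimple, asks the worker to tear the cursor down instead.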
Ex_Lob_Error ExLobDesc::print()
{
printf("%4d %4d %4d %4d %4d %4d %8d\n",
dataSize_, dataState_, tail_, prev_, next_, nextFree_, dataOffset_);
return LOB_OPER_OK;
}
///////////////////////////////////////////////////////////////////////////////
// ExLobGlobals definitions
///////////////////////////////////////////////////////////////////////////////
ExLobGlobals::ExLobGlobals() :
lobMap_(NULL),
fs_(NULL),
isCliInitialized_(FALSE),
isHive_(FALSE),
threadTraceFile_(NULL),
heap_(NULL)
{
//initialize the log file
if (getenv("TRACE_HDFS_THREAD_ACTIONS"))
{
char logFileName[50]= "";
sprintf(logFileName,"trace_threads.%d",getpid());
threadTraceFile_ = fopen(logFileName,"a");
}
}
ExLobGlobals::~ExLobGlobals()
{
ExLobCursor::bufferList_t::iterator c_it;
ExLobCursorBuffer *buf = NULL;
preOpenListLock_.lock();
preOpenList_.clear();
preOpenListLock_.unlock();
if (lobMap_)
delete lobMap_;
for (int i=0; i<NUM_WORKER_THREADS; i++) {
enqueueShutdownRequest();
}
for (int i=0; i<NUM_WORKER_THREADS; i++) {
pthread_join(threadId_[i], NULL);
}
  // Free the post fetch buf list AFTER the worker threads have exited, to
  // avoid a slow worker getting stuck on buffers the master has already
  // deallocated but never consumed, which could deadlock.
postfetchBufListLock_.lock();
c_it = postfetchBufList_.begin();
while (c_it != postfetchBufList_.end()) {
buf = *c_it;
if (buf->data_) {
heap_->deallocateMemory( buf->data_);
}
c_it = postfetchBufList_.erase(c_it);
}
postfetchBufListLock_.unlock();
//msg_mon_close_process(&serverPhandle);
if (threadTraceFile_)
fclose(threadTraceFile_);
threadTraceFile_ = NULL;
}
Ex_Lob_Error ExLobGlobals::setServerPhandle()
{
int nid;
int err = msg_mon_get_my_info(&nid, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
char server[12];
sprintf(server, "%s%d", "$ZLOBSRV", nid);
int oid;
err = msg_mon_open_process(server, &serverPhandle, &oid);
if (err != XZFIL_ERR_OK)
return LOB_SERVER_OPEN_ERROR;
return LOB_OPER_OK;
}
Ex_Lob_Error ExLobGlobals::resetServerPhandle()
{
Ex_Lob_Error err;
msg_mon_close_process(&serverPhandle);
err = setServerPhandle();
return err;
}
// called once per process
Ex_Lob_Error ExLobGlobals::initialize()
{
Ex_Lob_Error err = LOB_OPER_OK;
  lobMap_ = (lobMap_t *) new (getHeap())lobMap_t; // Left allocated from the system heap: the class contains an hdfsFS, so it cannot be derived from the LOB heap
if (lobMap_ == NULL)
return LOB_INIT_ERROR;
err = setServerPhandle();
// start the worker threads
startWorkerThreads();
return err;
}
static void *workerThreadMain(void *arg)
{
// parameter passed to the thread is an instance of the ExLobHdfs object
ExLobGlobals *glob = (ExLobGlobals *)arg;
glob->doWorkInThread();
return NULL;
}
Ex_Lob_Error ExLobGlobals::startWorkerThreads()
{
int rc;
for (int i=0; i<NUM_WORKER_THREADS; i++) {
rc = pthread_create(&threadId_[i], NULL, workerThreadMain, this);
if (rc != 0)
return LOB_HDFS_THREAD_CREATE_ERROR;
}
return LOB_OPER_OK;
}
///////////////////////////////////////////////////////////////////////////////
// ExLobRequest definitions
///////////////////////////////////////////////////////////////////////////////
ExLobRequest::ExLobRequest() :
reqNum_(0),
descNumIn_(-1),
descNumOut_(-1),
handleInLen_(-1),
handleOutLen_(-1),
dataOffset_(-1),
type_(Lob_Req_Invalid),
storage_(Lob_Invalid_Storage),
operLen_(-1),
error_(LOB_INVALID_ERROR_VAL),
cliError_(-1),
status_(LOB_INVALID_ERROR_VAL),
transId_(0)
{
TRANSID_SET_NULL(transIdBig_);
}
void ExLobRequest::setValues(char *descFileName, Int64 descNumIn, Int64 handleInLen,
char *handleIn, LobsStorage storage, Int64 transId,
SB_Transid_Type transIdBig,
SB_Transseq_Type transStartId,
char *blackBox, Int64 blackBoxLen)
{
descNumIn_ = descNumIn;
handleInLen_ = handleInLen;
storage_ = storage;
strcpy(descFileName_, descFileName);
if (handleIn != NULL && handleInLen > 0) {
memcpy(handleIn_, handleIn, handleInLen);
}
cliError_ = -1;
error_ = LOB_INVALID_ERROR_VAL;
status_ = LOB_INVALID_ERROR_VAL;
transId_ = transId;
transIdBig_ = transIdBig;
transStartId_ = transStartId;
blackBoxLen_ = blackBoxLen;
if (blackBox != NULL && blackBoxLen > 0) {
memcpy(blackBox_, blackBox, blackBoxLen);
}
}
void ExLobRequest::getValues(Int64 &descNumOut, Int64 &handleOutLen,
char *handleOut, Ex_Lob_Error &requestStatus,
Int64 &cliError,
char *blackBox, Int64 &blackBoxLen)
{
descNumOut = descNumOut_;
handleOutLen = handleOutLen_;
requestStatus = error_;
cliError = cliError_;
if (handleOut != NULL && handleOutLen_ > 0) {
memcpy(handleOut, handleOut_, handleOutLen_);
}
blackBoxLen = blackBoxLen_;
if (blackBox != NULL && blackBoxLen_ > 0) {
memcpy(blackBox, blackBox_, blackBoxLen_);
}
}
ExLobRequest::~ExLobRequest()
{
}
Ex_Lob_Error ExLobRequest::send()
{
int msgid;
int oid;
MS_Result_Type result;
short req_ctrl[BUFSIZ];
short rep_ctrl[BUFSIZ];
char *req_data = (char *)this;
ExLobRequest rep_data;
short req_data_len = sizeof(ExLobRequest);
short rep_data_max = sizeof(ExLobRequest);
int err=0;
int inx=0;
int retries = 3;
incrReqNum();
status_ = LOB_OPER_REQ_IN_PROGRESS;
do
{
err = BMSG_LINK_(&serverPhandle,
&msgid,
req_ctrl,
(ushort) (inx &1),
rep_ctrl,
1,
req_data,
req_data_len,
(char *)&rep_data,
rep_data_max,
0,0,0,0);
retries--;
err = BMSG_BREAK_(msgid, (short *) &result, &serverPhandle);
if (err == XZFIL_ERR_PATHDOWN) {
//lobGlobals->resetServerPhandle();
}
  } while ( (err == XZFIL_ERR_PATHDOWN) && (retries > 0) ); // 201 if lobserver got restarted
status_ = LOB_OPER_REQ_DONE;
if (err != XZFIL_ERR_OK)
return LOB_SEND_MSG_ERROR;
memcpy(this, &rep_data, rep_data_max);
return LOB_OPER_OK;
}
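// Illustrative only (not in the original source): send() above is a bounded
// retry on XZFIL_ERR_PATHDOWN (a restarted lobserver). The same shape as a
// tiny hypothetical helper:
static int retryOnPathDown(int (*attempt)(), int maxTries)
{
  int err;
  do {
    err = attempt();                      // one BMSG_LINK_/BMSG_BREAK_ round trip
  } while (err == XZFIL_ERR_PATHDOWN && --maxTries > 0);
  return err;
}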
void ExLobRequest::getDescOut(ExLobDesc &desc)
{
memcpy(&desc, &desc_, sizeof(ExLobDesc));
}
void ExLobRequest::putDescIn(ExLobDesc &desc)
{
memcpy(&desc_, &desc, sizeof(ExLobDesc));
}
///////////////////////////////////////////////////////////////////////////////
// ExLobHdfs definitions
///////////////////////////////////////////////////////////////////////////////
#ifdef SQ_USE_HDFS
ExLobLock::ExLobLock()
: bellRang_(false),
waiters_(0)
{
pthread_mutexattr_t mutexAttr;
pthread_mutexattr_init( &mutexAttr );
pthread_mutex_init( &mutex_, &mutexAttr );
pthread_cond_init( &workBell_, NULL );
}
ExLobLock::~ExLobLock()
{
pthread_mutex_unlock( &mutex_ );
pthread_mutex_destroy(&mutex_);
pthread_cond_destroy(&workBell_);
}
void ExLobLock::lock()
{
pthread_mutex_lock( &mutex_ );
}
void ExLobLock::unlock()
{
pthread_mutex_unlock( &mutex_ );
}
void ExLobLock::wakeOne()
{
pthread_cond_signal(&workBell_);
}
void ExLobLock::wakeAll()
{
pthread_cond_broadcast(&workBell_);
}
void ExLobLock::wait()
{
waiters_++;
pthread_cond_wait(&workBell_, &mutex_);
waiters_--;
}
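// Illustrative sketch (not in the original source): ExLobLock pairs mutex_
// with the workBell_ condition variable, so the canonical consumer is a
// predicate loop; re-checking after wait() tolerates spurious wakeups.
// The 'ready' flag here is hypothetical.
static void waitUntilReady(ExLobLock &lk, volatile bool &ready)
{
  lk.lock();
  while (!ready)   // the condition may still be false after a wakeup
    lk.wait();     // atomically releases mutex_ and blocks on workBell_
  lk.unlock();
}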
ExLobHdfsRequest::ExLobHdfsRequest(LobsHdfsRequestType reqType, hdfsFS fs,
hdfsFile file, char *buffer, int size) :
reqType_(reqType),
fs_(fs),
file_(file),
buffer_(buffer),
size_(size)
{
lobPtr_ = 0;
error_ = LOB_OPER_OK;
}
ExLobHdfsRequest::ExLobHdfsRequest(LobsHdfsRequestType reqType, ExLobCursor *cursor) :
reqType_(reqType),
cursor_(cursor)
{
buffer_=0;
lobPtr_=0;
fs_=0;
file_=0;
size_=0;
error_=LOB_OPER_OK;
}
ExLobHdfsRequest::ExLobHdfsRequest(LobsHdfsRequestType reqType, ExLob *lobPtr, ExLobCursor *cursor) :
reqType_(reqType),
lobPtr_(lobPtr),
cursor_(cursor)
{
buffer_=0;
fs_=0;
file_=0;
size_=0;
error_=LOB_OPER_OK;
}
ExLobHdfsRequest::ExLobHdfsRequest(LobsHdfsRequestType reqType) :
reqType_(reqType)
{
buffer_=0;
cursor_=0;
lobPtr_=0;
fs_=0;
file_=0;
size_=0;
error_=LOB_OPER_OK;
}
ExLobHdfsRequest::~ExLobHdfsRequest()
{
}
Ex_Lob_Error ExLobGlobals::enqueueRequest(ExLobHdfsRequest *request)
{
char buffer2[2048];
sprintf(buffer2, "enqueue request %d", request->reqType_);
traceMessage(buffer2, NULL, __LINE__);
reqQueueLock_.lock();
reqQueue_.push_back(request);
reqQueueLock_.wakeOne();
reqQueueLock_.unlock();
return LOB_OPER_OK;
}
Ex_Lob_Error ExLobGlobals::enqueuePrefetchRequest(ExLob *lobPtr, ExLobCursor *cursor)
{ // Allocated from the system heap: ExLobHdfsRequest holds an hdfsFS handle, so it cannot come from the LOB heap
  ExLobHdfsRequest *request = new ExLobHdfsRequest(Lob_Hdfs_Cursor_Prefetch, lobPtr, cursor);
  if (!request) {
    return LOB_ALLOC_ERROR; // could not allocate the request object
  }
enqueueRequest(request);
return LOB_OPER_OK;
}
Ex_Lob_Error ExLobGlobals::enqueueShutdownRequest()
{
  // Allocated from the system heap: ExLobHdfsRequest holds an hdfsFS handle, so it cannot come from the LOB heap
  ExLobHdfsRequest *request = new ExLobHdfsRequest(Lob_Hdfs_Shutdown);
  if (!request) {
    return LOB_ALLOC_ERROR; // could not allocate the request object
  }
enqueueRequest(request);
return LOB_OPER_OK;
}
ExLobHdfsRequest* ExLobGlobals::getHdfsRequest()
{
ExLobHdfsRequest *request;
reqList_t::iterator it;
reqQueueLock_.lock();
it = reqQueue_.begin();
request = NULL;
while(request == NULL)
{
if (it != reqQueue_.end())
{
request = *it;
it = reqQueue_.erase(it);
} else {
reqQueueLock_.wait();
it = reqQueue_.begin();
}
}
reqQueueLock_.unlock();
char buffer2[2048];
sprintf(buffer2, "got request %d", request->reqType_);
traceMessage(buffer2, NULL, __LINE__);
return request;
}
void ExLobGlobals::doWorkInThread()
{
ExLobHdfsRequest *request;
// mask all signals
struct sigaction act;
sigemptyset(&act.sa_mask);
sigset_t mask;
sigfillset(&mask);
int rc = pthread_sigmask(SIG_SETMASK, &mask, NULL);
if (rc != 0) {
return;
}
// enter processing zone
for (;;)
{
request = getHdfsRequest(); // will wait until new req arrives
if (request->isShutDown()) {
//we are asked to shutdown
//wake up next worker before going away
reqQueueLock_.lock();
reqQueueLock_.wakeOne();
reqQueueLock_.unlock();
break;
}
else {
performRequest(request);
delete request;
}
}
pthread_exit(0);
}
Ex_Lob_Error ExLobGlobals::addToPreOpenList(ExLobPreOpen *preOpenObj)
{
preOpenListLock_.lock();
preOpenList_.push_back(preOpenObj);
preOpenListLock_.unlock();
return LOB_OPER_OK;
}
Ex_Lob_Error ExLobGlobals::processPreOpens()
{
ExLobPreOpen *preOpenObj = NULL;
preOpenList_t::iterator p_it;
preOpenListLock_.lock();
if (!preOpenList_.empty())
{
p_it = preOpenList_.begin();
preOpenObj = *p_it;
preOpenList_.erase(p_it);
}
preOpenListLock_.unlock();
if (preOpenObj != NULL)
{
ExLob *lobPtr = preOpenObj->lobPtr_;
lobPtr->openDataCursor(preOpenObj->cursorName_, Lob_Cursor_Simple, preOpenObj->range_,
preOpenObj->bufMaxSize_, preOpenObj->maxBytes_,
preOpenObj->waited_, this);
}
return LOB_OPER_OK;
}
#endif
Ex_Lob_Error ExLobsOper (
char *lobName, // lob name
char *handleIn, // input handle (for cli calls)
Int64 handleInLen, // input handle len
char *hdfsServer, // server where hdfs fs resides
Int64 hdfsPort, // port number to access hdfs server
char *handleOut, // output handle (for cli calls)
Int64 &handleOutLen, // output handle len
Int64 descNumIn, // input desc Num (for flat files only)
Int64 &descNumOut, // output desc Num (for flat files only)
Int64 &retOperLen, // length of data involved in this operation
Int64 requestTagIn, // only for checking status
Int64 &requestTagOut, // returned with every request other than check status
Ex_Lob_Error &requestStatus, // returned req status
Int64 &cliError, // err returned by cli call
char *dir, // directory in the storage
LobsStorage storage, // storage type
char *source, // source (memory addr, filename, foreign lob etc)
Int64 sourceLen, // source len (memory len, foreign desc offset etc)
Int64 cursorBytes,
char *cursorId,
LobsOper operation, // LOB operation
LobsSubOper subOperation, // LOB sub operation
Int64 waited, // waited or nowaited
void *&globPtr, // ptr to the Lob objects.
Int64 transId,
void *blackBox, // black box to be sent to cli
Int64 blackBoxLen, // length of black box
Int64 lobMaxSize,
Int64 lobMaxChunkMemSize,
int bufferSize ,
short replication ,
int blockSize,
Lng32 openType)
{
Ex_Lob_Error err = LOB_OPER_OK;
ExLob *lobPtr = NULL;
char fn[MAX_LOB_FILE_NAME_LEN];
struct timespec startTime;
struct timespec endTime;
Int64 secs, nsecs, totalnsecs;
ExLobPreOpen *preOpenObj;
ExLobGlobals *lobGlobals = NULL;
retOperLen = 0;
ExLobDesc desc;
lobMap_t *lobMap = NULL;
lobMap_it it;
clock_gettime(CLOCK_MONOTONIC, &startTime);
char *fileName = lobName;
if (globPtr == NULL)
{
if (operation == Lob_Init)
{
globPtr = (void *) new ExLobGlobals();
if (globPtr == NULL)
return LOB_INIT_ERROR;
lobGlobals = (ExLobGlobals *)globPtr;
err = lobGlobals->initialize();
return err;
}
else
{
return LOB_GLOB_PTR_ERROR;
}
}
else
{
lobGlobals = (ExLobGlobals *)globPtr;
lobMap = lobGlobals->getLobMap();
it = lobMap->find(string(fileName));
if (it == lobMap->end())
{
//lobPtr = new (lobGlobals->getHeap())ExLob();
lobPtr = new ExLob();
if (lobPtr == NULL)
return LOB_ALLOC_ERROR;
err = lobPtr->initialize(fileName, (operation == Lob_Create) ? EX_LOB_CREATE : EX_LOB_RW, dir, storage, hdfsServer, hdfsPort, bufferSize, replication, blockSize,lobMaxSize,lobGlobals);
if (err != LOB_OPER_OK)
return err;
lobMap->insert(pair<string, ExLob*>(string(fileName), lobPtr));
}
else
{
lobPtr = it->second;
#ifndef SQ_USE_HDFS
err = lobPtr->doSanityChecks(dir, storage, handleInLen, handleOutLen, blackBoxLen);
if (err != LOB_OPER_OK)
return err;
#endif
}
}
MS_Mon_Transid_Type transIdBig;
MS_Mon_Transseq_Type transStartId;
if (!lobGlobals->isHive())
{
// get current transaction
int transIdErr = ms_transid_get(false, false, &transIdBig, &transStartId);
// set the pass thru request object values in the lob
lobPtr->getRequest()->setValues(lobPtr->getDescFileName(),
descNumIn, handleInLen, handleIn, storage,
transId, transIdBig, transStartId,
(char *)blackBox, blackBoxLen);
}
switch(operation)
{
case Lob_Create:
break;
case Lob_InsertDesc:
err = lobPtr->writeDesc(sourceLen, source, subOperation, descNumOut, retOperLen, lobMaxSize);
break;
case Lob_InsertData:
err = lobPtr->insertData(source, sourceLen, subOperation, descNumIn, retOperLen, lobMaxSize,lobMaxChunkMemSize);
break;
case Lob_InsertDataSimple:
err = lobPtr->writeDataSimple(source, sourceLen, subOperation, retOperLen,
bufferSize , replication , blockSize);
break;
case Lob_Read:
if (subOperation == Lob_Memory)
err = lobPtr->readToMem(source,sourceLen,retOperLen);
else if (subOperation == Lob_File)
err = lobPtr->readToFile(source, sourceLen, retOperLen, lobMaxChunkMemSize, openType);
else
err = LOB_SUBOPER_ERROR;
break;
case Lob_ReadDesc: // read desc only. Needed for pass thru.
err = lobPtr->getDesc(desc);
retOperLen = 0;
break;
/*** ssss
case Lob_ReadData: // read data only. Lob data file is already opened.
err = lobPtr->readDataFromFile(source, sourceLen, retOperLen);
break;
case Lob_ReadDataSimple:
err = lobPtr->readDataFromFile(source, sourceLen, retOperLen);
break;
***/
case Lob_OpenCursor:
err = lobPtr->openCursor(handleIn, handleInLen);
break;
case Lob_OpenDataCursorSimple:
if (openType == 1) { // preopen
sprintf(fn,"%s:%Lx:%s",lobPtr->getDataFileName(), (long long unsigned int)lobName, cursorId);
preOpenObj = new (lobGlobals->getHeap()) ExLobPreOpen(lobPtr, fn, descNumIn, sourceLen, cursorBytes, waited);
lobGlobals->addToPreOpenList(preOpenObj);
} else if (openType == 2) { // must open
sprintf(fn,"%s:%Lx:%s",lobPtr->getDataFileName(), (long long unsigned int)lobName, cursorId);
fileName = fn;
err = lobPtr->openDataCursor(fileName, Lob_Cursor_Simple, descNumIn, sourceLen, cursorBytes, waited, lobGlobals);
} else
err = LOB_SUBOPER_ERROR;
break;
case Lob_ReadCursor:
if (subOperation == Lob_Memory)
err = lobPtr->readCursor(source, sourceLen, handleIn, handleInLen, retOperLen);
else if (subOperation == Lob_File)
err = lobPtr->readCursor(source, -1, handleIn, handleInLen, retOperLen);
else
err = LOB_SUBOPER_ERROR;
break;
case Lob_ReadDataCursorSimple:
sprintf(fn,"%s:%Lx:%s",lobPtr->getDataFileName(), (long long unsigned int)lobName, cursorId);
fileName = fn;
err = lobPtr->readDataCursorSimple(fileName, source, sourceLen, retOperLen, lobGlobals);
break;
case Lob_CloseFile:
if (lobPtr->hasNoOpenCursors()) {
lobGlobals->traceMessage("Lob_CloseFile",NULL,__LINE__);
err = lobPtr->closeFile();
it = lobMap->find(string(lobName));
lobMap->erase(it);
delete lobPtr;
lobPtr = NULL;
}
break;
case Lob_CloseCursor:
err = lobPtr->closeCursor(handleIn, handleInLen);
break;
case Lob_CloseDataCursorSimple:
sprintf(fn,"%s:%Lx:%s",lobPtr->getDataFileName(), (long long unsigned int)lobName, cursorId);
fileName = fn;
err = lobPtr->closeDataCursorSimple(fileName, lobGlobals);
break;
case Lob_Append:
if (subOperation == Lob_Memory)
err = lobPtr->append(source, sourceLen, subOperation, descNumIn, retOperLen,lobMaxSize, lobMaxChunkMemSize);
else if (subOperation == Lob_File)
err = lobPtr->append(source, -1, subOperation, descNumIn, retOperLen,lobMaxSize, lobMaxChunkMemSize);
else
err = LOB_SUBOPER_ERROR;
break;
case Lob_Update:
if (subOperation == Lob_Memory)
err = lobPtr->update(source, sourceLen, subOperation, descNumIn, retOperLen, lobMaxSize, lobMaxChunkMemSize);
else if (subOperation == Lob_File)
err = lobPtr->update(source, -1, subOperation,descNumIn, retOperLen,lobMaxSize, lobMaxChunkMemSize);
else
err = LOB_SUBOPER_ERROR;
break;
case Lob_Delete:
err = lobPtr->delDesc();
break;
case Lob_Drop:
err = lobPtr->purgeLob();
it = lobMap->find(string(lobName));
lobMap->erase(it);
delete lobPtr;
lobPtr = NULL;
break;
case Lob_Purge:
err = lobPtr->purgeLob();
it = lobMap->find(string(lobName));
lobMap->erase(it);
delete lobPtr;
lobPtr = NULL;
break;
case Lob_Print:
err = lobPtr->print();
break;
case Lob_Stats:
err = lobPtr->readStats(source);
lobPtr->initStats(); // because file may remain open across cursors
break;
case Lob_Empty_Directory:
lobPtr->initialize(fileName, EX_LOB_RW,
dir, storage, hdfsServer, hdfsPort, bufferSize, replication, blockSize);
err = lobPtr->emptyDirectory();
break;
case Lob_Cleanup:
delete lobGlobals;
break;
default:
err = LOB_OPER_ERROR;
break;
}
if (!lobGlobals->isHive() )
{
if (lobPtr)
// set the pass thru request object values from the lob
      lobPtr->getRequest()->getValues(descNumOut, handleOutLen, handleOut,
                                      requestStatus, cliError,
                                      (char *)blackBox, blackBoxLen);
    // reinstate the transaction
    if (TRANSID_IS_VALID(transIdBig)) {
      ms_transid_reinstate(transIdBig, transStartId);
    }
}
clock_gettime(CLOCK_MONOTONIC, &endTime);
secs = endTime.tv_sec - startTime.tv_sec;
nsecs = endTime.tv_nsec - startTime.tv_nsec;
if (nsecs < 0) {
secs--;
nsecs += NUM_NSECS_IN_SEC;
}
totalnsecs = (secs * NUM_NSECS_IN_SEC) + nsecs;
if (lobPtr && lobPtr->getStats())
lobPtr->getStats()->hdfsAccessLayerTime += totalnsecs;
return err;
}
void cleanupLOBDataDescFiles(const char *lobHdfsServer,int lobHdfsPort,const char *lobHdfsLoc)
{
int numExistingFiles=0;
hdfsFS fs;
fs = hdfsConnect(lobHdfsServer, lobHdfsPort);
if (fs == NULL)
return;
  // Get the list of all data and desc files in the lob storage location
  hdfsFileInfo *fileInfos = hdfsListDirectory(fs, lobHdfsLoc, &numExistingFiles);
  if (fileInfos == NULL)
    return;
//Delete each one in a loop
for (int i = 0; i < numExistingFiles; i++)
hdfsDelete(fs, fileInfos[i].mName, 0);
// *Note* : delete the memory allocated by libhdfs for the file info array
if (fileInfos)
{
hdfsFreeFileInfo(fileInfos, numExistingFiles);
}
}
| 1 | 8,358 | Declaring fileflags to be of type LobTgtFileFlags would eliminate the need for all this casting. And would be safer. (Not a show-stopper though.) | apache-trafodion | cpp |
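A minimal sketch of the reviewer's suggestion (hypothetical; only the parameter type changes, and the LobTgtFileFlags enum is assumed visible at the declaration):

Ex_Lob_Error ExLob::readDataToLocalFile(char *fileName, Int64 offset, Int64 size,
                                        Int64 &writeOperLen,
                                        Int64 lobMaxChunkMemSize,
                                        LobTgtFileFlags fileflags) // was Int32
{
   int openFlags = O_RDWR;                 // O_DIRECT needs mem alignment
   if (fileflags == Lob_Append_Or_Error)   // no (LobTgtFileFlags) cast needed
      openFlags |= O_APPEND;
   else
      openFlags |= O_TRUNC;
   // remainder of the body unchanged
   return LOB_OPER_OK;                     // placeholder for the rest
}

Callers then pass the enum directly, and an out-of-range integer no longer converts silently.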
@@ -124,7 +124,7 @@ public class DockerOptions {
for (int i = 0; i < maxContainerCount; i++) {
node.add(caps, new DockerSessionFactory(clientFactory, docker, image, caps));
}
- LOG.info(String.format(
+ LOG.finest(String.format(
"Mapping %s to docker image %s %d times",
caps,
name, | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.docker;
import static java.util.logging.Level.WARNING;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.docker.Docker;
import org.openqa.selenium.docker.DockerException;
import org.openqa.selenium.docker.Image;
import org.openqa.selenium.docker.ImageNamePredicate;
import org.openqa.selenium.grid.config.Config;
import org.openqa.selenium.grid.config.ConfigException;
import org.openqa.selenium.grid.node.local.LocalNode;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.logging.Logger;
public class DockerOptions {
private static final Logger LOG = Logger.getLogger(DockerOptions.class.getName());
private static final Json JSON = new Json();
private final Config config;
public DockerOptions(Config config) {
this.config = Objects.requireNonNull(config);
}
private URL getDockerUrl() {
try {
String raw = config.get("docker", "url")
.orElseThrow(() -> new ConfigException("No docker url configured"));
return new URL(raw);
} catch (MalformedURLException e) {
throw new UncheckedIOException(e);
}
}
private boolean isEnabled(HttpClient.Factory clientFactory) {
if (!config.getAll("docker", "configs").isPresent()) {
return false;
}
// Is the daemon up and running?
URL url = getDockerUrl();
HttpClient client = clientFactory.createClient(url);
try {
HttpResponse response = client.execute(new HttpRequest(GET, "/_ping"));
if (response.getStatus() != 200) {
LOG.warning(String.format("Docker config enabled, but daemon unreachable: %s", url));
return false;
}
return true;
} catch (IOException e) {
LOG.log(WARNING, "Unable to ping docker daemon. Docker disabled: " + e.getMessage());
return false;
}
}
public void configure(HttpClient.Factory clientFactory, LocalNode.Builder node)
throws IOException {
if (!isEnabled(clientFactory)) {
return;
}
List<String> allConfigs = config.getAll("docker", "configs")
.orElseThrow(() -> new DockerException("Unable to find docker configs"));
Multimap<String, Capabilities> kinds = HashMultimap.create();
for (int i = 0; i < allConfigs.size(); i++) {
String imageName = allConfigs.get(i);
i++;
if (i == allConfigs.size()) {
throw new DockerException("Unable to find JSON config");
}
Capabilities stereotype = JSON.toType(allConfigs.get(i), Capabilities.class);
kinds.put(imageName, stereotype);
}
HttpClient client = clientFactory.createClient(new URL("http://localhost:2375"));
Docker docker = new Docker(client);
loadImages(docker, kinds.keySet().toArray(new String[0]));
int maxContainerCount = Runtime.getRuntime().availableProcessors();
kinds.forEach((name, caps) -> {
Image image = docker.findImage(new ImageNamePredicate(name))
.orElseThrow(() -> new DockerException(
String.format("Cannot find image matching: %s", name)));
for (int i = 0; i < maxContainerCount; i++) {
node.add(caps, new DockerSessionFactory(clientFactory, docker, image, caps));
}
LOG.info(String.format(
"Mapping %s to docker image %s %d times",
caps,
name,
maxContainerCount));
});
}
private void loadImages(Docker docker, String... imageNames) {
CompletableFuture<Void> cd = CompletableFuture.allOf(
Arrays.stream(imageNames)
.map(entry -> {
int index = entry.lastIndexOf(':');
if (index == -1) {
throw new RuntimeException("Unable to determine tag from " + entry);
}
String name = entry.substring(0, index);
String version = entry.substring(index + 1);
return CompletableFuture.supplyAsync(() -> docker.pull(name, version));
}).toArray(CompletableFuture[]::new));
try {
cd.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} catch (ExecutionException e) {
Throwable cause = e.getCause() != null ? e.getCause() : e;
if (cause instanceof RuntimeException) {
throw (RuntimeException) cause;
}
throw new RuntimeException(cause);
}
}
}
| 1 | 16,469 | This change prevents a user understanding how their server is configured. Best to leave at `info` level. | SeleniumHQ-selenium | js |
@@ -471,8 +471,10 @@ void TSSLSocket::checkHandshake() {
}
} while (rc == 2);
} else {
- // set the SNI hostname
- SSL_set_tlsext_host_name(ssl_, getHost().c_str());
+ /* OpenSSL < 0.9.8f does not have SSL_set_tlsext_host_name() */
+ #if defined(SSL_set_tlsext_host_name) // set the SNI hostname
+ SSL_set_tlsext_host_name(ssl_, getHost().c_str());
+ #endif
do {
rc = SSL_connect(ssl_);
if (rc <= 0) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <thrift/thrift-config.h>
#include <errno.h>
#include <string>
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include <boost/lexical_cast.hpp>
#include <boost/shared_array.hpp>
#include <openssl/err.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>
#include <openssl/x509v3.h>
#include <thrift/concurrency/Mutex.h>
#include <thrift/transport/TSSLSocket.h>
#include <thrift/transport/PlatformSocket.h>
#define OPENSSL_VERSION_NO_THREAD_ID 0x10000000L
using namespace std;
using namespace apache::thrift::concurrency;
struct CRYPTO_dynlock_value {
Mutex mutex;
};
namespace apache {
namespace thrift {
namespace transport {
// OpenSSL initialization/cleanup
static bool openSSLInitialized = false;
static boost::shared_array<Mutex> mutexes;
static void callbackLocking(int mode, int n, const char*, int) {
if (mode & CRYPTO_LOCK) {
mutexes[n].lock();
} else {
mutexes[n].unlock();
}
}
#if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_NO_THREAD_ID)
static unsigned long callbackThreadID() {
#ifdef _WIN32
return (unsigned long)GetCurrentThreadId();
#else
return (unsigned long)pthread_self();
#endif
}
#endif
static CRYPTO_dynlock_value* dyn_create(const char*, int) {
return new CRYPTO_dynlock_value;
}
static void dyn_lock(int mode, struct CRYPTO_dynlock_value* lock, const char*, int) {
if (lock != NULL) {
if (mode & CRYPTO_LOCK) {
lock->mutex.lock();
} else {
lock->mutex.unlock();
}
}
}
static void dyn_destroy(struct CRYPTO_dynlock_value* lock, const char*, int) {
delete lock;
}
void initializeOpenSSL() {
if (openSSLInitialized) {
return;
}
openSSLInitialized = true;
SSL_library_init();
SSL_load_error_strings();
// static locking
mutexes = boost::shared_array<Mutex>(new Mutex[ ::CRYPTO_num_locks()]);
if (mutexes == NULL) {
throw TTransportException(TTransportException::INTERNAL_ERROR,
"initializeOpenSSL() failed, "
"out of memory while creating mutex array");
}
#if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_NO_THREAD_ID)
CRYPTO_set_id_callback(callbackThreadID);
#endif
CRYPTO_set_locking_callback(callbackLocking);
// dynamic locking
CRYPTO_set_dynlock_create_callback(dyn_create);
CRYPTO_set_dynlock_lock_callback(dyn_lock);
CRYPTO_set_dynlock_destroy_callback(dyn_destroy);
}
void cleanupOpenSSL() {
if (!openSSLInitialized) {
return;
}
openSSLInitialized = false;
#if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_NO_THREAD_ID)
CRYPTO_set_id_callback(NULL);
#endif
CRYPTO_set_locking_callback(NULL);
CRYPTO_set_dynlock_create_callback(NULL);
CRYPTO_set_dynlock_lock_callback(NULL);
CRYPTO_set_dynlock_destroy_callback(NULL);
ERR_free_strings();
EVP_cleanup();
CRYPTO_cleanup_all_ex_data();
ERR_remove_state(0);
mutexes.reset();
}
static void buildErrors(string& message, int error = 0);
static bool matchName(const char* host, const char* pattern, int size);
static char uppercase(char c);
// SSLContext implementation
SSLContext::SSLContext(const SSLProtocol& protocol) {
if (protocol == SSLTLS) {
ctx_ = SSL_CTX_new(SSLv23_method());
#ifndef OPENSSL_NO_SSL3
} else if (protocol == SSLv3) {
ctx_ = SSL_CTX_new(SSLv3_method());
#endif
} else if (protocol == TLSv1_0) {
ctx_ = SSL_CTX_new(TLSv1_method());
} else if (protocol == TLSv1_1) {
ctx_ = SSL_CTX_new(TLSv1_1_method());
} else if (protocol == TLSv1_2) {
ctx_ = SSL_CTX_new(TLSv1_2_method());
} else {
/// UNKNOWN PROTOCOL!
throw TSSLException("SSL_CTX_new: Unknown protocol");
}
if (ctx_ == NULL) {
string errors;
buildErrors(errors);
throw TSSLException("SSL_CTX_new: " + errors);
}
SSL_CTX_set_mode(ctx_, SSL_MODE_AUTO_RETRY);
// Disable horribly insecure SSLv2 and SSLv3 protocols but allow a handshake
// with older clients so they get a graceful denial.
if (protocol == SSLTLS) {
SSL_CTX_set_options(ctx_, SSL_OP_NO_SSLv2);
SSL_CTX_set_options(ctx_, SSL_OP_NO_SSLv3); // THRIFT-3164
}
}
SSLContext::~SSLContext() {
if (ctx_ != NULL) {
SSL_CTX_free(ctx_);
ctx_ = NULL;
}
}
SSL* SSLContext::createSSL() {
SSL* ssl = SSL_new(ctx_);
if (ssl == NULL) {
string errors;
buildErrors(errors);
throw TSSLException("SSL_new: " + errors);
}
return ssl;
}
// TSSLSocket implementation
TSSLSocket::TSSLSocket(boost::shared_ptr<SSLContext> ctx)
: TSocket(), server_(false), ssl_(NULL), ctx_(ctx) {
}
TSSLSocket::TSSLSocket(boost::shared_ptr<SSLContext> ctx, boost::shared_ptr<THRIFT_SOCKET> interruptListener)
: TSocket(), server_(false), ssl_(NULL), ctx_(ctx) {
interruptListener_ = interruptListener;
}
TSSLSocket::TSSLSocket(boost::shared_ptr<SSLContext> ctx, THRIFT_SOCKET socket)
: TSocket(socket), server_(false), ssl_(NULL), ctx_(ctx) {
}
TSSLSocket::TSSLSocket(boost::shared_ptr<SSLContext> ctx, THRIFT_SOCKET socket, boost::shared_ptr<THRIFT_SOCKET> interruptListener)
: TSocket(socket, interruptListener), server_(false), ssl_(NULL), ctx_(ctx) {
}
TSSLSocket::TSSLSocket(boost::shared_ptr<SSLContext> ctx, string host, int port)
: TSocket(host, port), server_(false), ssl_(NULL), ctx_(ctx) {
}
TSSLSocket::TSSLSocket(boost::shared_ptr<SSLContext> ctx, string host, int port, boost::shared_ptr<THRIFT_SOCKET> interruptListener)
: TSocket(host, port), server_(false), ssl_(NULL), ctx_(ctx) {
interruptListener_ = interruptListener;
}
TSSLSocket::~TSSLSocket() {
close();
}
bool TSSLSocket::isOpen() {
if (ssl_ == NULL || !TSocket::isOpen()) {
return false;
}
int shutdown = SSL_get_shutdown(ssl_);
// "!!" is squelching C4800 "forcing bool -> true or false" performance warning
bool shutdownReceived = !!(shutdown & SSL_RECEIVED_SHUTDOWN);
bool shutdownSent = !!(shutdown & SSL_SENT_SHUTDOWN);
if (shutdownReceived && shutdownSent) {
return false;
}
return true;
}
bool TSSLSocket::peek() {
if (!isOpen()) {
return false;
}
checkHandshake();
int rc;
uint8_t byte;
do {
rc = SSL_peek(ssl_, &byte, 1);
if (rc < 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
int error = SSL_get_error(ssl_, rc);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
waitForEvent(error == SSL_ERROR_WANT_READ);
continue;
default:;// do nothing
}
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_peek: " + errors);
} else if (rc == 0) {
ERR_clear_error();
break;
}
} while (true);
return (rc > 0);
}
void TSSLSocket::open() {
if (isOpen() || server()) {
throw TTransportException(TTransportException::BAD_ARGS);
}
TSocket::open();
}
void TSSLSocket::close() {
if (ssl_ != NULL) {
int rc;
do {
rc = SSL_shutdown(ssl_);
if (rc <= 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
int error = SSL_get_error(ssl_, rc);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
waitForEvent(error == SSL_ERROR_WANT_READ);
rc = 2;
default:;// do nothing
}
}
} while (rc == 2);
if (rc < 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
GlobalOutput(("SSL_shutdown: " + errors).c_str());
}
SSL_free(ssl_);
ssl_ = NULL;
ERR_remove_state(0);
}
TSocket::close();
}
uint32_t TSSLSocket::read(uint8_t* buf, uint32_t len) {
checkHandshake();
int32_t bytes = 0;
for (int32_t retries = 0; retries < maxRecvRetries_; retries++) {
ERR_clear_error();
bytes = SSL_read(ssl_, buf, len);
if (bytes >= 0)
break;
int32_t errno_copy = THRIFT_GET_SOCKET_ERROR;
int32_t error = SSL_get_error(ssl_, bytes);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
if (retries++ >= maxRecvRetries_) {
// THRIFT_EINTR needs to be handled manually and we can tolerate
// a certain number
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
if (waitForEvent(error == SSL_ERROR_WANT_READ) == TSSL_EINTR ) {
// repeat operation
if (retries++ < maxRecvRetries_) {
// THRIFT_EINTR needs to be handled manually and we can tolerate
// a certain number
continue;
}
throw TTransportException(TTransportException::INTERNAL_ERROR, "too much recv retries");
}
continue;
default:;// do nothing
}
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_read: " + errors);
}
return bytes;
}
void TSSLSocket::write(const uint8_t* buf, uint32_t len) {
checkHandshake();
// loop in case SSL_MODE_ENABLE_PARTIAL_WRITE is set in SSL_CTX.
uint32_t written = 0;
while (written < len) {
ERR_clear_error();
int32_t bytes = SSL_write(ssl_, &buf[written], len - written);
if (bytes <= 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
int error = SSL_get_error(ssl_, bytes);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
waitForEvent(error == SSL_ERROR_WANT_READ);
continue;
default:;// do nothing
}
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_write: " + errors);
}
written += bytes;
}
}
void TSSLSocket::flush() {
// Don't throw exception if not open. Thrift servers close socket twice.
if (ssl_ == NULL) {
return;
}
checkHandshake();
BIO* bio = SSL_get_wbio(ssl_);
if (bio == NULL) {
throw TSSLException("SSL_get_wbio returns NULL");
}
if (BIO_flush(bio) != 1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("BIO_flush: " + errors);
}
}
void TSSLSocket::checkHandshake() {
if (!TSocket::isOpen()) {
throw TTransportException(TTransportException::NOT_OPEN);
}
if (ssl_ != NULL) {
return;
}
// set underlying socket to non-blocking
int flags;
if ((flags = THRIFT_FCNTL(socket_, THRIFT_F_GETFL, 0)) < 0
|| THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK) < 0) {
GlobalOutput.perror("thriftServerEventHandler: set THRIFT_O_NONBLOCK (THRIFT_FCNTL) ",
THRIFT_GET_SOCKET_ERROR);
::THRIFT_CLOSESOCKET(socket_);
return;
}
ssl_ = ctx_->createSSL();
//set read and write bios to non-blocking
BIO* wbio = BIO_new(BIO_s_mem());
if (wbio == NULL) {
throw TSSLException("SSL_get_wbio returns NULL");
}
BIO_set_nbio(wbio, 1);
BIO* rbio = BIO_new(BIO_s_mem());
if (rbio == NULL) {
throw TSSLException("SSL_get_rbio returns NULL");
}
BIO_set_nbio(rbio, 1);
SSL_set_bio(ssl_, rbio, wbio);
SSL_set_fd(ssl_, static_cast<int>(socket_));
int rc;
if (server()) {
do {
rc = SSL_accept(ssl_);
if (rc <= 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
int error = SSL_get_error(ssl_, rc);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
waitForEvent(error == SSL_ERROR_WANT_READ);
rc = 2;
default:;// do nothing
}
}
} while (rc == 2);
} else {
// set the SNI hostname
SSL_set_tlsext_host_name(ssl_, getHost().c_str());
do {
rc = SSL_connect(ssl_);
if (rc <= 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
int error = SSL_get_error(ssl_, rc);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
waitForEvent(error == SSL_ERROR_WANT_READ);
rc = 2;
default:;// do nothing
}
}
} while (rc == 2);
}
if (rc <= 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string fname(server() ? "SSL_accept" : "SSL_connect");
string errors;
buildErrors(errors, errno_copy);
throw TSSLException(fname + ": " + errors);
}
authorize();
}
void TSSLSocket::authorize() {
int rc = SSL_get_verify_result(ssl_);
if (rc != X509_V_OK) { // verify authentication result
throw TSSLException(string("SSL_get_verify_result(), ") + X509_verify_cert_error_string(rc));
}
X509* cert = SSL_get_peer_certificate(ssl_);
if (cert == NULL) {
// certificate is not present
if (SSL_get_verify_mode(ssl_) & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) {
throw TSSLException("authorize: required certificate not present");
}
// certificate was optional: didn't intend to authorize remote
if (server() && access_ != NULL) {
throw TSSLException("authorize: certificate required for authorization");
}
return;
}
// certificate is present
if (access_ == NULL) {
X509_free(cert);
return;
}
// both certificate and access manager are present
string host;
sockaddr_storage sa;
socklen_t saLength = sizeof(sa);
if (getpeername(socket_, (sockaddr*)&sa, &saLength) != 0) {
sa.ss_family = AF_UNSPEC;
}
AccessManager::Decision decision = access_->verify(sa);
if (decision != AccessManager::SKIP) {
X509_free(cert);
if (decision != AccessManager::ALLOW) {
throw TSSLException("authorize: access denied based on remote IP");
}
return;
}
// extract subjectAlternativeName
STACK_OF(GENERAL_NAME)* alternatives
= (STACK_OF(GENERAL_NAME)*)X509_get_ext_d2i(cert, NID_subject_alt_name, NULL, NULL);
if (alternatives != NULL) {
const int count = sk_GENERAL_NAME_num(alternatives);
for (int i = 0; decision == AccessManager::SKIP && i < count; i++) {
const GENERAL_NAME* name = sk_GENERAL_NAME_value(alternatives, i);
if (name == NULL) {
continue;
}
char* data = (char*)ASN1_STRING_data(name->d.ia5);
int length = ASN1_STRING_length(name->d.ia5);
switch (name->type) {
case GEN_DNS:
if (host.empty()) {
host = (server() ? getPeerHost() : getHost());
}
decision = access_->verify(host, data, length);
break;
case GEN_IPADD:
decision = access_->verify(sa, data, length);
break;
}
}
sk_GENERAL_NAME_pop_free(alternatives, GENERAL_NAME_free);
}
if (decision != AccessManager::SKIP) {
X509_free(cert);
if (decision != AccessManager::ALLOW) {
throw TSSLException("authorize: access denied");
}
return;
}
// extract commonName
X509_NAME* name = X509_get_subject_name(cert);
if (name != NULL) {
X509_NAME_ENTRY* entry;
unsigned char* utf8;
int last = -1;
while (decision == AccessManager::SKIP) {
last = X509_NAME_get_index_by_NID(name, NID_commonName, last);
if (last == -1)
break;
entry = X509_NAME_get_entry(name, last);
if (entry == NULL)
continue;
ASN1_STRING* common = X509_NAME_ENTRY_get_data(entry);
int size = ASN1_STRING_to_UTF8(&utf8, common);
if (host.empty()) {
host = (server() ? getPeerHost() : getHost());
}
decision = access_->verify(host, (char*)utf8, size);
OPENSSL_free(utf8);
}
}
X509_free(cert);
if (decision != AccessManager::ALLOW) {
throw TSSLException("authorize: cannot authorize peer");
}
}
unsigned int TSSLSocket::waitForEvent(bool wantRead) {
int fdSocket;
BIO* bio;
if (wantRead) {
bio = SSL_get_rbio(ssl_);
} else {
bio = SSL_get_wbio(ssl_);
}
if (bio == NULL) {
throw TSSLException("SSL_get_?bio returned NULL");
}
if (BIO_get_fd(bio, &fdSocket) <= 0) {
throw TSSLException("BIO_get_fd failed");
}
struct THRIFT_POLLFD fds[2];
std::memset(fds, 0, sizeof(fds));
fds[0].fd = fdSocket;
fds[0].events = wantRead ? THRIFT_POLLIN : THRIFT_POLLOUT;
if (interruptListener_) {
fds[1].fd = *(interruptListener_.get());
fds[1].events = THRIFT_POLLIN;
}
int ret = THRIFT_POLL(fds, interruptListener_ ? 2 : 1, -1);
if (ret < 0) {
// error cases
if (THRIFT_GET_SOCKET_ERROR == THRIFT_EINTR) {
return TSSL_EINTR; // repeat operation
}
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSSLSocket::read THRIFT_POLL() ", errno_copy);
throw TTransportException(TTransportException::UNKNOWN, "Unknown", errno_copy);
} else if (ret > 0){
if (fds[1].revents & THRIFT_POLLIN) {
throw TTransportException(TTransportException::INTERRUPTED, "Interrupted");
}
return TSSL_DATA;
} else {
throw TTransportException(TTransportException::TIMED_OUT, "THRIFT_POLL (timed out)");
}
}
// TSSLSocketFactory implementation
uint64_t TSSLSocketFactory::count_ = 0;
Mutex TSSLSocketFactory::mutex_;
bool TSSLSocketFactory::manualOpenSSLInitialization_ = false;
TSSLSocketFactory::TSSLSocketFactory(SSLProtocol protocol) : server_(false) {
Guard guard(mutex_);
if (count_ == 0) {
if (!manualOpenSSLInitialization_) {
initializeOpenSSL();
}
randomize();
}
count_++;
ctx_ = boost::shared_ptr<SSLContext>(new SSLContext(protocol));
}
TSSLSocketFactory::~TSSLSocketFactory() {
Guard guard(mutex_);
ctx_.reset();
count_--;
if (count_ == 0 && !manualOpenSSLInitialization_) {
cleanupOpenSSL();
}
}
boost::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket() {
boost::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_));
setup(ssl);
return ssl;
}
boost::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(boost::shared_ptr<THRIFT_SOCKET> interruptListener) {
boost::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, interruptListener));
setup(ssl);
return ssl;
}
boost::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(THRIFT_SOCKET socket) {
boost::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, socket));
setup(ssl);
return ssl;
}
boost::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(THRIFT_SOCKET socket, boost::shared_ptr<THRIFT_SOCKET> interruptListener) {
boost::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, socket, interruptListener));
setup(ssl);
return ssl;
}
boost::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(const string& host, int port) {
boost::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, host, port));
setup(ssl);
return ssl;
}
boost::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(const string& host, int port, boost::shared_ptr<THRIFT_SOCKET> interruptListener) {
boost::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, host, port, interruptListener));
setup(ssl);
return ssl;
}
void TSSLSocketFactory::setup(boost::shared_ptr<TSSLSocket> ssl) {
ssl->server(server());
if (access_ == NULL && !server()) {
access_ = boost::shared_ptr<AccessManager>(new DefaultClientAccessManager);
}
if (access_ != NULL) {
ssl->access(access_);
}
}
void TSSLSocketFactory::ciphers(const string& enable) {
int rc = SSL_CTX_set_cipher_list(ctx_->get(), enable.c_str());
if (ERR_peek_error() != 0) {
string errors;
buildErrors(errors);
throw TSSLException("SSL_CTX_set_cipher_list: " + errors);
}
if (rc == 0) {
throw TSSLException("None of specified ciphers are supported");
}
}
void TSSLSocketFactory::authenticate(bool required) {
int mode;
if (required) {
mode = SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT | SSL_VERIFY_CLIENT_ONCE;
} else {
mode = SSL_VERIFY_NONE;
}
SSL_CTX_set_verify(ctx_->get(), mode, NULL);
}
void TSSLSocketFactory::loadCertificate(const char* path, const char* format) {
if (path == NULL || format == NULL) {
throw TTransportException(TTransportException::BAD_ARGS,
"loadCertificateChain: either <path> or <format> is NULL");
}
if (strcmp(format, "PEM") == 0) {
if (SSL_CTX_use_certificate_chain_file(ctx_->get(), path) == 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_CTX_use_certificate_chain_file: " + errors);
}
} else {
throw TSSLException("Unsupported certificate format: " + string(format));
}
}
void TSSLSocketFactory::loadPrivateKey(const char* path, const char* format) {
if (path == NULL || format == NULL) {
throw TTransportException(TTransportException::BAD_ARGS,
"loadPrivateKey: either <path> or <format> is NULL");
}
if (strcmp(format, "PEM") == 0) {
if (SSL_CTX_use_PrivateKey_file(ctx_->get(), path, SSL_FILETYPE_PEM) == 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_CTX_use_PrivateKey_file: " + errors);
}
}
}
void TSSLSocketFactory::loadTrustedCertificates(const char* path) {
if (path == NULL) {
throw TTransportException(TTransportException::BAD_ARGS,
"loadTrustedCertificates: <path> is NULL");
}
if (SSL_CTX_load_verify_locations(ctx_->get(), path, NULL) == 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_CTX_load_verify_locations: " + errors);
}
}
void TSSLSocketFactory::randomize() {
RAND_poll();
}
void TSSLSocketFactory::overrideDefaultPasswordCallback() {
SSL_CTX_set_default_passwd_cb(ctx_->get(), passwordCallback);
SSL_CTX_set_default_passwd_cb_userdata(ctx_->get(), this);
}
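// OpenSSL password callback: copies the password supplied by the factory into the
// buffer provided by OpenSSL (truncated to 'size' if longer) and then overwrites
// the local copy with '*' as a best-effort scrub.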
int TSSLSocketFactory::passwordCallback(char* password, int size, int, void* data) {
TSSLSocketFactory* factory = (TSSLSocketFactory*)data;
string userPassword;
factory->getPassword(userPassword, size);
int length = static_cast<int>(userPassword.size());
if (length > size) {
length = size;
}
strncpy(password, userPassword.c_str(), length);
userPassword.assign(userPassword.size(), '*');
return length;
}
// extract error messages from error queue
void buildErrors(string& errors, int errno_copy) {
unsigned long errorCode;
char message[256];
errors.reserve(512);
while ((errorCode = ERR_get_error()) != 0) {
if (!errors.empty()) {
errors += "; ";
}
const char* reason = ERR_reason_error_string(errorCode);
if (reason == NULL) {
THRIFT_SNPRINTF(message, sizeof(message) - 1, "SSL error # %lu", errorCode);
reason = message;
}
errors += reason;
}
if (errors.empty()) {
if (errno_copy != 0) {
errors += TOutput::strerror_s(errno_copy);
}
}
if (errors.empty()) {
errors = "error code: " + boost::lexical_cast<string>(errno_copy);
}
}
/**
* Default implementation of AccessManager
*/
Decision DefaultClientAccessManager::verify(const sockaddr_storage& sa) throw() {
(void)sa;
return SKIP;
}
Decision DefaultClientAccessManager::verify(const string& host,
const char* name,
int size) throw() {
if (host.empty() || name == NULL || size <= 0) {
return SKIP;
}
return (matchName(host.c_str(), name, size) ? ALLOW : SKIP);
}
Decision DefaultClientAccessManager::verify(const sockaddr_storage& sa,
const char* data,
int size) throw() {
bool match = false;
if (sa.ss_family == AF_INET && size == sizeof(in_addr)) {
match = (memcmp(&((sockaddr_in*)&sa)->sin_addr, data, size) == 0);
} else if (sa.ss_family == AF_INET6 && size == sizeof(in6_addr)) {
match = (memcmp(&((sockaddr_in6*)&sa)->sin6_addr, data, size) == 0);
}
return (match ? ALLOW : SKIP);
}
/**
* Match a name with a pattern. The pattern may include a wildcard. A single
* wildcard "*" matches at most one component of the domain name.
*
* @param host Host name, typically the name of the remote host
* @param pattern Name retrieved from certificate
* @param size Size of "pattern"
* @return True, if "host" matches "pattern". False otherwise.
*/
bool matchName(const char* host, const char* pattern, int size) {
bool match = false;
int i = 0, j = 0;
while (i < size && host[j] != '\0') {
if (uppercase(pattern[i]) == uppercase(host[j])) {
i++;
j++;
continue;
}
if (pattern[i] == '*') {
while (host[j] != '.' && host[j] != '\0') {
j++;
}
i++;
continue;
}
break;
}
if (i == size && host[j] == '\0') {
match = true;
}
return match;
}
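// Illustrative examples (added commentary, not part of the original source):
// given the single-label wildcard semantics above,
//   matchName("host.example.com", "*.example.com", 13) -> true
//   matchName("a.b.example.com", "*.example.com", 13)  -> false ("*" stops at the first '.')
//   matchName("EXAMPLE.com", "example.com", 11)        -> true  (comparison is case-insensitive)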
// This is to work around the Turkish locale issue, i.e.,
// toupper('i') != toupper('I') if locale is "tr_TR"
char uppercase(char c) {
if ('a' <= c && c <= 'z') {
return c + ('A' - 'a');
}
return c;
}
}
}
}
| 1 | 12,238 | Typically we would add something to the build system environment to differentiate this; also is there an alternative that can be used with older OpenSSL? Other folks who are committers will need to decide if it is worth supporting an older and likely quite vulnerable (to hacks) OpenSSL library. | apache-thrift | c |
@@ -367,7 +367,7 @@ var (
},
}
- readBlockProducersByHeightTests = []struct {
+ readActiveBlockProducersByHeightTests = []struct {
// Arguments
protocolID string
protocolType string | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"context"
"encoding/hex"
"math/big"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/proto"
"github.com/iotexproject/iotex-election/test/mock/mock_committee"
"github.com/iotexproject/iotex-election/types"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/poll/pollpb"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/gasstation"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
"github.com/iotexproject/iotex-core/protogen/iotextypes"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/test/mock/mock_dispatcher"
ta "github.com/iotexproject/iotex-core/test/testaddress"
"github.com/iotexproject/iotex-core/testutil"
)
const (
testTriePath = "trie.test"
testDBPath = "db.test"
)
var (
testTransfer, _ = testutil.SignedTransfer(ta.Addrinfo["alfa"].String(),
ta.Keyinfo["alfa"].PriKey, 3, big.NewInt(10), []byte{}, testutil.TestGasLimit,
big.NewInt(testutil.TestGasPrice))
testTransferPb = testTransfer.Proto()
testExecution, _ = testutil.SignedExecution(ta.Addrinfo["bravo"].String(),
ta.Keyinfo["bravo"].PriKey, 1, big.NewInt(0), testutil.TestGasLimit,
big.NewInt(testutil.TestGasPrice), []byte{})
testExecutionPb = testExecution.Proto()
testTransfer1, _ = testutil.SignedTransfer(ta.Addrinfo["charlie"].String(), ta.Keyinfo["producer"].PriKey, 1,
big.NewInt(10), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
transferHash1 = testTransfer1.Hash()
testVote1, _ = testutil.SignedVote(ta.Addrinfo["charlie"].String(), ta.Keyinfo["charlie"].PriKey, 5,
testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
voteHash1 = testVote1.Hash()
testExecution1, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["producer"].PriKey, 5,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(10), []byte{1})
executionHash1 = testExecution1.Hash()
testExecution2, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["charlie"].PriKey, 6,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
executionHash2 = testExecution2.Hash()
testExecution3, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["alfa"].PriKey, 2,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
executionHash3 = testExecution3.Hash()
)
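// delegates lists three genesis delegates (identityset addresses 0-2) with equal
// votes; it is used below by the lifeLongDelegates poll protocol test cases.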
var (
delegates = []genesis.Delegate{
{
OperatorAddrStr: identityset.Address(0).String(),
VotesStr: "10",
},
{
OperatorAddrStr: identityset.Address(1).String(),
VotesStr: "10",
},
{
OperatorAddrStr: identityset.Address(2).String(),
VotesStr: "10",
},
}
)
var (
getAccountTests = []struct {
in string
address string
balance string
nonce uint64
pendingNonce uint64
}{
{ta.Addrinfo["charlie"].String(),
"io1d4c5lp4ea4754wy439g2t99ue7wryu5r2lslh2",
"3",
8,
9,
},
{
ta.Addrinfo["producer"].String(),
"io1mflp9m6hcgm2qcghchsdqj3z3eccrnekx9p0ms",
"9999999999999999999999999991",
1,
6,
},
}
getActionsTests = []struct {
start uint64
count uint64
numActions int
}{
{
1,
11,
11,
},
{
11,
5,
4,
},
}
getActionTests = []struct {
checkPending bool
in string
nonce uint64
senderPubKey string
}{
{
false,
hex.EncodeToString(transferHash1[:]),
1,
testTransfer1.SrcPubkey().HexString(),
},
{
false,
hex.EncodeToString(voteHash1[:]),
5,
testVote1.SrcPubkey().HexString(),
},
{
true,
hex.EncodeToString(executionHash1[:]),
5,
testExecution1.SrcPubkey().HexString(),
},
}
getActionsByAddressTests = []struct {
address string
start uint64
count uint64
numActions int
}{
{
ta.Addrinfo["producer"].String(),
0,
3,
2,
},
{
ta.Addrinfo["charlie"].String(),
1,
8,
8,
},
}
getUnconfirmedActionsByAddressTests = []struct {
address string
start uint64
count uint64
numActions int
}{
{
ta.Addrinfo["producer"].String(),
0,
4,
4,
},
}
getActionsByBlockTests = []struct {
blkHeight uint64
start uint64
count uint64
numActions int
}{
{
2,
0,
7,
7,
},
{
4,
0,
5,
5,
},
}
getBlockMetasTests = []struct {
start uint64
count uint64
numBlks int
}{
{
0,
4,
4,
},
{
1,
5,
3,
},
}
getBlockMetaTests = []struct {
blkHeight uint64
numActions int64
transferAmount string
}{
{
2,
7,
"4",
},
{
4,
5,
"0",
},
}
getChainMetaTests = []struct {
height uint64
numActions int64
tps int64
epoch iotextypes.EpochData
}{
{
4,
15,
15,
iotextypes.EpochData{
Num: 1,
Height: 1,
},
},
}
sendActionTests = []struct {
actionPb *iotextypes.Action
}{
{
testTransferPb,
},
{
testExecutionPb,
},
}
getReceiptByActionTests = []struct {
in string
status uint64
}{
{
hex.EncodeToString(executionHash2[:]),
1,
},
{
hex.EncodeToString(executionHash3[:]),
1,
},
}
readContractTests = []struct {
execHash string
retValue string
}{
{
hex.EncodeToString(executionHash2[:]),
"",
},
}
suggestGasPriceTests = []struct {
defaultGasPrice uint64
suggestedGasPrice uint64
}{
{
1,
1,
},
}
estimateGasForActionTests = []struct {
actionHash string
estimatedGas uint64
}{
{
hex.EncodeToString(transferHash1[:]),
10000,
},
{
hex.EncodeToString(voteHash1[:]),
10000,
},
}
readUnclaimedBalanceTests = []struct {
// Arguments
protocolID string
methodName string
addr string
// Expected values
returnErr bool
balance *big.Int
}{
{
protocolID: rewarding.ProtocolID,
methodName: "UnclaimedBalance",
addr: identityset.Address(0).String(),
returnErr: false,
balance: unit.ConvertIotxToRau(144), // 4 blocks * 36 IOTX block reward by default = 144 IOTX
},
{
protocolID: rewarding.ProtocolID,
methodName: "UnclaimedBalance",
addr: identityset.Address(1).String(),
returnErr: false,
balance: unit.ConvertIotxToRau(0), // identityset address 1 has produced no blocks, so it has no unclaimed reward
},
{
protocolID: "Wrong ID",
methodName: "UnclaimedBalance",
addr: ta.Addrinfo["producer"].String(),
returnErr: true,
},
{
protocolID: rewarding.ProtocolID,
methodName: "Wrong Method",
addr: ta.Addrinfo["producer"].String(),
returnErr: true,
},
}
readBlockProducersByHeightTests = []struct {
// Arguments
protocolID string
protocolType string
methodName string
height uint64
numCandidateDelegates uint64
// Expected Values
numBlockProducers int
}{
{
protocolID: "poll",
protocolType: "lifeLongDelegates",
methodName: "BlockProducersByHeight",
height: 1,
numBlockProducers: 3,
},
{
protocolID: "poll",
protocolType: "lifeLongDelegates",
methodName: "BlockProducersByHeight",
height: 4,
numBlockProducers: 3,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "BlockProducersByHeight",
height: 1,
numCandidateDelegates: 2,
numBlockProducers: 2,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "BlockProducersByHeight",
height: 4,
numCandidateDelegates: 1,
numBlockProducers: 1,
},
}
readActiveProducersByHeightTests = []struct {
// Arguments
protocolID string
protocolType string
methodName string
height uint64
numDelegates uint64
// Expected Values
numActiveBlockProducers int
}{
{
protocolID: "poll",
protocolType: "lifeLongDelegates",
methodName: "ActiveBlockProducersByHeight",
height: 1,
numActiveBlockProducers: 3,
},
{
protocolID: "poll",
protocolType: "lifeLongDelegates",
methodName: "ActiveBlockProducersByHeight",
height: 4,
numActiveBlockProducers: 3,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "ActiveBlockProducersByHeight",
height: 1,
numDelegates: 2,
numActiveBlockProducers: 2,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "ActiveBlockProducersByHeight",
height: 4,
numDelegates: 1,
numActiveBlockProducers: 1,
},
}
)
func TestServer_GetAccount(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, true)
require.NoError(err)
// success
for _, test := range getAccountTests {
request := &iotexapi.GetAccountRequest{Address: test.in}
res, err := svr.GetAccount(context.Background(), request)
require.NoError(err)
accountMeta := res.AccountMeta
require.Equal(test.address, accountMeta.Address)
require.Equal(test.balance, accountMeta.Balance)
require.Equal(test.nonce, accountMeta.Nonce)
require.Equal(test.pendingNonce, accountMeta.PendingNonce)
}
// failure
_, err = svr.GetAccount(context.Background(), &iotexapi.GetAccountRequest{})
require.Error(err)
}
func TestServer_GetActions(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getActionsTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByIndex{
ByIndex: &iotexapi.GetActionsByIndexRequest{
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.Actions))
}
}
func TestServer_GetAction(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, true)
require.NoError(err)
for _, test := range getActionTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByHash{
ByHash: &iotexapi.GetActionByHashRequest{
ActionHash: test.in,
CheckPending: test.checkPending,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(1, len(res.Actions))
actPb := res.Actions[0]
require.Equal(test.nonce, actPb.GetCore().GetNonce())
require.Equal(test.senderPubKey, hex.EncodeToString(actPb.SenderPubKey))
}
}
func TestServer_GetActionsByAddress(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getActionsByAddressTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByAddr{
ByAddr: &iotexapi.GetActionsByAddressRequest{
Address: test.address,
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.Actions))
}
}
func TestServer_GetUnconfirmedActionsByAddress(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, true)
require.NoError(err)
for _, test := range getUnconfirmedActionsByAddressTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_UnconfirmedByAddr{
UnconfirmedByAddr: &iotexapi.GetUnconfirmedActionsByAddressRequest{
Address: test.address,
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.Actions))
}
}
func TestServer_GetActionsByBlock(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getActionsByBlockTests {
blk, err := svr.bc.GetBlockByHeight(test.blkHeight)
require.NoError(err)
blkHash := blk.HashBlock()
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByBlk{
ByBlk: &iotexapi.GetActionsByBlockRequest{
BlkHash: hex.EncodeToString(blkHash[:]),
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.Actions))
}
}
func TestServer_GetBlockMetas(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getBlockMetasTests {
request := &iotexapi.GetBlockMetasRequest{
Lookup: &iotexapi.GetBlockMetasRequest_ByIndex{
ByIndex: &iotexapi.GetBlockMetasByIndexRequest{
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetBlockMetas(context.Background(), request)
require.NoError(err)
require.Equal(test.numBlks, len(res.BlkMetas))
var prevBlkPb *iotextypes.BlockMeta
for _, blkPb := range res.BlkMetas {
if prevBlkPb != nil {
require.True(blkPb.Height > prevBlkPb.Height)
}
prevBlkPb = blkPb
}
}
}
func TestServer_GetBlockMeta(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getBlockMetaTests {
blk, err := svr.bc.GetBlockByHeight(test.blkHeight)
require.NoError(err)
blkHash := blk.HashBlock()
request := &iotexapi.GetBlockMetasRequest{
Lookup: &iotexapi.GetBlockMetasRequest_ByHash{
ByHash: &iotexapi.GetBlockMetaByHashRequest{
BlkHash: hex.EncodeToString(blkHash[:]),
},
},
}
res, err := svr.GetBlockMetas(context.Background(), request)
require.NoError(err)
require.Equal(1, len(res.BlkMetas))
blkPb := res.BlkMetas[0]
require.Equal(test.numActions, blkPb.NumActions)
require.Equal(test.transferAmount, blkPb.TransferAmount)
}
}
func TestServer_GetChainMeta(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getChainMetaTests {
res, err := svr.GetChainMeta(context.Background(), &iotexapi.GetChainMetaRequest{})
require.NoError(err)
chainMetaPb := res.ChainMeta
require.Equal(test.height, chainMetaPb.Height)
require.Equal(test.numActions, chainMetaPb.NumActions)
require.Equal(test.tps, chainMetaPb.Tps)
require.Equal(test.epoch.Num, chainMetaPb.Epoch.Num)
require.Equal(test.epoch.Height, chainMetaPb.Epoch.Height)
}
}
func TestServer_SendAction(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
chain := mock_blockchain.NewMockBlockchain(ctrl)
mDp := mock_dispatcher.NewMockDispatcher(ctrl)
broadcastHandlerCount := 0
svr := Server{bc: chain, dp: mDp, broadcastHandler: func(_ context.Context, _ uint32, _ proto.Message) error {
broadcastHandlerCount++
return nil
}}
chain.EXPECT().ChainID().Return(uint32(1)).Times(4)
mDp.EXPECT().HandleBroadcast(gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
for i, test := range sendActionTests {
request := &iotexapi.SendActionRequest{Action: test.actionPb}
_, err := svr.SendAction(context.Background(), request)
require.NoError(err)
require.Equal(i+1, broadcastHandlerCount)
}
}
func TestServer_GetReceiptByAction(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getReceiptByActionTests {
request := &iotexapi.GetReceiptByActionRequest{ActionHash: test.in}
res, err := svr.GetReceiptByAction(context.Background(), request)
require.NoError(err)
receiptPb := res.Receipt
require.Equal(test.status, receiptPb.Status)
}
}
func TestServer_ReadContract(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range readContractTests {
hash, err := toHash256(test.execHash)
require.NoError(err)
exec, err := svr.bc.GetActionByActionHash(hash)
require.NoError(err)
request := &iotexapi.ReadContractRequest{Action: exec.Proto()}
res, err := svr.ReadContract(context.Background(), request)
require.NoError(err)
require.Equal(test.retValue, res.Data)
}
}
func TestServer_SuggestGasPrice(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
for _, test := range suggestGasPriceTests {
cfg.API.GasStation.DefaultGas = test.defaultGasPrice
svr, err := createServer(cfg, false)
require.NoError(err)
res, err := svr.SuggestGasPrice(context.Background(), &iotexapi.SuggestGasPriceRequest{})
require.NoError(err)
require.Equal(test.suggestedGasPrice, res.GasPrice)
}
}
func TestServer_EstimateGasForAction(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range estimateGasForActionTests {
hash, err := toHash256(test.actionHash)
require.NoError(err)
act, err := svr.bc.GetActionByActionHash(hash)
require.NoError(err)
request := &iotexapi.EstimateGasForActionRequest{Action: act.Proto()}
res, err := svr.EstimateGasForAction(context.Background(), request)
require.NoError(err)
require.Equal(test.estimatedGas, res.Gas)
}
}
func TestServer_ReadUnclaimedBalance(t *testing.T) {
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(t, err)
for _, test := range readUnclaimedBalanceTests {
out, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{[]byte(test.addr)},
})
if test.returnErr {
require.Error(t, err)
continue
}
require.NoError(t, err)
val, ok := big.NewInt(0).SetString(string(out.Data), 10)
require.True(t, ok)
assert.Equal(t, test.balance, val)
}
}
func TestServer_ReadBlockProducersByHeight(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
committee := mock_committee.NewMockCommittee(ctrl)
r := types.NewElectionResultForTest(time.Now())
committee.EXPECT().ResultByHeight(gomock.Any()).Return(r, nil).Times(2)
committee.EXPECT().HeightByTime(gomock.Any()).Return(uint64(123456), nil).AnyTimes()
for _, test := range readBlockProducersByHeightTests {
var pol poll.Protocol
if test.protocolType == "lifeLongDelegates" {
cfg.Genesis.Delegates = delegates
pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
} else {
pol, _ = poll.NewGovernanceChainCommitteeProtocol(
committee,
uint64(123456),
func(uint64) (time.Time, error) { return time.Now(), nil },
func(uint64) uint64 { return 1 },
func(uint64) uint64 { return 1 },
test.numCandidateDelegates,
cfg.Genesis.NumDelegates,
)
}
svr, err := createServer(cfg, false)
require.NoError(err)
require.NoError(svr.registry.Register(poll.ProtocolID, pol))
res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{byteutil.Uint64ToBytes(test.height)},
})
require.NoError(err)
var blockProducers pollpb.BlockProducerList
require.NoError(proto.Unmarshal(res.Data, &blockProducers))
require.Equal(test.numBlockProducers, len(blockProducers.BlockProducers))
}
}
func TestServer_ReadActiveBlockProducersByHeight(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
committee := mock_committee.NewMockCommittee(ctrl)
r := types.NewElectionResultForTest(time.Now())
committee.EXPECT().ResultByHeight(gomock.Any()).Return(r, nil).Times(2)
committee.EXPECT().HeightByTime(gomock.Any()).Return(uint64(123456), nil).AnyTimes()
for _, test := range readActiveProducersByHeightTests {
var pol poll.Protocol
if test.protocolType == "lifeLongDelegates" {
cfg.Genesis.Delegates = delegates
pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
} else {
pol, _ = poll.NewGovernanceChainCommitteeProtocol(
committee,
uint64(123456),
func(uint64) (time.Time, error) { return time.Now(), nil },
func(uint64) uint64 { return 1 },
func(uint64) uint64 { return 1 },
cfg.Genesis.NumCandidateDelegates,
test.numDelegates,
)
}
svr, err := createServer(cfg, false)
require.NoError(err)
require.NoError(svr.registry.Register(poll.ProtocolID, pol))
res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{byteutil.Uint64ToBytes(test.height)},
})
require.NoError(err)
var activeBlockProducers pollpb.BlockProducerList
require.NoError(proto.Unmarshal(res.Data, &activeBlockProducers))
require.Equal(test.numActiveBlockProducers, len(activeBlockProducers.BlockProducers))
}
}
func addProducerToFactory(sf factory.Factory) error {
ws, err := sf.NewWorkingSet()
if err != nil {
return err
}
if _, err = accountutil.LoadOrCreateAccount(
ws,
ta.Addrinfo["producer"].String(),
unit.ConvertIotxToRau(10000000000),
); err != nil {
return err
}
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: ta.Addrinfo["producer"],
GasLimit: &gasLimit,
})
if _, err = ws.RunActions(ctx, 0, nil); err != nil {
return err
}
return sf.Commit(ws)
}
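// addTestingBlocks mints and commits four blocks of transfers, votes and
// executions from the test addresses so the API tests have a populated chain.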
func addTestingBlocks(bc blockchain.Blockchain) error {
addr0 := ta.Addrinfo["producer"].String()
priKey0 := ta.Keyinfo["producer"].PriKey
addr1 := ta.Addrinfo["alfa"].String()
priKey1 := ta.Keyinfo["alfa"].PriKey
addr2 := ta.Addrinfo["bravo"].String()
addr3 := ta.Addrinfo["charlie"].String()
priKey3 := ta.Keyinfo["charlie"].PriKey
addr4 := ta.Addrinfo["delta"].String()
// Add block 1
// Producer transfer--> C
tsf, err := testutil.SignedTransfer(addr3, priKey0, 1, big.NewInt(10), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[addr0] = []action.SealedEnvelope{tsf}
blk, err := bc.MintNewBlock(
actionMap,
time.Now().Unix(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 2
// Charlie transfer--> A, B, D, P
// Charlie vote--> C
// Charlie exec--> D
recipients := []string{addr1, addr2, addr4, addr0}
selps := make([]action.SealedEnvelope, 0)
for i, recipient := range recipients {
selp, err := testutil.SignedTransfer(recipient, priKey3, uint64(i+1), big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
selps = append(selps, selp)
}
vote1, err := testutil.SignedVote(addr3, priKey3, 5, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
execution1, err := testutil.SignedExecution(addr4, priKey3, 6,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
if err != nil {
return err
}
selps = append(selps, vote1)
selps = append(selps, execution1)
actionMap = make(map[string][]action.SealedEnvelope)
actionMap[addr3] = selps
if blk, err = bc.MintNewBlock(
actionMap,
time.Now().Unix(),
); err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 3
// Empty actions
if blk, err = bc.MintNewBlock(
nil,
time.Now().Unix(),
); err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 4
// Charlie vote--> C
// Charlie exec--> D
// Alfa vote--> A
// Alfa exec--> D
vote1, err = testutil.SignedVote(addr3, priKey3, 7, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
vote2, err := testutil.SignedVote(addr1, priKey1, 1, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
execution1, err = testutil.SignedExecution(addr4, priKey3, 8,
big.NewInt(2), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
if err != nil {
return err
}
execution2, err := testutil.SignedExecution(addr4, priKey1, 2,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
if err != nil {
return err
}
actionMap = make(map[string][]action.SealedEnvelope)
actionMap[addr3] = []action.SealedEnvelope{vote1, execution1}
actionMap[addr1] = []action.SealedEnvelope{vote2, execution2}
if blk, err = bc.MintNewBlock(
actionMap,
time.Now().Unix(),
); err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
return bc.CommitBlock(blk)
}
func addActsToActPool(ap actpool.ActPool) error {
// Producer transfer--> A
tsf1, err := testutil.SignedTransfer(ta.Addrinfo["alfa"].String(), ta.Keyinfo["producer"].PriKey, 2, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
// Producer vote--> P
vote1, err := testutil.SignedVote(ta.Addrinfo["producer"].String(), ta.Keyinfo["producer"].PriKey, 3, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
// Producer transfer--> B
tsf2, err := testutil.SignedTransfer(ta.Addrinfo["bravo"].String(), ta.Keyinfo["producer"].PriKey, 4, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
// Producer exec--> D
execution1, err := testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["producer"].PriKey, 5,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(10), []byte{1})
if err != nil {
return err
}
if err := ap.Add(tsf1); err != nil {
return err
}
if err := ap.Add(vote1); err != nil {
return err
}
if err := ap.Add(tsf2); err != nil {
return err
}
return ap.Add(execution1)
}
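// setupChain creates an in-memory blockchain with the account, vote, execution,
// rolldpos and rewarding protocols registered on a fresh protocol registry.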
func setupChain(cfg config.Config) (blockchain.Blockchain, *protocol.Registry, error) {
cfg.Chain.ProducerPrivKey = hex.EncodeToString(identityset.PrivateKey(0).Bytes())
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
if err != nil {
return nil, nil, err
}
// create chain
registry := protocol.Registry{}
bc := blockchain.NewBlockchain(
cfg,
blockchain.PrecreatedStateFactoryOption(sf),
blockchain.InMemDaoOption(),
blockchain.RegistryOption(&registry),
)
if bc == nil {
return nil, nil, errors.New("failed to create blockchain")
}
acc := account.NewProtocol()
v := vote.NewProtocol(bc)
evm := execution.NewProtocol(bc)
rolldposProtocol := rolldpos.NewProtocol(
genesis.Default.NumCandidateDelegates,
genesis.Default.NumDelegates,
genesis.Default.NumSubEpochs,
)
r := rewarding.NewProtocol(bc, rolldposProtocol)
if err := registry.Register(rolldpos.ProtocolID, rolldposProtocol); err != nil {
return nil, nil, err
}
if err := registry.Register(account.ProtocolID, acc); err != nil {
return nil, nil, err
}
if err := registry.Register(vote.ProtocolID, v); err != nil {
return nil, nil, err
}
if err := registry.Register(execution.ProtocolID, evm); err != nil {
return nil, nil, err
}
if err := registry.Register(rewarding.ProtocolID, r); err != nil {
return nil, nil, err
}
sf.AddActionHandlers(acc, v, evm, r)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
bc.Validator().AddActionValidators(acc, v, evm, r)
return bc, &registry, nil
}
func setupActPool(bc blockchain.Blockchain, cfg config.ActPool) (actpool.ActPool, error) {
ap, err := actpool.NewActPool(bc, cfg)
if err != nil {
return nil, err
}
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
ap.AddActionValidators(vote.NewProtocol(bc), execution.NewProtocol(bc))
return ap, nil
}
func newConfig() config.Config {
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.EnableIndex = true
return cfg
}
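// createServer builds an API server backed by an in-memory chain seeded with test
// blocks; when needActPool is true it also wires up an actpool with pending actions.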
func createServer(cfg config.Config, needActPool bool) (*Server, error) {
bc, registry, err := setupChain(cfg)
if err != nil {
return nil, err
}
ctx := context.Background()
// Start blockchain
if err := bc.Start(ctx); err != nil {
return nil, err
}
// Create state for producer
if err := addProducerToFactory(bc.GetFactory()); err != nil {
return nil, err
}
// Add testing blocks
if err := addTestingBlocks(bc); err != nil {
return nil, err
}
var ap actpool.ActPool
if needActPool {
ap, err = setupActPool(bc, cfg.ActPool)
if err != nil {
return nil, err
}
// Add actions to actpool
if err := addActsToActPool(ap); err != nil {
return nil, err
}
}
apiCfg := config.API{TpsWindow: 10, MaxTransferPayloadBytes: 1024, GasStation: cfg.API.GasStation}
svr := &Server{
bc: bc,
ap: ap,
cfg: apiCfg,
gs: gasstation.NewGasStation(bc, apiCfg),
registry: registry,
}
return svr, nil
}
| 1 | 15,952 | `readActiveBlockProducersByHeightTests` is a global variable (from `gochecknoglobals`) | iotexproject-iotex-core | go |
@@ -447,10 +447,10 @@ public class SyncManager {
int totalSize = target.getTotalSize();
sync.setTotalSize(totalSize);
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
-
+ final String idField = sync.getTarget().getIdFieldName();
while (records != null) {
// Save to smartstore
- saveRecordsToSmartStore(soupName, records, mergeMode);
+ saveRecordsToSmartStore(soupName, records, mergeMode, idField);
countSaved += records.length();
maxTimeStamp = Math.max(maxTimeStamp, target.getLatestModificationTimeStamp(records));
| 1 | /*
* Copyright (c) 2014-2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartsync.manager;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import android.util.Log;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.HttpAccess;
import com.salesforce.androidsdk.rest.ApiVersionStrings;
import com.salesforce.androidsdk.rest.RestClient;
import com.salesforce.androidsdk.rest.RestRequest;
import com.salesforce.androidsdk.rest.RestResponse;
import com.salesforce.androidsdk.smartstore.app.SalesforceSDKManagerWithSmartStore;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartsync.app.SmartSyncSDKManager;
import com.salesforce.androidsdk.smartsync.util.Constants;
import com.salesforce.androidsdk.smartsync.util.SyncDownTarget;
import com.salesforce.androidsdk.smartsync.util.SyncOptions;
import com.salesforce.androidsdk.smartsync.util.SyncState;
import com.salesforce.androidsdk.smartsync.util.SyncState.MergeMode;
import com.salesforce.androidsdk.smartsync.util.SyncUpTarget;
import com.salesforce.androidsdk.util.JSONObjectHelper;
/**
* Sync Manager: runs sync down (server to SmartStore) and sync up (SmartStore to
* server) operations for a given user account, community and SmartStore instance.
*/
public class SyncManager {
// Constants
public static final int PAGE_SIZE = 2000;
private static final int UNCHANGED = -1;
// For user agent
private static final String SMART_SYNC = "SmartSync";
// Local fields
public static final String LOCALLY_CREATED = "__locally_created__";
public static final String LOCALLY_UPDATED = "__locally_updated__";
public static final String LOCALLY_DELETED = "__locally_deleted__";
public static final String LOCAL = "__local__";
// Static member
private static Map<String, SyncManager> INSTANCES = new HashMap<String, SyncManager>();
// Members
private Set<Long> runningSyncIds = new HashSet<Long>();
public final String apiVersion;
private final ExecutorService threadPool = Executors.newFixedThreadPool(1);
private SmartStore smartStore;
private RestClient restClient;
/**
* Private constructor
* @param smartStore
*/
private SyncManager(SmartStore smartStore, RestClient restClient) {
apiVersion = ApiVersionStrings.VERSION_NUMBER;
this.smartStore = smartStore;
this.restClient = restClient;
SyncState.setupSyncsSoupIfNeeded(smartStore);
}
/**
* Returns the instance of this class associated with the current user.
*
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance() {
return getInstance(null, null);
}
/**
* Returns the instance of this class associated with this user account.
*
* @param account User account.
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account) {
return getInstance(account, null);
}
/**
* Returns the instance of this class associated with this user and community.
* The sync manager returned is ready to use.
*
* @param account User account.
* @param communityId Community ID.
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account, String communityId) {
return getInstance(account, communityId, null);
}
/**
* Returns the instance of this class associated with this user, community and smartstore.
*
* @param account User account. Pass null to use the current user.
* @param communityId Community ID. Pass null if not applicable.
* @param smartStore SmartStore instance. Pass null to use current user default smartstore.
*
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account, String communityId, SmartStore smartStore) {
if (account == null) {
account = SalesforceSDKManagerWithSmartStore.getInstance().getUserAccountManager().getCurrentUser();
}
if (smartStore == null) {
smartStore = SmartSyncSDKManager.getInstance().getSmartStore(account, communityId);
}
String uniqueId = (account != null ? account.getUserId() : "") + ":"
+ smartStore.getDatabase().getPath();
SyncManager instance = INSTANCES.get(uniqueId);
if (instance == null) {
RestClient restClient = null;
/*
* If account is still null, there is no user logged in, which means, the default
* RestClient should be set to the unauthenticated RestClient instance.
*/
if (account == null) {
restClient = SalesforceSDKManager.getInstance().getClientManager().peekUnauthenticatedRestClient();
} else {
restClient = SalesforceSDKManager.getInstance().getClientManager().peekRestClient(account);
}
instance = new SyncManager(smartStore, restClient);
INSTANCES.put(uniqueId, instance);
}
return instance;
}
/**
* Resets all the sync managers
*/
public static synchronized void reset() {
INSTANCES.clear();
}
/**
* Get details of a sync state
* @param syncId id of the sync
* @return sync state for the given id, or null if none is found
* @throws JSONException
*/
public SyncState getSyncStatus(long syncId) throws JSONException {
return SyncState.byId(smartStore, syncId);
}
/**
* Create and run a sync down that will overwrite any modified records
* @param target sync down target
* @param soupName name of the soup the fetched records are saved to
* @param callback callback to get sync status updates
* @return sync state of the sync that was started
* @throws JSONException
*/
public SyncState syncDown(SyncDownTarget target, String soupName, SyncUpdateCallback callback) throws JSONException {
SyncOptions options = SyncOptions.optionsForSyncDown(MergeMode.OVERWRITE);
return syncDown(target, options, soupName, callback);
}
/**
* Create and run a sync down
* @param target sync down target
* @param options sync options (including merge mode)
* @param soupName name of the soup the fetched records are saved to
* @param callback callback to get sync status updates
* @return sync state of the sync that was started
* @throws JSONException
*/
public SyncState syncDown(SyncDownTarget target, SyncOptions options, String soupName, SyncUpdateCallback callback) throws JSONException {
SyncState sync = SyncState.createSyncDown(smartStore, target, options, soupName);
runSync(sync, callback);
return sync;
}
/**
* Re-run sync but only fetch new/modified records
* @param syncId
* @param callback
* @throws JSONException
*/
public SyncState reSync(long syncId, SyncUpdateCallback callback) throws JSONException {
if (runningSyncIds.contains(syncId)) {
throw new SmartSyncException("Cannot run reSync:" + syncId + ": still running");
}
SyncState sync = SyncState.byId(smartStore, syncId);
if (sync == null) {
throw new SmartSyncException("Cannot run reSync:" + syncId + ": no sync found");
}
if (sync.getType() != SyncState.Type.syncDown) {
throw new SmartSyncException("Cannot run reSync:" + syncId + ": wrong type:" + sync.getType());
}
sync.setTotalSize(-1);
runSync(sync, callback);
return sync;
}
/**
* Run a sync
* @param sync
* @param callback
*/
public void runSync(final SyncState sync, final SyncUpdateCallback callback) {
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
threadPool.execute(new Runnable() {
@Override
public void run() {
try {
switch (sync.getType()) {
case syncDown:
syncDown(sync, callback);
break;
case syncUp:
syncUp(sync, callback);
break;
}
updateSync(sync, SyncState.Status.DONE, 100, callback);
} catch (Exception e) {
Log.e("SmartSyncManager:runSync", "Error during sync: " + sync.getId(), e);
// Update status to failed
updateSync(sync, SyncState.Status.FAILED, UNCHANGED, callback);
}
}
});
}
/**
* Create and run a sync up
* @param target sync up target
* @param options sync options (fieldlist and merge mode)
* @param soupName name of the soup holding the records to sync up
* @param callback callback to get sync status updates
* @return sync state of the sync that was started
* @throws JSONException
*/
public SyncState syncUp(SyncUpTarget target, SyncOptions options, String soupName, SyncUpdateCallback callback) throws JSONException {
SyncState sync = SyncState.createSyncUp(smartStore, target, options, soupName);
runSync(sync, callback);
return sync;
}
/**
* Update sync with new status, progress, totalSize
* @param sync
* @param status
* @param progress pass -1 to keep the current value
* @param callback
*/
private void updateSync(SyncState sync, SyncState.Status status, int progress, SyncUpdateCallback callback) {
try {
sync.setStatus(status);
if (progress != UNCHANGED) sync.setProgress(progress);
sync.save(smartStore);
switch (status) {
case NEW:
break;
case RUNNING:
runningSyncIds.add(sync.getId());
break;
case DONE:
case FAILED:
runningSyncIds.remove(sync.getId());
break;
}
callback.onUpdate(sync);
}
catch (JSONException e) {
Log.e("SmartSyncManager:updateSync", "Unexpected json error for sync: " + sync.getId(), e);
}
}
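/**
* Runs a sync up: finds locally created/updated/deleted records in the soup and
* sends them to the server one by one, reporting progress along the way.
* @param sync sync to run
* @param callback callback to get sync status updates
*/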
private void syncUp(SyncState sync, SyncUpdateCallback callback) throws Exception {
final String soupName = sync.getSoupName();
final SyncUpTarget target = (SyncUpTarget) sync.getTarget();
final SyncOptions options = sync.getOptions();
final List<String> fieldlist = options.getFieldlist();
final MergeMode mergeMode = options.getMergeMode();
final Set<String> dirtyRecordIds = target.getIdsOfRecordsToSyncUp(this, soupName);
int totalSize = dirtyRecordIds.size();
sync.setTotalSize(totalSize);
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
int i = 0;
for (final String id : dirtyRecordIds) {
JSONObject record = smartStore.retrieve(soupName, Long.valueOf(id)).getJSONObject(0);
syncUpOneRecord(target, soupName, fieldlist, record, mergeMode);
// Updating status
int progress = (i + 1) * 100 / totalSize;
if (progress < 100) {
updateSync(sync, SyncState.Status.RUNNING, progress, callback);
}
// Incrementing i
i++;
}
}
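/**
* Returns true if the local record is at least as recent as the server's copy.
* When no local modification date was captured, returns true so that we fall
* back to "overwrite" behavior.
*/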
private boolean isNewerThanServer(SyncUpTarget target, String objectType, String objectId, String lastModStr) throws JSONException, IOException {
if (lastModStr == null) {
// We didn't capture the last modified date, so we can't really enforce the merge mode; return true so that we behave like an "overwrite" merge mode
return true;
}
try {
String serverLastModStr = target.fetchLastModifiedDate(this, objectType, objectId);
long lastModifiedDate = Constants.TIMESTAMP_FORMAT.parse(lastModStr).getTime();
long serverLastModifiedDate = Constants.TIMESTAMP_FORMAT.parse(serverLastModStr).getTime();
return (serverLastModifiedDate <= lastModifiedDate);
} catch (Exception e) {
Log.e("SmartSyncManager:isNewerThanServer", "Couldn't figure out last modified date", e);
throw new SmartSyncException(e);
}
}
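/**
* Creates, updates or deletes one locally modified record on the server and
* updates the corresponding SmartStore entry. Returns true when the record was
* left alone (nothing to do, or the merge mode said not to overwrite the server).
*/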
private boolean syncUpOneRecord(SyncUpTarget target, String soupName, List<String> fieldlist,
JSONObject record, MergeMode mergeMode) throws JSONException, IOException {
// Do we need to do a create, update or delete
Action action = null;
if (record.getBoolean(LOCALLY_DELETED))
action = Action.delete;
else if (record.getBoolean(LOCALLY_CREATED))
action = Action.create;
else if (record.getBoolean(LOCALLY_UPDATED))
action = Action.update;
if (action == null) {
// Nothing to do for this record
return true;
}
// Getting type and id
final String objectType = (String) SmartStore.project(record, Constants.SOBJECT_TYPE);
final String objectId = record.getString(target.getIdFieldName());
final String lastModStr = record.optString(target.getModificationDateFieldName());
/*
* Checks if we are attempting to update a record that has been updated
* on the server AFTER the client's last sync down. If the merge mode
* passed in tells us to leave the record alone under these
* circumstances, we will do nothing and return here.
*/
if (mergeMode == MergeMode.LEAVE_IF_CHANGED &&
(action == Action.update || action == Action.delete) &&
!isNewerThanServer(target, objectType, objectId, lastModStr)) {
// Nothing to do for this record
Log.i("SmartSyncManager:syncUpOneRecord",
"Record not synced since client does not have the latest from server");
return true;
}
// Fields to save (in the case of create or update)
Map<String, Object> fields = new HashMap<String, Object>();
if (action == Action.create || action == Action.update) {
for (String fieldName : fieldlist) {
if (!fieldName.equals(target.getIdFieldName()) && !fieldName.equals(SyncUpTarget.MODIFICATION_DATE_FIELD_NAME)) {
fields.put(fieldName, SmartStore.project(record, fieldName));
}
}
}
// Create/update/delete record on server and update smartstore
switch (action) {
case create:
String recordServerId = target.createOnServer(this, objectType, fields);
if (recordServerId != null) {
record.put(target.getIdFieldName(), recordServerId);
cleanAndSaveRecord(soupName, record);
}
break;
case delete:
if (target.deleteOnServer(this, objectType, objectId)) {
smartStore.delete(soupName, record.getLong(SmartStore.SOUP_ENTRY_ID));
}
break;
case update:
if (target.updateOnServer(this, objectType, objectId, fields)) {
cleanAndSaveRecord(soupName, record);
}
break;
}
return false;
}
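// Clears all the local dirty flags on the record and writes it back to the soup.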
private void cleanAndSaveRecord(String soupName, JSONObject record) throws JSONException {
record.put(LOCAL, false);
record.put(LOCALLY_CREATED, false);
record.put(LOCALLY_UPDATED, false);
record.put(LOCALLY_DELETED, false);
smartStore.update(soupName, record, record.getLong(SmartStore.SOUP_ENTRY_ID));
}
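/**
* Runs a sync down: fetches records from the server page by page and saves each
* page to the target soup, honoring the sync's merge mode.
*/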
private void syncDown(SyncState sync, SyncUpdateCallback callback) throws Exception {
String soupName = sync.getSoupName();
SyncDownTarget target = (SyncDownTarget) sync.getTarget();
MergeMode mergeMode = sync.getMergeMode();
long maxTimeStamp = sync.getMaxTimeStamp();
JSONArray records = target.startFetch(this, maxTimeStamp);
int countSaved = 0;
int totalSize = target.getTotalSize();
sync.setTotalSize(totalSize);
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
while (records != null) {
// Save to smartstore
saveRecordsToSmartStore(soupName, records, mergeMode);
countSaved += records.length();
maxTimeStamp = Math.max(maxTimeStamp, target.getLatestModificationTimeStamp(records));
// Update sync status
if (countSaved < totalSize)
updateSync(sync, SyncState.Status.RUNNING, countSaved*100 / totalSize, callback);
// Fetch next records if any
records = target.continueFetch(this);
}
sync.setMaxTimeStamp(maxTimeStamp);
}
private Set<String> toSet(JSONArray jsonArray) throws JSONException {
Set<String> set = new HashSet<String>();
for (int i=0; i<jsonArray.length(); i++) {
set.add(jsonArray.getJSONArray(i).getString(0));
}
return set;
}
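// Upserts fetched records into the soup; in LEAVE_IF_CHANGED mode, records with
// local changes are skipped so that dirty data is not overwritten.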
private void saveRecordsToSmartStore(String soupName, JSONArray records, MergeMode mergeMode)
throws JSONException {
// Gather ids of dirty records
Set<String> idsToSkip = null;
if (mergeMode == MergeMode.LEAVE_IF_CHANGED) {
idsToSkip = getDirtyRecordIds(soupName, Constants.ID);
}
smartStore.beginTransaction();
for (int i = 0; i < records.length(); i++) {
JSONObject record = records.getJSONObject(i);
// Skip?
if (mergeMode == MergeMode.LEAVE_IF_CHANGED) {
String id = JSONObjectHelper.optString(record, Constants.ID);
if (id != null && idsToSkip.contains(id)) {
continue; // don't write over dirty record
}
}
// Save
record.put(LOCAL, false);
record.put(LOCALLY_CREATED, false);
record.put(LOCALLY_UPDATED, false);
record.put(LOCALLY_DELETED, false);
smartStore.upsert(soupName, records.getJSONObject(i), Constants.ID, false);
}
smartStore.setTransactionSuccessful();
smartStore.endTransaction();
}
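/**
* Returns the ids of the records in the given soup that have local changes,
* i.e. records whose __local__ field is 'true'.
*/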
public Set<String> getDirtyRecordIds(String soupName, String idField) throws JSONException {
Set<String> idsToSkip = new HashSet<String>();
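// For illustration only ("accounts" is a hypothetical soup name): with
// soupName = "accounts" and idField = "Id" this formats to
//   SELECT {accounts:Id} FROM {accounts} WHERE {accounts:__local__} = 'true'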
String dirtyRecordsSql = String.format("SELECT {%s:%s} FROM {%s} WHERE {%s:%s} = 'true'", soupName, idField, soupName, soupName, LOCAL);
final QuerySpec smartQuerySpec = QuerySpec.buildSmartQuerySpec(dirtyRecordsSql, PAGE_SIZE);
boolean hasMore = true;
for (int pageIndex = 0; hasMore; pageIndex++) {
JSONArray results = smartStore.query(smartQuerySpec, pageIndex);
hasMore = (results.length() == PAGE_SIZE);
idsToSkip.addAll(toSet(results));
}
return idsToSkip;
}
/**
* Send request after adding user-agent header that says SmartSync
* @param restRequest
* @return
* @throws IOException
*/
public RestResponse sendSyncWithSmartSyncUserAgent(RestRequest restRequest) throws IOException {
Map<String, String> headers = restRequest.getAdditionalHttpHeaders();
if (headers == null)
headers = new HashMap<String, String>();
headers.put(HttpAccess.USER_AGENT, SalesforceSDKManager.getInstance().getUserAgent(SMART_SYNC));
return restClient.sendSync(restRequest.getMethod(), restRequest.getPath(), restRequest.getRequestEntity(), headers);
}
/**
* Enum for action
*
*/
public enum Action {
create,
update,
delete
}
/**
* Exception thrown by smart sync manager
*
*/
public static class SmartSyncException extends RuntimeException {
public SmartSyncException(String message) {
super(message);
}
public SmartSyncException(Throwable e) {
super(e);
}
private static final long serialVersionUID = 1L;
}
/**
* Sets the rest client to be used.
*
* @param restClient
*/
public void setRestClient(RestClient restClient) {
this.restClient = restClient;
}
/**
* @return rest client in use
*/
public RestClient getRestClient() {
return this.restClient;
}
/**
     * Callback to get sync status updates
*/
public interface SyncUpdateCallback {
void onUpdate(SyncState sync);
}
}
| 1 | 14,778 | Falls back on `Constants.ID` if there's no custom field set. | forcedotcom-SalesforceMobileSDK-Android | java |
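A minimal sketch of the fallback the note above describes, around the `getDirtyRecordIds(soupName, Constants.ID)` call in `saveRecordsToSmartStore`. The `SyncTarget.getIdFieldName()` accessor is an assumption for illustration, not the SDK's actual API:

```java
// Hypothetical sketch: prefer a target-specific id field when one is set,
// otherwise fall back to the default Constants.ID.
// SyncTarget.getIdFieldName() is an assumed accessor.
private String resolveIdField(SyncTarget target) {
    String idField = (target == null) ? null : target.getIdFieldName();
    return (idField == null || idField.trim().length() == 0) ? Constants.ID : idField;
}
```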
@@ -509,6 +509,7 @@ public class FileAccessIO<T extends DvObject> extends StorageIO<T> {
try {
in = new FileInputStream(getFileSystemPath().toFile());
+ in.skip(this.getOffset());
} catch (IOException ex) {
// We don't particularly care what the reason why we have
// failed to access the file was. | 1 | /*
Copyright (C) 2005-2012, by the President and Fellows of Harvard College.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Dataverse Network - A web application to share, preserve and analyze research data.
Developed at the Institute for Quantitative Social Science, Harvard University.
Version 3.0.
*/
package edu.harvard.iq.dataverse.dataaccess;
// java core imports:
import java.io.IOException;
import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
// NIO imports:
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
// Dataverse imports:
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.Dataverse;
import edu.harvard.iq.dataverse.DvObject;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
import java.io.FileNotFoundException;
import java.nio.channels.Channel;
import java.nio.file.DirectoryStream;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
public class FileAccessIO<T extends DvObject> extends StorageIO<T> {
public FileAccessIO() {
//Constructor only for testing
super(null, null, null);
}
public FileAccessIO(T dvObject, DataAccessRequest req, String driverId ) {
super(dvObject, req, driverId);
this.setIsLocalFile(true);
}
// "Direct" File Access IO, opened on a physical file not associated with
// a specific DvObject
public FileAccessIO(String storageLocation, String driverId) {
super(storageLocation, driverId);
this.setIsLocalFile(true);
physicalPath = Paths.get(storageLocation);
}
private Path physicalPath = null;
@Override
public void open (DataAccessOption... options) throws IOException {
DataFile dataFile;
Dataset dataset;
Dataverse dataverse = null;
DataAccessRequest req = this.getRequest();
if (isWriteAccessRequested(options)) {
isWriteAccess = true;
isReadAccess = false;
} else {
isWriteAccess = false;
isReadAccess = true;
}
if (dvObject instanceof DataFile) {
dataFile = this.getDataFile();
String storageIdentifier = dataFile.getStorageIdentifier();
if (req != null && req.getParameter("noVarHeader") != null) {
this.setNoVarHeader(true);
}
if (storageIdentifier == null || "".equals(storageIdentifier)) {
throw new IOException("Data Access: No local storage identifier defined for this datafile.");
}
if (isReadAccess) {
FileInputStream fin = openLocalFileAsInputStream();
if (fin == null) {
throw new IOException ("Failed to open local file "+getStorageLocation());
}
this.setInputStream(fin);
setChannel(fin.getChannel());
this.setSize(getLocalFileSize());
if (dataFile.getContentType() != null
&& dataFile.getContentType().equals("text/tab-separated-values")
&& dataFile.isTabularData()
&& dataFile.getDataTable() != null
&& (!this.noVarHeader())) {
List<DataVariable> datavariables = dataFile.getDataTable().getDataVariables();
String varHeaderLine = generateVariableHeader(datavariables);
this.setVarHeader(varHeaderLine);
}
} else if (isWriteAccess) {
// Creates a new directory as needed for a dataset.
Path datasetPath=Paths.get(getDatasetDirectory());
if (datasetPath != null && !Files.exists(datasetPath)) {
Files.createDirectories(datasetPath);
}
FileOutputStream fout = openLocalFileAsOutputStream();
if (fout == null) {
throw new IOException ("Failed to open local file "+getStorageLocation()+" for writing.");
}
this.setOutputStream(fout);
setChannel(fout.getChannel());
if (!storageIdentifier.startsWith(this.driverId + "://")) {
dvObject.setStorageIdentifier(this.driverId + "://" + storageIdentifier);
}
}
this.setMimeType(dataFile.getContentType());
try {
this.setFileName(dataFile.getFileMetadata().getLabel());
} catch (Exception ex) {
this.setFileName("unknown");
}
} else if (dvObject instanceof Dataset) {
//This case is for uploading a dataset related auxiliary file
//e.g. image thumbnails/metadata exports
dataset = this.getDataset();
if (isReadAccess) {
            //TODO: Not necessary for dataset as there are no files associated with this
// FileInputStream fin = openLocalFileAsInputStream();
// Path path= dataset.getFileSystemDirectory();
// if (path == null) {
// throw new IOException("Failed to locate Dataset"+dataset.getIdentifier());
// }
//
// this.setInputStream(fin);
} else if (isWriteAccess) {
//this checks whether a directory for a dataset exists
Path datasetPath=Paths.get(getDatasetDirectory());
if (datasetPath != null && !Files.exists(datasetPath)) {
Files.createDirectories(datasetPath);
}
dataset.setStorageIdentifier(this.driverId + "://"+dataset.getAuthorityForFileStorage() + "/" + dataset.getIdentifierForFileStorage());
}
} else if (dvObject instanceof Dataverse) {
dataverse = this.getDataverse();
} else {
throw new IOException("Data Access: Invalid DvObject type");
}
// This "status" is a leftover from 3.6; we don't have a use for it
// in 4.0 yet; and we may not need it at all.
// -- L.A. 4.0.2
/*this.setStatus(200);*/
}
@Override
public void savePath(Path fileSystemPath) throws IOException {
        // Since this is a local filesystem file, we can use the
// quick NIO Files.copy method:
Path outputPath = getFileSystemPath();
if (outputPath == null) {
throw new FileNotFoundException("FileAccessIO: Could not locate aux file for writing.");
}
Files.copy(fileSystemPath, outputPath, StandardCopyOption.REPLACE_EXISTING);
long newFileSize = outputPath.toFile().length();
// if it has worked successfully, we also need to reset the size
// of the object.
setSize(newFileSize);
}
@Override
public void saveInputStream(InputStream inputStream, Long filesize) throws IOException {
saveInputStream(inputStream);
}
@Override
public void saveInputStream(InputStream inputStream) throws IOException {
        // Since this is a local filesystem file, we can use the
// quick NIO Files.copy method:
File outputFile = getFileSystemPath().toFile();
if (outputFile == null) {
throw new FileNotFoundException("FileAccessIO: Could not locate file for writing.");
}
try (OutputStream outputStream = new FileOutputStream(outputFile)) {
int read;
byte[] bytes = new byte[1024];
while ((read = inputStream.read(bytes)) != -1) {
outputStream.write(bytes, 0, read);
}
}
inputStream.close();
// if it has worked successfully, we also need to reset the size
// of the object.
setSize(outputFile.length());
}
@Override
public Channel openAuxChannel(String auxItemTag, DataAccessOption... options) throws IOException {
Path auxPath = getAuxObjectAsPath(auxItemTag);
if (isWriteAccessRequested(options)) {
if (dvObject instanceof Dataset && !this.canWrite()) {
            // If this is a dataset-level auxiliary file (a cached metadata export,
// dataset logo, etc.) there's a chance that no "real" files
// have been saved for this dataset yet, and thus the filesystem
// directory does not exist yet. Let's force a proper .open() on
// this StorageIO, that will ensure it is created:
open(DataAccessOption.WRITE_ACCESS);
}
FileOutputStream auxOut = new FileOutputStream(auxPath.toFile());
if (auxOut == null) {
throw new IOException("Failed to open Auxiliary File " + dvObject.getStorageIdentifier() + "." + auxItemTag + " for writing.");
}
return auxOut.getChannel();
}
// Read access requested.
// Check if this Aux object is cached; and if so, open for reading:
if (!auxPath.toFile().exists()) {
throw new FileNotFoundException("Auxiliary File " + dvObject.getStorageIdentifier() + "." + auxItemTag + " does not exist.");
}
FileInputStream auxIn = new FileInputStream(auxPath.toFile());
if (auxIn == null) {
throw new IOException("Failed to open Auxiliary File " + dvObject.getStorageIdentifier() + "." + auxItemTag + " for reading");
}
return auxIn.getChannel();
}
@Override
public boolean isAuxObjectCached(String auxItemTag) throws IOException {
// Check if the file exists:
Path auxPath = getAuxObjectAsPath(auxItemTag);
return auxPath.toFile().exists();
}
@Override
public long getAuxObjectSize(String auxItemTag) throws IOException {
Path auxPath = getAuxObjectAsPath(auxItemTag);
if (!auxPath.toFile().exists()) {
throw new FileNotFoundException ("Aux file does not exist.");
}
return auxPath.toFile().length();
}
@Override
public Path getAuxObjectAsPath(String auxItemTag) throws IOException {
if (auxItemTag == null || "".equals(auxItemTag)) {
throw new IOException("Null or invalid Auxiliary Object Tag.");
}
String datasetDirectory = getDatasetDirectory();
if (dvObject.getStorageIdentifier() == null || "".equals(dvObject.getStorageIdentifier())) {
throw new IOException("Data Access: No local storage identifier defined for this datafile.");
}
Path auxPath = null;
if (dvObject instanceof DataFile) {
auxPath = Paths.get(datasetDirectory, stripDriverId(dvObject.getStorageIdentifier()) + "." + auxItemTag);
} else if (dvObject instanceof Dataset) {
auxPath = Paths.get(datasetDirectory, auxItemTag);
} else if (dvObject instanceof Dataverse) {
} else {
throw new IOException("Aux path could not be generated for " + auxItemTag);
}
if (auxPath == null) {
throw new IOException("Invalid Path location for the auxiliary file " + dvObject.getStorageIdentifier() + "." + auxItemTag);
}
return auxPath;
}
@Override
public void backupAsAux(String auxItemTag) throws IOException {
Path auxPath = getAuxObjectAsPath(auxItemTag);
Files.move(getFileSystemPath(), auxPath, StandardCopyOption.REPLACE_EXISTING);
}
@Override
public void revertBackupAsAux(String auxItemTag) throws IOException {
Path auxPath = getAuxObjectAsPath(auxItemTag);
Files.move(auxPath, getFileSystemPath(), StandardCopyOption.REPLACE_EXISTING);
}
// this method copies a local filesystem Path into this DataAccess Auxiliary location:
@Override
public void savePathAsAux(Path fileSystemPath, String auxItemTag) throws IOException {
if (dvObject instanceof Dataset && !this.canWrite()) {
// see the comment in openAuxChannel()
open(DataAccessOption.WRITE_ACCESS);
}
// quick Files.copy method:
try {
Path auxPath = getAuxObjectAsPath(auxItemTag);
Files.copy(fileSystemPath, auxPath, StandardCopyOption.REPLACE_EXISTING);
} catch (IOException ex) {
}
}
@Override
public void saveInputStreamAsAux(InputStream inputStream, String auxItemTag, Long filesize) throws IOException {
saveInputStreamAsAux(inputStream, auxItemTag);
}
@Override
public void saveInputStreamAsAux(InputStream inputStream, String auxItemTag) throws IOException {
if (dvObject instanceof Dataset && !this.canWrite()) {
// see the comment in openAuxChannel()
open(DataAccessOption.WRITE_ACCESS);
}
        // Since this is a local filesystem file, we can use the
// quick NIO Files.copy method:
File outputFile = getAuxObjectAsPath(auxItemTag).toFile();
if (outputFile == null) {
throw new FileNotFoundException("FileAccessIO: Could not locate aux file for writing.");
}
try (OutputStream outputStream = new FileOutputStream(outputFile)) {
int read;
byte[] bytes = new byte[1024];
while ((read = inputStream.read(bytes)) != -1) {
outputStream.write(bytes, 0, read);
}
}
inputStream.close();
}
@Override
    public List<String> listAuxObjects() throws IOException {
if (this.getDataFile() == null) {
throw new IOException("This FileAccessIO object hasn't been properly initialized.");
}
List<Path> cachedFiles = listCachedFiles();
if (cachedFiles == null) {
return null;
}
List<String> cachedFileNames = new ArrayList<>();
String baseName = stripDriverId(this.getDataFile().getStorageIdentifier()) + ".";
for (Path auxPath : cachedFiles) {
cachedFileNames.add(auxPath.getFileName().toString().substring(baseName.length()));
}
return cachedFileNames;
}
@Override
public void deleteAuxObject(String auxItemTag) throws IOException {
Path auxPath = getAuxObjectAsPath(auxItemTag);
Files.delete(auxPath);
}
@Override
public void deleteAllAuxObjects() throws IOException {
List<Path> cachedFiles = listCachedFiles();
if (cachedFiles == null) {
return;
}
for (Path auxPath : cachedFiles) {
Files.delete(auxPath);
}
}
@Override
public String getStorageLocation() {
// For a local file, the "storage location" is a complete, absolute
// filesystem path, with the "<driverId>://" prefix:
try {
Path testPath = getFileSystemPath();
if (testPath != null) {
return this.driverId + "://" + testPath.toString();
}
} catch (IOException ioex) {
// just return null, below:
}
return null;
}
@Override
public Path getFileSystemPath() throws IOException {
if (physicalPath != null) {
return physicalPath;
}
String datasetDirectory = getDatasetDirectory();
if (dvObject.getStorageIdentifier() == null || "".equals(dvObject.getStorageIdentifier())) {
throw new IOException("Data Access: No local storage identifier defined for this datafile.");
}
physicalPath = Paths.get(datasetDirectory, stripDriverId(dvObject.getStorageIdentifier()));
return physicalPath;
}
@Override
public boolean exists() throws IOException {
if (getFileSystemPath() == null) {
throw new FileNotFoundException("FileAccessIO: invalid Access IO object.");
}
return getFileSystemPath().toFile().exists();
}
/*@Override
public void delete() throws IOException {
Path victim = getFileSystemPath();
if (victim != null) {
Files.delete(victim);
} else {
throw new IOException("Could not locate physical file location for the Filesystem object.");
}
}*/
@Override
public void delete() throws IOException {
if (!isDirectAccess()) {
throw new IOException("Direct Access IO must be used to permanently delete stored file objects");
}
if (physicalPath == null) {
throw new IOException("Attempted delete on an unspecified physical path");
}
deleteAllAuxObjects();
Files.delete(physicalPath);
}
    // Auxiliary helper methods, filesystem access-specific:
private long getLocalFileSize () {
long fileSize = -1;
try {
File testFile = getFileSystemPath().toFile();
if (testFile != null) {
fileSize = testFile.length();
}
return fileSize;
} catch (IOException ex) {
return -1;
}
}
public FileInputStream openLocalFileAsInputStream () {
FileInputStream in;
try {
in = new FileInputStream(getFileSystemPath().toFile());
} catch (IOException ex) {
            // We don't particularly care why we have failed to
            // access the file.
            // From the point of view of the download subsystem, it's a
            // binary operation -- it's either successful or not.
// If we can't access it for whatever reason, we are saying
// it's 404 NOT FOUND in our HTTP response.
// TODO: no, we should probably provide some kind of diagnostics.
// -- L.A. 4.0.2
return null;
}
return in;
}
public FileOutputStream openLocalFileAsOutputStream () {
FileOutputStream out;
try {
out = new FileOutputStream(getFileSystemPath().toFile());
} catch (IOException ex) {
            // We don't particularly care why we have failed to
            // access the file.
            // From the point of view of the download subsystem, it's a
            // binary operation -- it's either successful or not.
// If we can't access it for whatever reason, we are saying
// it's 404 NOT FOUND in our HTTP response.
// TODO: no, we should probably provide some kind of diagnostics.
// -- L.A. 4.0.2
return null;
}
return out;
}
private String getDatasetDirectory() throws IOException {
if (dvObject == null) {
throw new IOException("No DvObject defined in the Data Access Object");
}
Path datasetDirectoryPath=null;
if (dvObject instanceof Dataset) {
datasetDirectoryPath = Paths.get(this.getDataset().getAuthorityForFileStorage(), this.getDataset().getIdentifierForFileStorage());
} else if (dvObject instanceof DataFile) {
datasetDirectoryPath = Paths.get(this.getDataFile().getOwner().getAuthorityForFileStorage(), this.getDataFile().getOwner().getIdentifierForFileStorage());
} else if (dvObject instanceof Dataverse) {
throw new IOException("FileAccessIO: Dataverses are not a supported dvObject");
}
if (datasetDirectoryPath == null) {
throw new IOException("Could not determine the filesystem directory of the parent dataset.");
}
String datasetDirectory = Paths.get(getFilesRootDirectory(), datasetDirectoryPath.toString()).toString();
if (dvObject.getStorageIdentifier() == null || dvObject.getStorageIdentifier().isEmpty()) {
throw new IOException("Data Access: No local storage identifier defined for this datafile.");
}
return datasetDirectory;
}
private String getFilesRootDirectory() {
String filesRootDirectory = System.getProperty("dataverse.files." + this.driverId + ".directory");
if (filesRootDirectory == null || filesRootDirectory.equals("")) {
filesRootDirectory = "/tmp/files";
}
return filesRootDirectory;
}
private List<Path> listCachedFiles() throws IOException {
List<Path> auxItems = new ArrayList<>();
// cached files for a given datafiles are stored on the filesystem
// as <filesystemname>.*; for example, <filename>.thumb64 or
// <filename>.RData.
String baseName;
Path datasetDirectoryPath;
if (isDirectAccess()) {
baseName = physicalPath.getFileName().toString();
datasetDirectoryPath = physicalPath.getParent();
} else {
if (this.getDataFile() == null || this.getDataFile().getStorageIdentifier() == null || this.getDataFile().getStorageIdentifier().isEmpty()) {
throw new IOException("Null or invalid DataFile in FileAccessIO object.");
}
baseName = stripDriverId(this.getDataFile().getStorageIdentifier());
datasetDirectoryPath = Paths.get(getDatasetDirectory());
}
if (datasetDirectoryPath == null) {
throw new IOException("Could not determine the filesystem directory of the parent dataset.");
}
DirectoryStream.Filter<Path> filter = new DirectoryStream.Filter<Path>() {
@Override
public boolean accept(Path file) throws IOException {
return (file.getFileName() != null
&& file.getFileName().toString().startsWith(baseName + "."));
}
};
DirectoryStream<Path> dirStream = Files.newDirectoryStream(datasetDirectoryPath, filter);
if (dirStream != null) {
for (Path filePath : dirStream) {
auxItems.add(filePath);
}
}
dirStream.close();
return auxItems;
}
@Override
public InputStream getAuxFileAsInputStream(String auxItemTag) throws IOException {
InputStream in = null;
if(this.isAuxObjectCached(auxItemTag))
{
Path path=getAuxObjectAsPath(auxItemTag);
in=Files.newInputStream(path);
}
return in;
}
private String stripDriverId(String storageIdentifier) {
int separatorIndex = storageIdentifier.indexOf("://");
if(separatorIndex>0) {
return storageIdentifier.substring(separatorIndex + 3);
}
return storageIdentifier;
}
}
| 1 | 45,163 | I believe this is what we want to rearrange: this in.skip() should not be happening here, in the open method, but in the setOffset() method itself, because we want to be able to change that offset after the initial open. The setOffset() method will need to throw an IOException if it's called while the InputStream is still null, or if the skip() call itself results in an IOException. | IQSS-dataverse | java |
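A minimal sketch of the rearrangement suggested above. The `getInputStream()` accessor and the `offset` field backing `getOffset()` are assumptions here, not the project's confirmed fields:

```java
// Sketch only: setOffset() performs the skip itself, so the offset can still
// be changed after the initial open(), and failures surface as IOExceptions.
public void setOffset(long newOffset) throws IOException {
    InputStream in = getInputStream(); // assumed to mirror the existing setInputStream()
    if (in == null) {
        throw new IOException("Cannot set offset: the InputStream has not been opened yet.");
    }
    long skipped = in.skip(newOffset);
    if (skipped != newOffset) {
        throw new IOException("Failed to skip to offset " + newOffset + "; skipped " + skipped + " bytes.");
    }
    this.offset = newOffset; // assumed field backing getOffset()
}
```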
@@ -32,6 +32,11 @@ public class MethodNameDeclaration extends AbstractNameDeclaration {
return p.isVarargs();
}
+ public boolean isPrimitiveReturnType() {
+ return getMethodNameDeclaratorNode().getParent().getResultType().getChild(0)
+ .getChild(0) instanceof ASTPrimitiveType;
+ }
+
public ASTMethodDeclarator getMethodNameDeclaratorNode() {
return (ASTMethodDeclarator) node;
} | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.symboltable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.java.ast.ASTFormalParameter;
import net.sourceforge.pmd.lang.java.ast.ASTFormalParameters;
import net.sourceforge.pmd.lang.java.ast.ASTMethodDeclarator;
import net.sourceforge.pmd.lang.java.ast.ASTPrimitiveType;
import net.sourceforge.pmd.lang.symboltable.AbstractNameDeclaration;
public class MethodNameDeclaration extends AbstractNameDeclaration {
public MethodNameDeclaration(ASTMethodDeclarator node) {
super(node);
}
public int getParameterCount() {
return ((ASTMethodDeclarator) node).getParameterCount();
}
public boolean isVarargs() {
ASTFormalParameters params = (ASTFormalParameters) node.getChild(0);
if (params.size() == 0) {
return false;
}
// If it's a varargs, it HAS to be the last parameter
ASTFormalParameter p = (ASTFormalParameter) params.getChild(params.size() - 1);
return p.isVarargs();
}
public ASTMethodDeclarator getMethodNameDeclaratorNode() {
return (ASTMethodDeclarator) node;
}
public String getParameterDisplaySignature() {
StringBuilder sb = new StringBuilder("(");
ASTFormalParameters params = (ASTFormalParameters) node.getChild(0);
// TODO - this can be optimized - add [0] then ,[n] in a loop.
// no need to trim at the end
for (int i = 0; i < ((ASTMethodDeclarator) node).getParameterCount(); i++) {
ASTFormalParameter p = (ASTFormalParameter) params.getChild(i);
sb.append(p.getTypeNode().getTypeImage());
if (p.isVarargs()) {
sb.append("...");
}
sb.append(',');
}
if (sb.charAt(sb.length() - 1) == ',') {
sb.deleteCharAt(sb.length() - 1);
}
sb.append(')');
return sb.toString();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof MethodNameDeclaration)) {
return false;
}
MethodNameDeclaration other = (MethodNameDeclaration) o;
// compare name
if (!other.node.getImage().equals(node.getImage())) {
return false;
}
// compare parameter count - this catches the case where there are no
// params, too
if (((ASTMethodDeclarator) other.node).getParameterCount() != ((ASTMethodDeclarator) node)
.getParameterCount()) {
return false;
}
// compare parameter types
ASTFormalParameters myParams = (ASTFormalParameters) node.getChild(0);
ASTFormalParameters otherParams = (ASTFormalParameters) other.node.getChild(0);
for (int i = 0; i < ((ASTMethodDeclarator) node).getParameterCount(); i++) {
ASTFormalParameter myParam = (ASTFormalParameter) myParams.getChild(i);
ASTFormalParameter otherParam = (ASTFormalParameter) otherParams.getChild(i);
// Compare vararg
if (myParam.isVarargs() != otherParam.isVarargs()) {
return false;
}
Node myTypeNode = myParam.getTypeNode().getChild(0);
Node otherTypeNode = otherParam.getTypeNode().getChild(0);
// compare primitive vs reference type
if (myTypeNode.getClass() != otherTypeNode.getClass()) {
return false;
}
// simple comparison of type images
// this can be fooled by one method using "String"
// and the other method using "java.lang.String"
// once we get real types in here that should get fixed
String myTypeImg;
String otherTypeImg;
if (myTypeNode instanceof ASTPrimitiveType) {
myTypeImg = myTypeNode.getImage();
otherTypeImg = otherTypeNode.getImage();
} else {
myTypeImg = myTypeNode.getChild(0).getImage();
otherTypeImg = otherTypeNode.getChild(0).getImage();
}
if (!myTypeImg.equals(otherTypeImg)) {
return false;
}
// if type is ASTPrimitiveType and is an array, make sure the other
// one is also
}
return true;
}
@Override
public int hashCode() {
int hash = node.getImage().hashCode() * 31 + ((ASTMethodDeclarator) node).getParameterCount();
ASTFormalParameters myParams = (ASTFormalParameters) node.getChild(0);
for (int i = 0; i < ((ASTMethodDeclarator) node).getParameterCount(); i++) {
ASTFormalParameter myParam = (ASTFormalParameter) myParams.getChild(i);
Node myTypeNode = myParam.getTypeNode().getChild(0);
String myTypeImg;
if (myTypeNode instanceof ASTPrimitiveType) {
myTypeImg = myTypeNode.getImage();
} else {
myTypeImg = myTypeNode.getChild(0).getImage();
}
hash = hash * 31 + myTypeImg.hashCode();
}
return hash;
}
@Override
public String toString() {
return "Method " + node.getImage() + ", line " + node.getBeginLine() + ", params = "
+ ((ASTMethodDeclarator) node).getParameterCount();
}
}
| 1 | 19,244 | In case the method is `void`, there won't be any children and `getChild(0)` throws. We'll need to check with `isVoid()` for that case. I'll update this when I merge. | pmd-pmd | java |
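A sketch of the guard the reviewer describes. It mirrors the AST chain from the patch above and assumes `getResultType()` yields an `ASTResultType` exposing `isVoid()` (consistent with PMD's Java AST, but treat the exact shape as illustrative):

```java
public boolean isPrimitiveReturnType() {
    ASTResultType resultType = getMethodNameDeclaratorNode().getParent().getResultType();
    // Guard first: a void result type has no children, so getChild(0) would throw.
    if (resultType.isVoid()) {
        return false;
    }
    return resultType.getChild(0).getChild(0) instanceof ASTPrimitiveType;
}
```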
@@ -83,6 +83,7 @@ func main() {
tch.NewInbound(channel, tch.ListenAddr(":28941")),
http.NewInbound(":24034"),
},
+ Interceptors: yarpc.Interceptors{requestLogInterceptor{}},
})
handler := handler{items: make(map[string]string)} | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package main
import (
"fmt"
"log"
"os"
"sync"
"github.com/yarpc/yarpc-go"
"github.com/yarpc/yarpc-go/encoding/json"
"github.com/yarpc/yarpc-go/transport"
"github.com/yarpc/yarpc-go/transport/http"
tch "github.com/yarpc/yarpc-go/transport/tchannel"
"github.com/uber/tchannel-go"
)
type getRequest struct {
Key string `json:"key"`
}
type getResponse struct {
Value string `json:"value"`
}
type setRequest struct {
Key string `json:"key"`
Value string `json:"value"`
}
type setResponse struct {
}
type handler struct {
sync.RWMutex
items map[string]string
}
func (h *handler) Get(req *json.Request, body *getRequest) (*getResponse, *json.Response, error) {
h.RLock()
result := &getResponse{Value: h.items[body.Key]}
h.RUnlock()
return result, nil, nil
}
func (h *handler) Set(req *json.Request, body *setRequest) (*setResponse, *json.Response, error) {
h.Lock()
h.items[body.Key] = body.Value
h.Unlock()
return &setResponse{}, nil, nil
}
func main() {
channel, err := tchannel.NewChannel("keyvalue", nil)
if err != nil {
log.Fatalln(err)
}
rpc := yarpc.New(yarpc.Config{
Name: "keyvalue",
Inbounds: []transport.Inbound{
tch.NewInbound(channel, tch.ListenAddr(":28941")),
http.NewInbound(":24034"),
},
})
handler := handler{items: make(map[string]string)}
json.Register(rpc, json.Procedure("get", handler.Get))
json.Register(rpc, json.Procedure("set", handler.Set))
if err := rpc.Start(); err != nil {
fmt.Println("error:", err.Error())
os.Exit(1)
}
select {}
}
| 1 | 9,567 | If the yarpc.Interceptors wrapper will be added to any user interceptor anyway, why not do it transitively? That would save users a step. The same applies to filters. | yarpc-yarpc-go | go |
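An illustrative sketch of the "do it transitively" idea. The `Config` shape and the slice-backed `Interceptors` type are hypothetical, inferred from the `yarpc.Interceptors{...}` literal in the diff:

```go
package yarpc // sketch context only

import "github.com/yarpc/yarpc-go/transport"

// Config takes a plain slice here, so callers never construct the
// yarpc.Interceptors wrapper themselves.
type Config struct {
	Name         string
	Inbounds     []transport.Inbound
	Interceptors []transport.Interceptor
}

// chain assumes `type Interceptors []transport.Interceptor` implements
// transport.Interceptor by applying each element in order; New() would call
// this internally instead of requiring callers to wrap.
func chain(cfg Config) transport.Interceptor {
	return Interceptors(cfg.Interceptors)
}
```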
@@ -1011,5 +1011,13 @@ func (e *ETCD) GetMembersClientURLs(ctx context.Context) ([]string, error) {
// RemoveSelf will remove the member if it exists in the cluster
func (e *ETCD) RemoveSelf(ctx context.Context) error {
- return e.removePeer(ctx, e.name, e.address, true)
+ if err := e.removePeer(ctx, e.name, e.address, true); err != nil {
+ return err
+ }
+
+ // backup the data dir to avoid issues when re-enabling etcd
+ oldDataDir := etcdDBDir(e.config) + "-old-" + strconv.Itoa(int(time.Now().Unix()))
+
+ // move the data directory to a temp path
+ return os.Rename(etcdDBDir(e.config), oldDataDir)
} | 1 | package etcd
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/pkg/errors"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/control/deps"
"github.com/rancher/k3s/pkg/daemons/executor"
"github.com/rancher/k3s/pkg/version"
"github.com/robfig/cron/v3"
"github.com/sirupsen/logrus"
etcd "go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/clientv3/snapshot"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.etcd.io/etcd/etcdserver/etcdserverpb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
)
type ETCD struct {
client *etcd.Client
config *config.Control
name string
runtime *config.ControlRuntime
address string
cron *cron.Cron
s3 *s3
}
type learnerProgress struct {
ID uint64 `json:"id,omitempty"`
Name string `json:"name,omitempty"`
RaftAppliedIndex uint64 `json:"raftAppliedIndex,omitempty"`
LastProgress metav1.Time `json:"lastProgress,omitempty"`
}
// NewETCD creates a new value of type
// ETCD with an initialized cron value.
func NewETCD() *ETCD {
return &ETCD{
cron: cron.New(),
}
}
var (
learnerProgressKey = version.Program + "/etcd/learnerProgress"
// AddressKey will contain the value of api addresses list
AddressKey = version.Program + "/apiaddresses"
)
const (
snapshotPrefix = "etcd-snapshot-"
endpoint = "https://127.0.0.1:2379"
testTimeout = time.Second * 10
manageTickerTime = time.Second * 15
learnerMaxStallTime = time.Minute * 5
// defaultDialTimeout is intentionally short so that connections timeout within the testTimeout defined above
defaultDialTimeout = 2 * time.Second
// other defaults from k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go
defaultKeepAliveTime = 30 * time.Second
defaultKeepAliveTimeout = 10 * time.Second
maxBackupRetention = 5
)
// Members contains a slice that holds all
// members of the cluster.
type Members struct {
Members []*etcdserverpb.Member `json:"members"`
}
// EndpointName returns the name of the endpoint.
func (e *ETCD) EndpointName() string {
return "etcd"
}
// Test ensures that the local node is a voting member of the target cluster.
// If it is still a learner or not a part of the cluster, an error is raised.
func (e *ETCD) Test(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, testTimeout)
defer cancel()
status, err := e.client.Status(ctx, endpoint)
if err != nil {
return err
}
if status.IsLearner {
return errors.New("this server has not yet been promoted from learner to voting member")
}
members, err := e.client.MemberList(ctx)
if err != nil {
return err
}
var memberNameUrls []string
for _, member := range members.Members {
for _, peerURL := range member.PeerURLs {
if peerURL == e.peerURL() && e.name == member.Name {
return nil
}
}
if len(member.PeerURLs) > 0 {
memberNameUrls = append(memberNameUrls, member.Name+"="+member.PeerURLs[0])
}
}
return errors.Errorf("this server is a not a member of the etcd cluster. Found %v, expect: %s=%s", memberNameUrls, e.name, e.address)
}
// etcdDBDir returns the path to dataDir/db/etcd
func etcdDBDir(config *config.Control) string {
return filepath.Join(config.DataDir, "db", "etcd")
}
// walDir returns the path to etcdDBDir/member/wal
func walDir(config *config.Control) string {
return filepath.Join(etcdDBDir(config), "member", "wal")
}
// nameFile returns the path to etcdDBDir/name.
func nameFile(config *config.Control) string {
return filepath.Join(etcdDBDir(config), "name")
}
// ResetFile returns the path to dataDir/db/reset-flag.
func ResetFile(config *config.Control) string {
return filepath.Join(config.DataDir, "db", "reset-flag")
}
// IsInitialized checks to see if a WAL directory exists. If so, we assume that etcd
// has already been brought up at least once.
func (e *ETCD) IsInitialized(ctx context.Context, config *config.Control) (bool, error) {
dir := walDir(config)
if s, err := os.Stat(dir); err == nil && s.IsDir() {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else {
return false, errors.Wrapf(err, "invalid state for wal directory %s", dir)
}
}
// Reset resets an etcd node
func (e *ETCD) Reset(ctx context.Context, rebootstrap func() error) error {
// Wait for etcd to come up as a new single-node cluster, then exit
go func() {
t := time.NewTicker(5 * time.Second)
defer t.Stop()
for range t.C {
if err := e.Test(ctx); err == nil {
members, err := e.client.MemberList(ctx)
if err != nil {
continue
}
// storageBootstrap() - runtime structure has been written with correct certificate data
if err := rebootstrap(); err != nil {
logrus.Fatal(err)
}
// call functions to rewrite them from daemons/control/server.go (prepare())
if err := deps.GenServerDeps(e.config, e.runtime); err != nil {
logrus.Fatal(err)
}
if len(members.Members) == 1 && members.Members[0].Name == e.name {
logrus.Infof("Etcd is running, restart without --cluster-reset flag now. Backup and delete ${datadir}/server/db on each peer etcd server and rejoin the nodes")
os.Exit(0)
}
}
}
}()
// If asked to restore from a snapshot, do so
if e.config.ClusterResetRestorePath != "" {
if e.config.EtcdS3 {
if e.s3 == nil {
s3, err := newS3(ctx, e.config)
if err != nil {
return err
}
e.s3 = s3
}
logrus.Infof("Retrieving etcd snapshot %s from S3", e.config.ClusterResetRestorePath)
if err := e.s3.download(ctx); err != nil {
return err
}
logrus.Infof("S3 download complete for %s", e.config.ClusterResetRestorePath)
}
info, err := os.Stat(e.config.ClusterResetRestorePath)
if os.IsNotExist(err) {
return fmt.Errorf("etcd: snapshot path does not exist: %s", e.config.ClusterResetRestorePath)
}
if info.IsDir() {
return fmt.Errorf("etcd: snapshot path must be a file, not a directory: %s", e.config.ClusterResetRestorePath)
}
if err := e.Restore(ctx); err != nil {
return err
}
}
if err := e.setName(true); err != nil {
return err
}
// touch a file to avoid multiple resets
if err := ioutil.WriteFile(ResetFile(e.config), []byte{}, 0600); err != nil {
return err
}
return e.newCluster(ctx, true)
}
// Start starts the datastore
func (e *ETCD) Start(ctx context.Context, clientAccessInfo *clientaccess.Info) error {
existingCluster, err := e.IsInitialized(ctx, e.config)
if err != nil {
return errors.Wrapf(err, "configuration validation failed")
}
if !e.config.EtcdDisableSnapshots {
e.setSnapshotFunction(ctx)
e.cron.Start()
}
go e.manageLearners(ctx)
if existingCluster {
//check etcd dir permission
etcdDir := etcdDBDir(e.config)
info, err := os.Stat(etcdDir)
if err != nil {
return err
}
if info.Mode() != 0700 {
if err := os.Chmod(etcdDir, 0700); err != nil {
return err
}
}
opt, err := executor.CurrentETCDOptions()
if err != nil {
return err
}
return e.cluster(ctx, false, opt)
}
if clientAccessInfo == nil {
return e.newCluster(ctx, false)
}
err = e.join(ctx, clientAccessInfo)
return errors.Wrap(err, "joining etcd cluster")
}
// join attempts to add a member to an existing cluster
func (e *ETCD) join(ctx context.Context, clientAccessInfo *clientaccess.Info) error {
clientURLs, memberList, err := ClientURLs(ctx, clientAccessInfo, e.config.PrivateIP)
if err != nil {
return err
}
client, err := GetClient(ctx, e.runtime, clientURLs...)
if err != nil {
return err
}
defer client.Close()
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
var (
cluster []string
add = true
)
members, err := client.MemberList(ctx)
if err != nil {
logrus.Errorf("Failed to get member list from etcd cluster. Will assume this member is already added")
members = &etcd.MemberListResponse{
Members: append(memberList.Members, &etcdserverpb.Member{
Name: e.name,
PeerURLs: []string{e.peerURL()},
}),
}
add = false
}
for _, member := range members.Members {
for _, peer := range member.PeerURLs {
u, err := url.Parse(peer)
if err != nil {
return err
}
// An uninitialized member won't have a name
if u.Hostname() == e.address && (member.Name == e.name || member.Name == "") {
add = false
}
if member.Name == "" && u.Hostname() == e.address {
member.Name = e.name
}
if len(member.PeerURLs) > 0 {
cluster = append(cluster, fmt.Sprintf("%s=%s", member.Name, member.PeerURLs[0]))
}
}
}
if add {
logrus.Infof("Adding %s to etcd cluster %v", e.peerURL(), cluster)
if _, err = client.MemberAddAsLearner(ctx, []string{e.peerURL()}); err != nil {
return err
}
cluster = append(cluster, fmt.Sprintf("%s=%s", e.name, e.peerURL()))
}
logrus.Infof("Starting etcd for cluster %v", cluster)
return e.cluster(ctx, false, executor.InitialOptions{
Cluster: strings.Join(cluster, ","),
State: "existing",
})
}
// Register configures a new etcd client and adds db info routes for the http request handler.
func (e *ETCD) Register(ctx context.Context, config *config.Control, handler http.Handler) (http.Handler, error) {
e.config = config
e.runtime = config.Runtime
client, err := GetClient(ctx, e.runtime, endpoint)
if err != nil {
return nil, err
}
e.client = client
address, err := GetAdvertiseAddress(config.PrivateIP)
if err != nil {
return nil, err
}
e.address = address
e.config.Datastore.Endpoint = endpoint
e.config.Datastore.Config.CAFile = e.runtime.ETCDServerCA
e.config.Datastore.Config.CertFile = e.runtime.ClientETCDCert
e.config.Datastore.Config.KeyFile = e.runtime.ClientETCDKey
if err := e.setName(false); err != nil {
return nil, err
}
e.config.Runtime.ClusterControllerStart = func(ctx context.Context) error {
Register(ctx, e, e.config.Runtime.Core.Core().V1().Node())
return nil
}
tombstoneFile := filepath.Join(etcdDBDir(e.config), "tombstone")
if _, err := os.Stat(tombstoneFile); err == nil {
logrus.Infof("tombstone file has been detected, removing data dir to rejoin the cluster")
if _, err := backupDirWithRetention(etcdDBDir(e.config), maxBackupRetention); err != nil {
return nil, err
}
}
return e.handler(handler), err
}
// setName sets a unique name for this cluster member. The first time this is called,
// or if force is set to true, a new name will be generated and written to disk. The persistent
// name is used on subsequent calls.
func (e *ETCD) setName(force bool) error {
fileName := nameFile(e.config)
data, err := ioutil.ReadFile(fileName)
if os.IsNotExist(err) || force {
h, err := os.Hostname()
if err != nil {
return err
}
e.name = strings.SplitN(h, ".", 2)[0] + "-" + uuid.New().String()[:8]
if err := os.MkdirAll(filepath.Dir(fileName), 0700); err != nil {
return err
}
return ioutil.WriteFile(fileName, []byte(e.name), 0600)
} else if err != nil {
return err
}
e.name = string(data)
return nil
}
// handler wraps the handler with routes for database info
func (e *ETCD) handler(next http.Handler) http.Handler {
mux := mux.NewRouter()
mux.Handle("/db/info", e.infoHandler())
mux.NotFoundHandler = next
return mux
}
// infoHandler returns etcd cluster information. This is used by new members when joining the cluster.
func (e *ETCD) infoHandler() http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
ctx, cancel := context.WithTimeout(req.Context(), 2*time.Second)
defer cancel()
members, err := e.client.MemberList(ctx)
if err != nil {
json.NewEncoder(rw).Encode(&Members{
Members: []*etcdserverpb.Member{
{
Name: e.name,
PeerURLs: []string{e.peerURL()},
ClientURLs: []string{e.clientURL()},
},
},
})
return
}
rw.Header().Set("Content-Type", "application/json")
json.NewEncoder(rw).Encode(&Members{
Members: members.Members,
})
})
}
// GetClient returns an etcd client connected to the specified endpoints
func GetClient(ctx context.Context, runtime *config.ControlRuntime, endpoints ...string) (*etcd.Client, error) {
cfg, err := getClientConfig(ctx, runtime, endpoints...)
if err != nil {
return nil, err
}
return etcd.New(*cfg)
}
// getClientConfig generates an etcd client config connected to the specified endpoints
func getClientConfig(ctx context.Context, runtime *config.ControlRuntime, endpoints ...string) (*etcd.Config, error) {
tlsConfig, err := toTLSConfig(runtime)
if err != nil {
return nil, err
}
cfg := &etcd.Config{
Endpoints: endpoints,
TLS: tlsConfig,
Context: ctx,
DialTimeout: defaultDialTimeout,
DialKeepAliveTime: defaultKeepAliveTime,
DialKeepAliveTimeout: defaultKeepAliveTimeout,
}
return cfg, nil
}
// toTLSConfig converts the ControlRuntime configuration to TLS configuration suitable
// for use by etcd.
func toTLSConfig(runtime *config.ControlRuntime) (*tls.Config, error) {
clientCert, err := tls.LoadX509KeyPair(runtime.ClientETCDCert, runtime.ClientETCDKey)
if err != nil {
return nil, err
}
pool, err := certutil.NewPool(runtime.ETCDServerCA)
if err != nil {
return nil, err
}
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}
// GetAdvertiseAddress returns the IP address best suited for advertising to clients
func GetAdvertiseAddress(advertiseIP string) (string, error) {
ip := advertiseIP
if ip == "" {
ipAddr, err := utilnet.ChooseHostInterface()
if err != nil {
return "", err
}
ip = ipAddr.String()
}
return ip, nil
}
// newCluster starts etcd with the options needed to bootstrap a new cluster
func (e *ETCD) newCluster(ctx context.Context, reset bool) error {
return e.cluster(ctx, reset, executor.InitialOptions{
AdvertisePeerURL: fmt.Sprintf("https://%s:2380", e.address),
Cluster: fmt.Sprintf("%s=https://%s:2380", e.name, e.address),
State: "new",
})
}
// peerURL returns the peer access address for the local node
func (e *ETCD) peerURL() string {
return fmt.Sprintf("https://%s:2380", e.address)
}
// clientURL returns the client access address for the local node
func (e *ETCD) clientURL() string {
return fmt.Sprintf("https://%s:2379", e.address)
}
// metricsURL returns the metrics access address
func (e *ETCD) metricsURL(expose bool) string {
if expose {
return fmt.Sprintf("http://%s:2381", e.address)
}
return "http://127.0.0.1:2381"
}
// cluster returns ETCDConfig for a cluster
func (e *ETCD) cluster(ctx context.Context, forceNew bool, options executor.InitialOptions) error {
return executor.ETCD(executor.ETCDConfig{
Name: e.name,
InitialOptions: options,
ForceNewCluster: forceNew,
ListenClientURLs: fmt.Sprintf(e.clientURL() + ",https://127.0.0.1:2379"),
ListenMetricsURLs: e.metricsURL(e.config.EtcdExposeMetrics),
ListenPeerURLs: e.peerURL(),
AdvertiseClientURLs: e.clientURL(),
DataDir: etcdDBDir(e.config),
ServerTrust: executor.ServerTrust{
CertFile: e.config.Runtime.ServerETCDCert,
KeyFile: e.config.Runtime.ServerETCDKey,
ClientCertAuth: true,
TrustedCAFile: e.config.Runtime.ETCDServerCA,
},
PeerTrust: executor.PeerTrust{
CertFile: e.config.Runtime.PeerServerClientETCDCert,
KeyFile: e.config.Runtime.PeerServerClientETCDKey,
ClientCertAuth: true,
TrustedCAFile: e.config.Runtime.ETCDPeerCA,
},
ElectionTimeout: 5000,
HeartbeatInterval: 500,
Logger: "zap",
LogOutputs: []string{"stderr"},
})
}
// removePeer removes a peer from the cluster. The peer ID and IP address must both match.
func (e *ETCD) removePeer(ctx context.Context, id, address string, removeSelf bool) error {
members, err := e.client.MemberList(ctx)
if err != nil {
return err
}
for _, member := range members.Members {
if member.Name != id {
continue
}
for _, peerURL := range member.PeerURLs {
u, err := url.Parse(peerURL)
if err != nil {
return err
}
if u.Hostname() == address {
if e.address == address && !removeSelf {
return errors.New("node has been deleted from the cluster")
}
logrus.Infof("Removing name=%s id=%d address=%s from etcd", member.Name, member.ID, address)
_, err := e.client.MemberRemove(ctx, member.ID)
if err == rpctypes.ErrGRPCMemberNotFound {
return nil
}
return err
}
}
}
return nil
}
// manageLearners monitors the etcd cluster to ensure that learners are making progress towards
// being promoted to full voting member. The checks only run on the cluster member that is
// the etcd leader.
func (e *ETCD) manageLearners(ctx context.Context) error {
t := time.NewTicker(manageTickerTime)
defer t.Stop()
for range t.C {
ctx, cancel := context.WithTimeout(ctx, testTimeout)
defer cancel()
// Check to see if the local node is the leader. Only the leader should do learner management.
if status, err := e.client.Status(ctx, endpoint); err != nil {
logrus.Errorf("Failed to check local etcd status for learner management: %v", err)
continue
} else if status.Header.MemberId != status.Leader {
continue
}
progress, err := e.getLearnerProgress(ctx)
if err != nil {
logrus.Errorf("Failed to get recorded learner progress from etcd: %v", err)
continue
}
members, err := e.client.MemberList(ctx)
if err != nil {
logrus.Errorf("Failed to get etcd members for learner management: %v", err)
continue
}
for _, member := range members.Members {
if member.IsLearner {
if err := e.trackLearnerProgress(ctx, progress, member); err != nil {
logrus.Errorf("Failed to track learner progress towards promotion: %v", err)
}
break
}
}
}
return nil
}
// trackLearnerProgress attempts to promote a learner. If it cannot be promoted, progress through the raft index is tracked.
// If the learner does not make any progress in a reasonable amount of time, it is evicted from the cluster.
func (e *ETCD) trackLearnerProgress(ctx context.Context, progress *learnerProgress, member *etcdserverpb.Member) error {
// Try to promote it. If it can be promoted, no further tracking is necessary
if _, err := e.client.MemberPromote(ctx, member.ID); err != nil {
logrus.Debugf("Unable to promote learner %s: %v", member.Name, err)
} else {
logrus.Infof("Promoted learner %s", member.Name)
return nil
}
now := time.Now()
// If this is the first time we've tracked this member's progress, reset stats
if progress.Name != member.Name || progress.ID != member.ID {
progress.ID = member.ID
progress.Name = member.Name
progress.RaftAppliedIndex = 0
progress.LastProgress.Time = now
}
// Update progress by retrieving status from the member's first reachable client URL
for _, ep := range member.ClientURLs {
ctx, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
status, err := e.client.Status(ctx, ep)
if err != nil {
logrus.Debugf("Failed to get etcd status from learner %s at %s: %v", member.Name, ep, err)
continue
}
if progress.RaftAppliedIndex < status.RaftAppliedIndex {
logrus.Debugf("Learner %s has progressed from RaftAppliedIndex %d to %d", progress.Name, progress.RaftAppliedIndex, status.RaftAppliedIndex)
progress.RaftAppliedIndex = status.RaftAppliedIndex
progress.LastProgress.Time = now
}
break
}
// Warn if the learner hasn't made any progress
if !progress.LastProgress.Time.Equal(now) {
logrus.Warnf("Learner %s stalled at RaftAppliedIndex=%d for %s", progress.Name, progress.RaftAppliedIndex, now.Sub(progress.LastProgress.Time).String())
}
// See if it's time to evict yet
if now.Sub(progress.LastProgress.Time) > learnerMaxStallTime {
if _, err := e.client.MemberRemove(ctx, member.ID); err != nil {
return err
}
logrus.Warnf("Removed learner %s from etcd cluster", member.Name)
return nil
}
return e.setLearnerProgress(ctx, progress)
}
// getLearnerProgress returns the stored learnerProgress struct as retrieved from etcd
func (e *ETCD) getLearnerProgress(ctx context.Context) (*learnerProgress, error) {
progress := &learnerProgress{}
value, err := e.client.Get(ctx, learnerProgressKey)
if err != nil {
return nil, err
}
if value.Count < 1 {
return progress, nil
}
if err := json.NewDecoder(bytes.NewBuffer(value.Kvs[0].Value)).Decode(progress); err != nil {
return nil, err
}
return progress, nil
}
// setLearnerProgress stores the learnerProgress struct to etcd
func (e *ETCD) setLearnerProgress(ctx context.Context, status *learnerProgress) error {
w := &bytes.Buffer{}
if err := json.NewEncoder(w).Encode(status); err != nil {
return err
}
_, err := e.client.Put(ctx, learnerProgressKey, w.String())
return err
}
// clientURLs returns a list of all non-learner etcd cluster member client access URLs
func ClientURLs(ctx context.Context, clientAccessInfo *clientaccess.Info, selfIP string) ([]string, Members, error) {
var memberList Members
resp, err := clientAccessInfo.Get("/db/info")
if err != nil {
return nil, memberList, err
}
if err := json.Unmarshal(resp, &memberList); err != nil {
return nil, memberList, err
}
ip, err := GetAdvertiseAddress(selfIP)
if err != nil {
return nil, memberList, err
}
var clientURLs []string
members:
for _, member := range memberList.Members {
// excluding learner member from the client list
if member.IsLearner {
continue
}
for _, url := range member.ClientURLs {
if strings.Contains(url, ip) {
continue members
}
}
clientURLs = append(clientURLs, member.ClientURLs...)
}
return clientURLs, memberList, nil
}
// snapshotDir ensures that the snapshot directory exists, and then returns its path.
func snapshotDir(config *config.Control) (string, error) {
if config.EtcdSnapshotDir == "" {
// we have to create the snapshot dir if we are using
// the default snapshot dir if it doesn't exist
defaultSnapshotDir := filepath.Join(config.DataDir, "db", "snapshots")
s, err := os.Stat(defaultSnapshotDir)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(defaultSnapshotDir, 0700); err != nil {
return "", err
}
return defaultSnapshotDir, nil
}
return "", err
}
if s.IsDir() {
return defaultSnapshotDir, nil
}
}
return config.EtcdSnapshotDir, nil
}
// preSnapshotSetup checks to see if the necessary components are in place
// to perform an Etcd snapshot. This is necessary primarily for on-demand
// snapshots since they're performed before normal Etcd setup is completed.
func (e *ETCD) preSnapshotSetup(ctx context.Context, config *config.Control) error {
if e.client == nil {
if e.config == nil {
e.config = config
}
client, err := GetClient(ctx, e.config.Runtime, endpoint)
if err != nil {
return err
}
e.client = client
}
if e.runtime == nil {
e.runtime = config.Runtime
}
return nil
}
// Snapshot attempts to save a new snapshot to the configured directory, and then clean up any old
// snapshots in excess of the retention limits. This method is used in the internal cron snapshot
// system as well as used to do on-demand snapshots.
func (e *ETCD) Snapshot(ctx context.Context, config *config.Control) error {
if err := e.preSnapshotSetup(ctx, config); err != nil {
return err
}
status, err := e.client.Status(ctx, endpoint)
if err != nil {
return errors.Wrap(err, "failed to check etcd status for snapshot")
}
if status.IsLearner {
logrus.Warnf("Skipping snapshot: not supported for learner")
return nil
}
snapshotDir, err := snapshotDir(e.config)
if err != nil {
return errors.Wrap(err, "failed to get the snapshot dir")
}
cfg, err := getClientConfig(ctx, e.runtime, endpoint)
if err != nil {
return errors.Wrap(err, "failed to get config for etcd snapshot")
}
snapshotName := fmt.Sprintf("%s-%d", e.config.EtcdSnapshotName, time.Now().Unix())
snapshotPath := filepath.Join(snapshotDir, snapshotName)
logrus.Infof("Saving etcd snapshot to %s", snapshotPath)
if err := snapshot.NewV3(nil).Save(ctx, *cfg, snapshotPath); err != nil {
return errors.Wrap(err, "failed to save snapshot")
}
if e.config.EtcdS3 {
logrus.Infof("Saving etcd snapshot %s to S3", snapshotName)
if e.s3 == nil {
s3, err := newS3(ctx, config)
if err != nil {
return err
}
e.s3 = s3
}
if err := e.s3.upload(ctx, snapshotPath); err != nil {
return err
}
logrus.Infof("S3 upload complete for %s", snapshotName)
if e.config.EtcdSnapshotRetention >= 1 {
if err := e.s3.snapshotRetention(ctx); err != nil {
return errors.Wrap(err, "failed to apply s3 snapshot retention")
}
}
return nil
}
// check if we need to perform a retention check
if e.config.EtcdSnapshotRetention >= 1 {
if err := snapshotRetention(e.config.EtcdSnapshotRetention, snapshotDir); err != nil {
return errors.Wrap(err, "failed to apply snapshot retention")
}
}
return nil
}
// setSnapshotFunction schedules snapshots at the configured interval
func (e *ETCD) setSnapshotFunction(ctx context.Context) {
e.cron.AddFunc(e.config.EtcdSnapshotCron, func() {
if err := e.Snapshot(ctx, e.config); err != nil {
logrus.Error(err)
}
})
}
// Restore performs a restore of the ETCD datastore from
// the given snapshot path. This operation exists upon
// completion.
func (e *ETCD) Restore(ctx context.Context) error {
// check the old etcd data dir
oldDataDir := etcdDBDir(e.config) + "-old-" + strconv.Itoa(int(time.Now().Unix()))
if e.config.ClusterResetRestorePath == "" {
return errors.New("no etcd restore path was specified")
}
// make sure snapshot exists before restoration
if _, err := os.Stat(e.config.ClusterResetRestorePath); err != nil {
return err
}
// move the data directory to a temp path
if err := os.Rename(etcdDBDir(e.config), oldDataDir); err != nil {
return err
}
logrus.Infof("Pre-restore etcd database moved to %s", oldDataDir)
sManager := snapshot.NewV3(nil)
if err := sManager.Restore(snapshot.RestoreConfig{
SnapshotPath: e.config.ClusterResetRestorePath,
Name: e.name,
OutputDataDir: etcdDBDir(e.config),
OutputWALDir: walDir(e.config),
PeerURLs: []string{e.peerURL()},
InitialCluster: e.name + "=" + e.peerURL(),
}); err != nil {
return err
}
return nil
}
// snapshotRetention iterates through the snapshots and removes the oldest
// leaving the desired number of snapshots.
func snapshotRetention(retention int, snapshotDir string) error {
var snapshotFiles []os.FileInfo
if err := filepath.Walk(snapshotDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if strings.HasPrefix(info.Name(), snapshotPrefix) {
snapshotFiles = append(snapshotFiles, info)
}
return nil
}); err != nil {
return err
}
if len(snapshotFiles) <= retention {
return nil
}
sort.Slice(snapshotFiles, func(i, j int) bool {
return snapshotFiles[i].Name() < snapshotFiles[j].Name()
})
delCount := len(snapshotFiles) - retention
for _, df := range snapshotFiles[:delCount] {
if err := os.Remove(filepath.Join(snapshotDir, df.Name())); err != nil {
return err
}
}
return nil
}
// backupDirWithRetention will move the dir to a backup dir
// and will keep only maxBackupRetention of dirs.
func backupDirWithRetention(dir string, maxBackupRetention int) (string, error) {
backupDir := dir + "-backup-" + strconv.Itoa(int(time.Now().Unix()))
if _, err := os.Stat(dir); err != nil {
return "", nil
}
files, err := ioutil.ReadDir(filepath.Dir(dir))
if err != nil {
return "", err
}
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime().After(files[j].ModTime())
})
count := 0
for _, f := range files {
if strings.HasPrefix(f.Name(), filepath.Base(dir)+"-backup") && f.IsDir() {
count++
if count > maxBackupRetention {
if err := os.RemoveAll(filepath.Join(filepath.Dir(dir), f.Name())); err != nil {
return "", err
}
}
}
}
// move the directory to a temp path
if err := os.Rename(dir, backupDir); err != nil {
return "", err
}
return backupDir, nil
}
// GetAPIServerURLFromETCD will try to fetch the version.Program/apiaddresses key from etcd
// when it succeed it will parse the first address in the list and return back an address
func GetAPIServerURLFromETCD(ctx context.Context, cfg *config.Control) (string, error) {
if cfg.Runtime == nil {
return "", fmt.Errorf("runtime is not ready yet")
}
cl, err := GetClient(ctx, cfg.Runtime, endpoint)
if err != nil {
return "", err
}
etcdResp, err := cl.KV.Get(ctx, AddressKey)
if err != nil {
return "", err
}
if etcdResp.Count < 1 {
return "", fmt.Errorf("servers addresses are not yet set")
}
var addresses []string
if err := json.Unmarshal(etcdResp.Kvs[0].Value, &addresses); err != nil {
return "", fmt.Errorf("failed to unmarshal etcd key: %v", err)
}
return addresses[0], nil
}
// GetMembersClientURLs will list through the member lists in etcd and return
// back a combined list of client urls for each member in the cluster
func (e *ETCD) GetMembersClientURLs(ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, testTimeout)
defer cancel()
members, err := e.client.MemberList(ctx)
if err != nil {
return nil, err
}
var memberUrls []string
for _, member := range members.Members {
for _, clientURL := range member.ClientURLs {
memberUrls = append(memberUrls, string(clientURL))
}
}
return memberUrls, nil
}
// RemoveSelf will remove the member if it exists in the cluster
func (e *ETCD) RemoveSelf(ctx context.Context) error {
return e.removePeer(ctx, e.name, e.address, true)
}
| 1 | 9,316 | Is there anything we should do in the event we're unable to rename the directory? | k3s-io-k3s | go |
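One possible answer, sketched against the patch above (illustrative only): the member is already removed from the cluster at that point, so a failed rename is best surfaced with explicit context rather than as a bare `os.Rename` error:

```go
// Sketch: wrap the rename failure so the operator knows the member was
// removed but the stale data dir must be cleaned up before re-enabling etcd.
if err := os.Rename(etcdDBDir(e.config), oldDataDir); err != nil {
	return errors.Wrapf(err, "etcd member was removed, but backing up the data dir to %s failed; remove it manually before re-enabling etcd", oldDataDir)
}
return nil
```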
@@ -17,12 +17,17 @@ limitations under the License.
package main
import (
- "github.com/google/knative-gcp/pkg/broker/ingress"
"go.uber.org/zap"
+
+ "github.com/google/knative-gcp/pkg/broker/ingress"
"knative.dev/pkg/logging"
"knative.dev/pkg/signals"
)
+// main creates and starts an ingress handler using default options.
+// 1. It listens on port 8080.
+// 2. It reads the "GOOGLE_CLOUD_PROJECT" env var for the pubsub project.
+// 3. It expects the broker configmap to be mounted at "/var/run/cloud-run-events/broker/targets".
func main() {
// Since we pass nil, a default config with no error will be returned.
cfg, _ := logging.NewConfigFromMap(nil) | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/google/knative-gcp/pkg/broker/ingress"
"go.uber.org/zap"
"knative.dev/pkg/logging"
"knative.dev/pkg/signals"
)
func main() {
// Since we pass nil, a default config with no error will be returned.
cfg, _ := logging.NewConfigFromMap(nil)
logger, _ := logging.NewLoggerFromConfig(cfg, "broker-ingress")
ctx := signals.NewContext()
ctx = logging.WithLogger(ctx, logger)
ingress, err := ingress.NewHandler(ctx)
if err != nil {
logger.Desugar().Fatal("Unable to create ingress handler: ", zap.Error(err))
}
logger.Info("Starting ingress.", zap.Any("ingress", ingress))
if err := ingress.Start(ctx); err != nil {
logger.Desugar().Fatal("failed to start ingress: ", zap.Error(err))
}
}
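// resolveProject is an illustrative sketch, not part of the original source:
// one way the "GOOGLE_CLOUD_PROJECT" setting that the ingress handler reads
// for its pubsub project could be resolved explicitly (requires an "os"
// import). The handler performs this resolution itself.
func resolveProject() string {
	if project, ok := os.LookupEnv("GOOGLE_CLOUD_PROJECT"); ok {
		return project
	}
	return "" // hypothetical fallback when the env var is unset
}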
| 1 | 12,088 | How is this env var populated? Can this be retrieved from cluster metadata? | google-knative-gcp | go |
@@ -61,10 +61,11 @@ func (s *Service) reconcileInternetGateways() error {
EC2Client: s.scope.EC2,
BuildParams: s.getGatewayTagParams(*gateway.InternetGatewayId),
})
-
if err != nil {
+ record.Warnf(s.scope.Cluster, "FailedTagInternetGateway", "Failed to tag managed Internet Gateway %q: %v", *gateway.InternetGatewayId, err)
return errors.Wrapf(err, "failed to tag internet gateway %q", *gateway.InternetGatewayId)
}
+ record.Eventf(s.scope.Cluster, "SuccessfulTagInternetGateway", "Tagged managed Internet Gateway %q", *gateway.InternetGatewayId)
return nil
} | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ec2
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
)
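// reconcileInternetGateways ensures a managed VPC has an attached internet
// gateway: it looks up existing gateways, creates and attaches one when none
// is found, records the gateway ID on the scope, and keeps its tags current.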
func (s *Service) reconcileInternetGateways() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
s.scope.V(4).Info("Skipping internet gateways reconcile in unmanaged mode")
return nil
}
s.scope.V(2).Info("Reconciling internet gateways")
igs, err := s.describeVpcInternetGateways()
if awserrors.IsNotFound(err) {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
return errors.Errorf("failed to validate network: no internet gateways found in VPC %q", s.scope.VPC().ID)
}
ig, err := s.createInternetGateway()
if err != nil {
return err
}
igs = []*ec2.InternetGateway{ig}
} else if err != nil {
return err
}
gateway := igs[0]
s.scope.VPC().InternetGatewayID = gateway.InternetGatewayId
// Make sure tags are up to date.
err = tags.Ensure(converters.TagsToMap(gateway.Tags), &tags.ApplyParams{
EC2Client: s.scope.EC2,
BuildParams: s.getGatewayTagParams(*gateway.InternetGatewayId),
})
if err != nil {
return errors.Wrapf(err, "failed to tag internet gateway %q", *gateway.InternetGatewayId)
}
return nil
}
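// deleteInternetGateways detaches every internet gateway from the managed VPC
// and deletes it, emitting a deletion event per gateway; it is a no-op for
// unmanaged VPCs.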
func (s *Service) deleteInternetGateways() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
s.scope.V(4).Info("Skipping internet gateway deletion in unmanaged mode")
return nil
}
igs, err := s.describeVpcInternetGateways()
if awserrors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
for _, ig := range igs {
detachReq := &ec2.DetachInternetGatewayInput{
InternetGatewayId: ig.InternetGatewayId,
VpcId: aws.String(s.scope.VPC().ID),
}
if _, err := s.scope.EC2.DetachInternetGateway(detachReq); err != nil {
return errors.Wrapf(err, "failed to detach internet gateway %q", *ig.InternetGatewayId)
}
s.scope.Info("Detached internet gateway from VPC", "internet-gateway-id", *ig.InternetGatewayId, "vpc-id", s.scope.VPC().ID)
deleteReq := &ec2.DeleteInternetGatewayInput{
InternetGatewayId: ig.InternetGatewayId,
}
if _, err = s.scope.EC2.DeleteInternetGateway(deleteReq); err != nil {
return errors.Wrapf(err, "failed to delete internet gateway %q", *ig.InternetGatewayId)
}
s.scope.Info("Deleted internet gateway in VPC", "internet-gateway-id", *ig.InternetGatewayId, "vpc-id", s.scope.VPC().ID)
record.Eventf(s.scope.Cluster, "DeletedInternetGateway", "Deleted Internet Gateway %q previously attached to VPC %q", *ig.InternetGatewayId, s.scope.VPC().ID)
}
return nil
}
func (s *Service) createInternetGateway() (*ec2.InternetGateway, error) {
ig, err := s.scope.EC2.CreateInternetGateway(&ec2.CreateInternetGatewayInput{})
if err != nil {
return nil, errors.Wrap(err, "failed to create internet gateway")
}
s.scope.Info("Created internet gateway for VPC", "vpc-id", s.scope.VPC().ID)
_, err = s.scope.EC2.AttachInternetGateway(&ec2.AttachInternetGatewayInput{
InternetGatewayId: ig.InternetGateway.InternetGatewayId,
VpcId: aws.String(s.scope.VPC().ID),
})
if err != nil {
return nil, errors.Wrapf(err, "failed to attach internet gateway %q to vpc %q", *ig.InternetGateway.InternetGatewayId, s.scope.VPC().ID)
}
s.scope.Info("attached internet gateway to VPC", "internet-gateway-id", *ig.InternetGateway.InternetGatewayId, "vpc-id", s.scope.VPC().ID)
record.Eventf(s.scope.Cluster, "CreatedInternetGateway", "Created new Internet Gateway %q attached to VPC %q", *ig.InternetGateway.InternetGatewayId, s.scope.VPC().ID)
return ig.InternetGateway, nil
}
func (s *Service) describeVpcInternetGateways() ([]*ec2.InternetGateway, error) {
out, err := s.scope.EC2.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{
Filters: []*ec2.Filter{
filter.EC2.VPCAttachment(s.scope.VPC().ID),
},
})
if err != nil {
return nil, errors.Wrapf(err, "failed to describe nat gateways in vpc %q", s.scope.VPC().ID)
}
if len(out.InternetGateways) == 0 {
return nil, awserrors.NewNotFound(errors.Errorf("no internet gateways found in vpc %q", s.scope.VPC().ID))
}
return out.InternetGateways, nil
}
func (s *Service) getGatewayTagParams(id string) v1alpha2.BuildParams {
name := fmt.Sprintf("%s-igw", s.scope.Name())
return v1alpha2.BuildParams{
ClusterName: s.scope.Name(),
ResourceID: id,
Lifecycle: v1alpha2.ResourceLifecycleOwned,
Name: aws.String(name),
Role: aws.String(v1alpha2.CommonRoleTagValue),
}
}
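// For illustration (not from the original source): for a cluster named
// "example", the params above produce a Name tag of "example-igw" together
// with the owned-lifecycle and common-role tags on the gateway.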
| 1 | 10,339 | Should probably skip the success event here, since it could be a noop. | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -1840,7 +1840,8 @@ define(['loading', 'appRouter', 'layoutManager', 'connectionManager', 'userSetti
chaptercardbuilder.buildChapterCards(item, chapters, {
itemsContainer: scenesContent,
backdropShape: 'overflowBackdrop',
- squareShape: 'overflowSquare'
+ squareShape: 'overflowSquare',
+ imageBlurhashes: item.ImageBlurHashes
});
});
} else { | 1 | define(['loading', 'appRouter', 'layoutManager', 'connectionManager', 'userSettings', 'cardBuilder', 'datetime', 'mediaInfo', 'backdrop', 'listView', 'itemContextMenu', 'itemHelper', 'dom', 'indicators', 'imageLoader', 'libraryMenu', 'globalize', 'browser', 'events', 'playbackManager', 'scrollStyles', 'emby-itemscontainer', 'emby-checkbox', 'emby-button', 'emby-playstatebutton', 'emby-ratingbutton', 'emby-scroller', 'emby-select'], function (loading, appRouter, layoutManager, connectionManager, userSettings, cardBuilder, datetime, mediaInfo, backdrop, listView, itemContextMenu, itemHelper, dom, indicators, imageLoader, libraryMenu, globalize, browser, events, playbackManager) {
'use strict';
function getPromise(apiClient, params) {
var id = params.id;
if (id) {
return apiClient.getItem(apiClient.getCurrentUserId(), id);
}
if (params.seriesTimerId) {
return apiClient.getLiveTvSeriesTimer(params.seriesTimerId);
}
if (params.genre) {
return apiClient.getGenre(params.genre, apiClient.getCurrentUserId());
}
if (params.musicgenre) {
return apiClient.getMusicGenre(params.musicgenre, apiClient.getCurrentUserId());
}
if (params.musicartist) {
return apiClient.getArtist(params.musicartist, apiClient.getCurrentUserId());
}
throw new Error('Invalid request');
}
function hideAll(page, className, show) {
var i;
var length;
var elems = page.querySelectorAll('.' + className);
for (i = 0, length = elems.length; i < length; i++) {
if (show) {
elems[i].classList.remove('hide');
} else {
elems[i].classList.add('hide');
}
}
}
function getContextMenuOptions(item, user, button) {
var options = {
item: item,
open: false,
play: false,
playAllFromHere: false,
queueAllFromHere: false,
positionTo: button,
cancelTimer: false,
record: false,
deleteItem: true === item.IsFolder,
shuffle: false,
instantMix: false,
user: user,
share: true
};
return options;
}
function getProgramScheduleHtml(items) {
var html = '';
html += '<div is="emby-itemscontainer" class="itemsContainer vertical-list" data-contextmenu="false">';
html += listView.getListViewHtml({
items: items,
enableUserDataButtons: false,
image: true,
imageSource: 'channel',
showProgramDateTime: true,
showChannel: false,
mediaInfo: false,
action: 'none',
moreButton: false,
recordButton: false
});
return html + '</div>';
}
function renderSeriesTimerSchedule(page, apiClient, seriesTimerId) {
apiClient.getLiveTvTimers({
UserId: apiClient.getCurrentUserId(),
ImageTypeLimit: 1,
EnableImageTypes: 'Primary,Backdrop,Thumb',
SortBy: 'StartDate',
EnableTotalRecordCount: false,
EnableUserData: false,
SeriesTimerId: seriesTimerId,
Fields: 'ChannelInfo,ChannelImage'
}).then(function (result) {
if (result.Items.length && result.Items[0].SeriesTimerId != seriesTimerId) {
result.Items = [];
}
var html = getProgramScheduleHtml(result.Items);
var scheduleTab = page.querySelector('.seriesTimerSchedule');
scheduleTab.innerHTML = html;
imageLoader.lazyChildren(scheduleTab);
});
}
function renderTimerEditor(page, item, apiClient, user) {
if ('Recording' !== item.Type || !user.Policy.EnableLiveTvManagement || !item.TimerId || 'InProgress' !== item.Status) {
return void hideAll(page, 'btnCancelTimer');
}
hideAll(page, 'btnCancelTimer', true);
}
function renderSeriesTimerEditor(page, item, apiClient, user) {
if ('SeriesTimer' !== item.Type) {
return void hideAll(page, 'btnCancelSeriesTimer');
}
if (user.Policy.EnableLiveTvManagement) {
require(['seriesRecordingEditor'], function (seriesRecordingEditor) {
seriesRecordingEditor.embed(item, apiClient.serverId(), {
context: page.querySelector('.seriesRecordingEditor')
});
});
page.querySelector('.seriesTimerScheduleSection').classList.remove('hide');
hideAll(page, 'btnCancelSeriesTimer', true);
return void renderSeriesTimerSchedule(page, apiClient, item.Id);
}
page.querySelector('.seriesTimerScheduleSection').classList.add('hide');
return void hideAll(page, 'btnCancelSeriesTimer');
}
function renderTrackSelections(page, instance, item, forceReload) {
var select = page.querySelector('.selectSource');
if (!item.MediaSources || !itemHelper.supportsMediaSourceSelection(item) || -1 === playbackManager.getSupportedCommands().indexOf('PlayMediaSource') || !playbackManager.canPlay(item)) {
page.querySelector('.trackSelections').classList.add('hide');
select.innerHTML = '';
page.querySelector('.selectVideo').innerHTML = '';
page.querySelector('.selectAudio').innerHTML = '';
page.querySelector('.selectSubtitles').innerHTML = '';
return;
}
playbackManager.getPlaybackMediaSources(item).then(function (mediaSources) {
instance._currentPlaybackMediaSources = mediaSources;
page.querySelector('.trackSelections').classList.remove('hide');
select.setLabel(globalize.translate('LabelVersion'));
var currentValue = select.value;
var selectedId = mediaSources[0].Id;
select.innerHTML = mediaSources.map(function (v) {
var selected = v.Id === selectedId ? ' selected' : '';
return '<option value="' + v.Id + '"' + selected + '>' + v.Name + '</option>';
}).join('');
if (mediaSources.length > 1) {
page.querySelector('.selectSourceContainer').classList.remove('hide');
} else {
page.querySelector('.selectSourceContainer').classList.add('hide');
}
if (select.value !== currentValue || forceReload) {
renderVideoSelections(page, mediaSources);
renderAudioSelections(page, mediaSources);
renderSubtitleSelections(page, mediaSources);
}
});
}
function renderVideoSelections(page, mediaSources) {
var mediaSourceId = page.querySelector('.selectSource').value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return 'Video' === m.Type;
});
var select = page.querySelector('.selectVideo');
select.setLabel(globalize.translate('LabelVideo'));
var selectedId = tracks.length ? tracks[0].Index : -1;
select.innerHTML = tracks.map(function (v) {
var selected = v.Index === selectedId ? ' selected' : '';
var titleParts = [];
var resolutionText = mediaInfo.getResolutionText(v);
if (resolutionText) {
titleParts.push(resolutionText);
}
if (v.Codec) {
titleParts.push(v.Codec.toUpperCase());
}
return '<option value="' + v.Index + '" ' + selected + '>' + (v.DisplayTitle || titleParts.join(' ')) + '</option>';
}).join('');
select.setAttribute('disabled', 'disabled');
if (tracks.length) {
page.querySelector('.selectVideoContainer').classList.remove('hide');
} else {
page.querySelector('.selectVideoContainer').classList.add('hide');
}
}
function renderAudioSelections(page, mediaSources) {
var mediaSourceId = page.querySelector('.selectSource').value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return 'Audio' === m.Type;
});
var select = page.querySelector('.selectAudio');
select.setLabel(globalize.translate('LabelAudio'));
var selectedId = mediaSource.DefaultAudioStreamIndex;
select.innerHTML = tracks.map(function (v) {
var selected = v.Index === selectedId ? ' selected' : '';
return '<option value="' + v.Index + '" ' + selected + '>' + v.DisplayTitle + '</option>';
}).join('');
if (tracks.length > 1) {
select.removeAttribute('disabled');
} else {
select.setAttribute('disabled', 'disabled');
}
if (tracks.length) {
page.querySelector('.selectAudioContainer').classList.remove('hide');
} else {
page.querySelector('.selectAudioContainer').classList.add('hide');
}
}
function renderSubtitleSelections(page, mediaSources) {
var mediaSourceId = page.querySelector('.selectSource').value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return 'Subtitle' === m.Type;
});
var select = page.querySelector('.selectSubtitles');
select.setLabel(globalize.translate('LabelSubtitles'));
var selectedId = null == mediaSource.DefaultSubtitleStreamIndex ? -1 : mediaSource.DefaultSubtitleStreamIndex;
if (tracks.length) {
var selected = -1 === selectedId ? ' selected' : '';
select.innerHTML = '<option value="-1">' + globalize.translate('Off') + '</option>' + tracks.map(function (v) {
selected = v.Index === selectedId ? ' selected' : '';
return '<option value="' + v.Index + '" ' + selected + '>' + v.DisplayTitle + '</option>';
}).join('');
page.querySelector('.selectSubtitlesContainer').classList.remove('hide');
} else {
select.innerHTML = '';
page.querySelector('.selectSubtitlesContainer').classList.add('hide');
}
}
function reloadPlayButtons(page, item) {
var canPlay = false;
if ('Program' == item.Type) {
var now = new Date();
if (now >= datetime.parseISO8601Date(item.StartDate, true) && now < datetime.parseISO8601Date(item.EndDate, true)) {
hideAll(page, 'btnPlay', true);
canPlay = true;
} else {
hideAll(page, 'btnPlay');
}
hideAll(page, 'btnResume');
hideAll(page, 'btnInstantMix');
hideAll(page, 'btnShuffle');
} else if (playbackManager.canPlay(item)) {
hideAll(page, 'btnPlay', true);
var enableInstantMix = -1 !== ['Audio', 'MusicAlbum', 'MusicGenre', 'MusicArtist'].indexOf(item.Type);
hideAll(page, 'btnInstantMix', enableInstantMix);
var enableShuffle = item.IsFolder || -1 !== ['MusicAlbum', 'MusicGenre', 'MusicArtist'].indexOf(item.Type);
hideAll(page, 'btnShuffle', enableShuffle);
canPlay = true;
hideAll(page, 'btnResume', item.UserData && item.UserData.PlaybackPositionTicks > 0);
} else {
hideAll(page, 'btnPlay');
hideAll(page, 'btnResume');
hideAll(page, 'btnInstantMix');
hideAll(page, 'btnShuffle');
}
return canPlay;
}
function reloadUserDataButtons(page, item) {
var i;
var length;
var btnPlaystates = page.querySelectorAll('.btnPlaystate');
for (i = 0, length = btnPlaystates.length; i < length; i++) {
var btnPlaystate = btnPlaystates[i];
if (itemHelper.canMarkPlayed(item)) {
btnPlaystate.classList.remove('hide');
btnPlaystate.setItem(item);
} else {
btnPlaystate.classList.add('hide');
btnPlaystate.setItem(null);
}
}
var btnUserRatings = page.querySelectorAll('.btnUserRating');
for (i = 0, length = btnUserRatings.length; i < length; i++) {
var btnUserRating = btnUserRatings[i];
if (itemHelper.canRate(item)) {
btnUserRating.classList.remove('hide');
btnUserRating.setItem(item);
} else {
btnUserRating.classList.add('hide');
btnUserRating.setItem(null);
}
}
}
function getArtistLinksHtml(artists, serverId, context) {
var html = [];
for (var i = 0, length = artists.length; i < length; i++) {
var artist = artists[i];
var href = appRouter.getRouteUrl(artist, {
context: context,
itemType: 'MusicArtist',
serverId: serverId
});
html.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + href + '">' + artist.Name + '</a>');
}
return html.join(' / ');
}
function renderName(item, container, isStatic, context) {
var parentRoute;
var parentNameHtml = [];
var parentNameLast = false;
if (item.AlbumArtists) {
parentNameHtml.push(getArtistLinksHtml(item.AlbumArtists, item.ServerId, context));
parentNameLast = true;
} else if (item.ArtistItems && item.ArtistItems.length && 'MusicVideo' === item.Type) {
parentNameHtml.push(getArtistLinksHtml(item.ArtistItems, item.ServerId, context));
parentNameLast = true;
} else if (item.SeriesName && 'Episode' === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeriesId,
Name: item.SeriesName,
Type: 'Series',
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeriesName + '</a>');
} else if (item.IsSeries || item.EpisodeTitle) {
parentNameHtml.push(item.Name);
}
if (item.SeriesName && 'Season' === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeriesId,
Name: item.SeriesName,
Type: 'Series',
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeriesName + '</a>');
} else if (null != item.ParentIndexNumber && 'Episode' === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeasonId,
Name: item.SeasonName,
Type: 'Season',
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeasonName + '</a>');
} else if (null != item.ParentIndexNumber && item.IsSeries) {
parentNameHtml.push(item.SeasonName || 'S' + item.ParentIndexNumber);
} else if (item.Album && item.AlbumId && ('MusicVideo' === item.Type || 'Audio' === item.Type)) {
parentRoute = appRouter.getRouteUrl({
Id: item.AlbumId,
Name: item.Album,
Type: 'MusicAlbum',
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.Album + '</a>');
} else if (item.Album) {
parentNameHtml.push(item.Album);
}
// FIXME: This whole section needs some refactoring, so it becomes easier to scale across all form factors. See GH #1022
var html = '';
var tvShowHtml = parentNameHtml[0];
var tvSeasonHtml = parentNameHtml[1];
if (parentNameHtml.length) {
if (parentNameLast) {
// Music
if (layoutManager.mobile) {
html = '<h3 class="parentName" style="margin: .25em 0;">' + parentNameHtml.join('</br>') + '</h3>';
} else {
html = '<h3 class="parentName" style="margin: .25em 0;">' + parentNameHtml.join(' - ') + '</h3>';
}
} else {
if (layoutManager.mobile) {
html = '<h1 class="parentName" style="margin: .1em 0 .25em;">' + parentNameHtml.join('</br>') + '</h1>';
} else {
html = '<h1 class="parentName" style="margin: .1em 0 .25em;">' + tvShowHtml + '</h1>';
}
}
}
var name = itemHelper.getDisplayName(item, {
includeParentInfo: false
});
var offset = parentNameLast ? '.25em' : '.5em';
if (html && !parentNameLast) {
if (!layoutManager.mobile && tvSeasonHtml) {
html += '<h3 class="itemName infoText" style="margin: .25em 0 .5em;">' + tvSeasonHtml + ' - ' + name + '</h3>';
} else {
html += '<h3 class="itemName infoText" style="margin: .25em 0 .5em;">' + name + '</h3>';
}
} else {
html = '<h1 class="itemName infoText" style="margin: .1em 0 ' + offset + ';">' + name + '</h1>' + html;
}
if (item.OriginalTitle && item.OriginalTitle != item.Name) {
html += '<h4 class="itemName infoText" style="margin: -' + offset + ' 0 0;">' + item.OriginalTitle + '</h4>';
}
container.innerHTML = html;
if (html.length) {
container.classList.remove('hide');
} else {
container.classList.add('hide');
}
}
function setTrailerButtonVisibility(page, item) {
if ((item.LocalTrailerCount || item.RemoteTrailers && item.RemoteTrailers.length) && -1 !== playbackManager.getSupportedCommands().indexOf('PlayTrailers')) {
hideAll(page, 'btnPlayTrailer', true);
} else {
hideAll(page, 'btnPlayTrailer');
}
}
function renderBackdrop(item) {
if (dom.getWindowSize().innerWidth >= 1000) {
backdrop.setBackdrops([item]);
} else {
backdrop.clear();
}
}
function renderDetailPageBackdrop(page, item, apiClient) {
var imgUrl;
var hasbackdrop = false;
var itemBackdropElement = page.querySelector('#itemBackdrop');
var usePrimaryImage = item.MediaType === 'Video' && item.Type !== 'Movie' && item.Type !== 'Trailer' ||
item.MediaType && item.MediaType !== 'Video' ||
item.Type === 'MusicAlbum' ||
item.Type === 'Person';
if (!layoutManager.mobile && !userSettings.detailsBanner()) {
return false;
}
if ('Program' === item.Type && item.ImageTags && item.ImageTags.Thumb) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: 'Thumb',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.ImageTags.Thumb
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else if (usePrimaryImage && item.ImageTags && item.ImageTags.Primary) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: 'Primary',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.ImageTags.Primary
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else if (item.BackdropImageTags && item.BackdropImageTags.length) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: 'Backdrop',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.BackdropImageTags[0]
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else if (item.ParentBackdropItemId && item.ParentBackdropImageTags && item.ParentBackdropImageTags.length) {
imgUrl = apiClient.getScaledImageUrl(item.ParentBackdropItemId, {
type: 'Backdrop',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.ParentBackdropImageTags[0]
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else if (item.ImageTags && item.ImageTags.Thumb) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: 'Thumb',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.ImageTags.Thumb
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else {
itemBackdropElement.style.backgroundImage = '';
}
if ('Person' === item.Type) {
// FIXME: This hides the backdrop on all persons to fix a margin issue. Ideally, a proper fix should be made.
page.classList.add('noBackdrop');
itemBackdropElement.classList.add('personBackdrop');
} else {
itemBackdropElement.classList.remove('personBackdrop');
}
return hasbackdrop;
}
function reloadFromItem(instance, page, params, item, user) {
var context = params.context;
page.querySelector('.detailPagePrimaryContainer').classList.add('detailSticky');
renderName(item, page.querySelector('.nameContainer'), false, context);
var apiClient = connectionManager.getApiClient(item.ServerId);
renderSeriesTimerEditor(page, item, apiClient, user);
renderTimerEditor(page, item, apiClient, user);
renderImage(page, item, apiClient, user);
renderLogo(page, item, apiClient);
Emby.Page.setTitle('');
setInitialCollapsibleState(page, item, apiClient, context, user);
renderDetails(page, item, apiClient, context);
renderTrackSelections(page, instance, item);
renderBackdrop(item);
renderDetailPageBackdrop(page, item, apiClient);
var canPlay = reloadPlayButtons(page, item);
setTrailerButtonVisibility(page, item);
if (item.CanDelete && !item.IsFolder) {
hideAll(page, 'btnDeleteItem', true);
} else {
hideAll(page, 'btnDeleteItem');
}
if ('Program' !== item.Type || canPlay) {
hideAll(page, 'mainDetailButtons', true);
} else {
hideAll(page, 'mainDetailButtons');
}
showRecordingFields(instance, page, item, user);
var groupedVersions = (item.MediaSources || []).filter(function (g) {
return 'Grouping' == g.Type;
});
if (user.Policy.IsAdministrator && groupedVersions.length) {
page.querySelector('.btnSplitVersions').classList.remove('hide');
} else {
page.querySelector('.btnSplitVersions').classList.add('hide');
}
if (itemContextMenu.getCommands(getContextMenuOptions(item, user)).length) {
hideAll(page, 'btnMoreCommands', true);
} else {
hideAll(page, 'btnMoreCommands');
}
var itemBirthday = page.querySelector('#itemBirthday');
if ('Person' == item.Type && item.PremiereDate) {
try {
var birthday = datetime.parseISO8601Date(item.PremiereDate, true).toDateString();
itemBirthday.classList.remove('hide');
itemBirthday.innerHTML = globalize.translate('BirthDateValue', birthday);
} catch (err) {
itemBirthday.classList.add('hide');
}
} else {
itemBirthday.classList.add('hide');
}
var itemDeathDate = page.querySelector('#itemDeathDate');
if ('Person' == item.Type && item.EndDate) {
try {
var deathday = datetime.parseISO8601Date(item.EndDate, true).toDateString();
itemDeathDate.classList.remove('hide');
itemDeathDate.innerHTML = globalize.translate('DeathDateValue', deathday);
} catch (err) {
itemDeathDate.classList.add('hide');
}
} else {
itemDeathDate.classList.add('hide');
}
var itemBirthLocation = page.querySelector('#itemBirthLocation');
if ('Person' == item.Type && item.ProductionLocations && item.ProductionLocations.length) {
var gmap = '<a is="emby-linkbutton" class="button-link textlink" target="_blank" href="https://maps.google.com/maps?q=' + item.ProductionLocations[0] + '">' + item.ProductionLocations[0] + '</a>';
itemBirthLocation.classList.remove('hide');
itemBirthLocation.innerHTML = globalize.translate('BirthPlaceValue', gmap);
} else {
itemBirthLocation.classList.add('hide');
}
setPeopleHeader(page, item);
loading.hide();
if (item.Type === 'Book') {
hideAll(page, 'btnDownload', true);
}
require(['autoFocuser'], function (autoFocuser) {
autoFocuser.autoFocus(page);
});
}
function logoImageUrl(item, apiClient, options) {
options = options || {};
options.type = 'Logo';
if (item.ImageTags && item.ImageTags.Logo) {
options.tag = item.ImageTags.Logo;
return apiClient.getScaledImageUrl(item.Id, options);
}
if (item.ParentLogoImageTag) {
options.tag = item.ParentLogoImageTag;
return apiClient.getScaledImageUrl(item.ParentLogoItemId, options);
}
return null;
}
function renderLogo(page, item, apiClient) {
var url = logoImageUrl(item, apiClient, {
maxWidth: 400
});
var detailLogo = page.querySelector('.detailLogo');
if (!layoutManager.mobile && !userSettings.enableBackdrops()) {
detailLogo.classList.add('hide');
} else if (url) {
detailLogo.classList.remove('hide');
detailLogo.classList.add('lazy');
detailLogo.setAttribute('data-src', url);
imageLoader.lazyImage(detailLogo);
} else {
detailLogo.classList.add('hide');
}
}
function showRecordingFields(instance, page, item, user) {
if (!instance.currentRecordingFields) {
var recordingFieldsElement = page.querySelector('.recordingFields');
if ('Program' == item.Type && user.Policy.EnableLiveTvManagement) {
require(['recordingFields'], function (recordingFields) {
instance.currentRecordingFields = new recordingFields({
parent: recordingFieldsElement,
programId: item.Id,
serverId: item.ServerId
});
recordingFieldsElement.classList.remove('hide');
});
} else {
recordingFieldsElement.classList.add('hide');
recordingFieldsElement.innerHTML = '';
}
}
}
function renderLinks(linksElem, item) {
var html = [];
var links = [];
if (!layoutManager.tv && item.HomePageUrl) {
links.push('<a style="color:inherit;" is="emby-linkbutton" class="button-link" href="' + item.HomePageUrl + '" target="_blank">' + globalize.translate('ButtonWebsite') + '</a>');
}
if (item.ExternalUrls) {
for (var i = 0, length = item.ExternalUrls.length; i < length; i++) {
var url = item.ExternalUrls[i];
links.push('<a style="color:inherit;" is="emby-linkbutton" class="button-link" href="' + url.Url + '" target="_blank">' + url.Name + '</a>');
}
}
if (links.length) {
html.push(links.join(', '));
}
linksElem.innerHTML = html.join(', ');
if (html.length) {
linksElem.classList.remove('hide');
} else {
linksElem.classList.add('hide');
}
}
function renderDetailImage(page, elem, item, apiClient, editable, imageLoader, indicators) {
if ('SeriesTimer' === item.Type || 'Program' === item.Type) {
editable = false;
}
elem.classList.add('detailimg-hidemobile');
var imageTags = item.ImageTags || {};
if (item.PrimaryImageTag) {
imageTags.Primary = item.PrimaryImageTag;
}
var url;
var html = '';
var shape = 'portrait';
var detectRatio = false;
/* In the following section, getScreenWidth() is multiplied by 0.5 as the posters
are 25vw and we need double the resolution to counter Skia's scaling. */
// TODO: Find a reliable way to get the poster width
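// Worked example: on a 1920px-wide screen the request below becomes
// maxWidth = Math.round(1920 * 0.5) = 960px, i.e. double the ~480px
// (25vw) slot the poster actually occupies.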
if (imageTags.Primary) {
url = apiClient.getScaledImageUrl(item.Id, {
type: 'Primary',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.ImageTags.Primary
});
detectRatio = true;
} else if (item.BackdropImageTags && item.BackdropImageTags.length) {
url = apiClient.getScaledImageUrl(item.Id, {
type: 'Backdrop',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.BackdropImageTags[0]
});
shape = 'thumb';
} else if (imageTags.Thumb) {
url = apiClient.getScaledImageUrl(item.Id, {
type: 'Thumb',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.ImageTags.Thumb
});
shape = 'thumb';
} else if (imageTags.Disc) {
url = apiClient.getScaledImageUrl(item.Id, {
type: 'Disc',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.ImageTags.Disc
});
shape = 'square';
} else if (item.AlbumId && item.AlbumPrimaryImageTag) {
url = apiClient.getScaledImageUrl(item.AlbumId, {
type: 'Primary',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.AlbumPrimaryImageTag
});
shape = 'square';
} else if (item.SeriesId && item.SeriesPrimaryImageTag) {
url = apiClient.getScaledImageUrl(item.SeriesId, {
type: 'Primary',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.SeriesPrimaryImageTag
});
} else if (item.ParentPrimaryImageItemId && item.ParentPrimaryImageTag) {
url = apiClient.getScaledImageUrl(item.ParentPrimaryImageItemId, {
type: 'Primary',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.ParentPrimaryImageTag
});
}
if (editable && url === undefined) {
html += "<a class='itemDetailGalleryLink itemDetailImage defaultCardBackground defaultCardBackground" + cardBuilder.getDefaultBackgroundClass(item.Name) + "' is='emby-linkbutton' style='display:block;margin:0;padding:0;' href='#'>";
} else if (!editable && url === undefined) {
html += "<div class='itemDetailGalleryLink itemDetailImage defaultCardBackground defaultCardBackground" + cardBuilder.getDefaultBackgroundClass(item.Name) + "' is='emby-linkbutton' style='display:block;margin:0;padding:0;' href='#'>";
} else if (editable) {
html += "<a class='itemDetailGalleryLink' is='emby-linkbutton' style='display:block;margin:0;padding:0;' href='#'>";
}
if (url) {
html += "<img class='itemDetailImage lazy' src='data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=' />";
}
if (url === undefined) {
html += cardBuilder.getDefaultText(item);
}
if (editable) {
html += '</a>';
} else if (!editable && url === undefined) {
html += '</div>';
}
var progressHtml = item.IsFolder || !item.UserData ? '' : indicators.getProgressBarHtml(item);
html += '<div class="detailImageProgressContainer">';
if (progressHtml) {
html += progressHtml;
}
html += '</div>';
elem.innerHTML = html;
if (detectRatio && item.PrimaryImageAspectRatio) {
if (item.PrimaryImageAspectRatio >= 1.48) {
shape = 'thumb';
} else if (item.PrimaryImageAspectRatio >= 0.85 && item.PrimaryImageAspectRatio <= 1.34) {
shape = 'square';
}
}
if ('thumb' == shape) {
elem.classList.add('thumbDetailImageContainer');
elem.classList.remove('portraitDetailImageContainer');
elem.classList.remove('squareDetailImageContainer');
} else if ('square' == shape) {
elem.classList.remove('thumbDetailImageContainer');
elem.classList.remove('portraitDetailImageContainer');
elem.classList.add('squareDetailImageContainer');
} else {
elem.classList.remove('thumbDetailImageContainer');
elem.classList.add('portraitDetailImageContainer');
elem.classList.remove('squareDetailImageContainer');
}
if (url) {
imageLoader.lazyImage(elem.querySelector('img'), url);
}
}
function renderImage(page, item, apiClient, user) {
renderDetailImage(
page,
page.querySelector('.detailImageContainer'),
item,
apiClient,
user.Policy.IsAdministrator && 'Photo' != item.MediaType,
imageLoader,
indicators
);
}
function refreshDetailImageUserData(elem, item) {
elem.querySelector('.detailImageProgressContainer').innerHTML = indicators.getProgressBarHtml(item);
}
function refreshImage(page, item) {
refreshDetailImageUserData(page.querySelector('.detailImageContainer'), item);
}
function setPeopleHeader(page, item) {
if ('Audio' == item.MediaType || 'MusicAlbum' == item.Type || 'Book' == item.MediaType || 'Photo' == item.MediaType) {
page.querySelector('#peopleHeader').innerHTML = globalize.translate('HeaderPeople');
} else {
page.querySelector('#peopleHeader').innerHTML = globalize.translate('HeaderCastAndCrew');
}
}
function renderNextUp(page, item, user) {
var section = page.querySelector('.nextUpSection');
if ('Series' != item.Type) {
return void section.classList.add('hide');
}
connectionManager.getApiClient(item.ServerId).getNextUpEpisodes({
SeriesId: item.Id,
UserId: user.Id
}).then(function (result) {
if (result.Items.length) {
section.classList.remove('hide');
} else {
section.classList.add('hide');
}
var html = cardBuilder.getCardsHtml({
items: result.Items,
shape: 'overflowBackdrop',
showTitle: true,
displayAsSpecial: 'Season' == item.Type && item.IndexNumber,
overlayText: false,
centerText: true,
overlayPlayButton: true
});
var itemsContainer = section.querySelector('.nextUpItems');
itemsContainer.innerHTML = html;
imageLoader.lazyChildren(itemsContainer);
});
}
function setInitialCollapsibleState(page, item, apiClient, context, user) {
page.querySelector('.collectionItems').innerHTML = '';
if ('Playlist' == item.Type) {
page.querySelector('#childrenCollapsible').classList.remove('hide');
renderPlaylistItems(page, item);
} else if ('Studio' == item.Type || 'Person' == item.Type || 'Genre' == item.Type || 'MusicGenre' == item.Type || 'MusicArtist' == item.Type) {
page.querySelector('#childrenCollapsible').classList.remove('hide');
renderItemsByName(page, item);
} else if (item.IsFolder) {
if ('BoxSet' == item.Type) {
page.querySelector('#childrenCollapsible').classList.add('hide');
}
renderChildren(page, item);
} else {
page.querySelector('#childrenCollapsible').classList.add('hide');
}
if ('Series' == item.Type) {
renderSeriesSchedule(page, item);
renderNextUp(page, item, user);
} else {
page.querySelector('.nextUpSection').classList.add('hide');
}
renderScenes(page, item);
if (item.SpecialFeatureCount && 0 != item.SpecialFeatureCount && 'Series' != item.Type) {
page.querySelector('#specialsCollapsible').classList.remove('hide');
renderSpecials(page, item, user, 6);
} else {
page.querySelector('#specialsCollapsible').classList.add('hide');
}
renderCast(page, item);
if (item.PartCount && item.PartCount > 1) {
page.querySelector('#additionalPartsCollapsible').classList.remove('hide');
renderAdditionalParts(page, item, user);
} else {
page.querySelector('#additionalPartsCollapsible').classList.add('hide');
}
if ('MusicAlbum' == item.Type) {
renderMusicVideos(page, item, user);
} else {
page.querySelector('#musicVideosCollapsible').classList.add('hide');
}
}
function toggleLineClamp(clampTarget, e) {
var expandButton = e.target;
var clampClassName = 'detail-clamp-text';
if (clampTarget.classList.contains(clampClassName)) {
clampTarget.classList.remove(clampClassName);
expandButton.innerHTML = globalize.translate('ShowLess');
} else {
clampTarget.classList.add(clampClassName);
expandButton.innerHTML = globalize.translate('ShowMore');
}
}
function renderOverview(elems, item) {
for (var i = 0, length = elems.length; i < length; i++) {
var elem = elems[i];
var overview = item.Overview || '';
if (overview) {
elem.innerHTML = overview;
elem.classList.remove('hide');
elem.classList.add('detail-clamp-text');
// Grab the sibling element to control the expand state
var expandButton = elem.parentElement.querySelector('.overview-expand');
// Detect if we have overflow of text. Based on this StackOverflow answer
// https://stackoverflow.com/a/35157976
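// For example, clamped text whose full content is 120px tall inside a
// 60px box gives |120 - 60| = 60 > 2, so the expand button is shown.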
if (Math.abs(elem.scrollHeight - elem.offsetHeight) > 2) {
expandButton.classList.remove('hide');
} else {
expandButton.classList.add('hide');
}
expandButton.addEventListener('click', toggleLineClamp.bind(null, elem));
var anchors = elem.querySelectorAll('a');
for (var j = 0, length2 = anchors.length; j < length2; j++) {
anchors[j].setAttribute('target', '_blank');
}
} else {
elem.innerHTML = '';
elem.classList.add('hide');
}
}
}
function renderGenres(page, item, context) {
context = context || inferContext(item);
var type;
var genres = item.GenreItems || [];
switch (context) {
case 'music':
type = 'MusicGenre';
break;
default:
type = 'Genre';
}
var html = genres.map(function (p) {
return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + appRouter.getRouteUrl({
Name: p.Name,
Type: type,
ServerId: item.ServerId,
Id: p.Id
}, {
context: context
}) + '">' + p.Name + '</a>';
}).join(', ');
var genresLabel = page.querySelector('.genresLabel');
genresLabel.innerHTML = globalize.translate(genres.length > 1 ? 'Genres' : 'Genre');
var genresValue = page.querySelector('.genres');
genresValue.innerHTML = html;
var genresGroup = page.querySelector('.genresGroup');
if (genres.length) {
genresGroup.classList.remove('hide');
} else {
genresGroup.classList.add('hide');
}
}
function renderDirector(page, item, context) {
var directors = (item.People || []).filter(function (p) {
return 'Director' === p.Type;
});
var html = directors.map(function (p) {
return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + appRouter.getRouteUrl({
Name: p.Name,
Type: 'Person',
ServerId: item.ServerId,
Id: p.Id
}, {
context: context
}) + '">' + p.Name + '</a>';
}).join(', ');
var directorsLabel = page.querySelector('.directorsLabel');
directorsLabel.innerHTML = globalize.translate(directors.length > 1 ? 'Directors' : 'Director');
var directorsValue = page.querySelector('.directors');
directorsValue.innerHTML = html;
var directorsGroup = page.querySelector('.directorsGroup');
if (directors.length) {
directorsGroup.classList.remove('hide');
} else {
directorsGroup.classList.add('hide');
}
}
function renderDetails(page, item, apiClient, context, isStatic) {
renderSimilarItems(page, item, context);
renderMoreFromSeason(page, item, apiClient);
renderMoreFromArtist(page, item, apiClient);
renderDirector(page, item, context);
renderGenres(page, item, context);
renderChannelGuide(page, apiClient, item);
var taglineElement = page.querySelector('.tagline');
if (item.Taglines && item.Taglines.length) {
taglineElement.classList.remove('hide');
taglineElement.innerHTML = item.Taglines[0];
} else {
taglineElement.classList.add('hide');
}
var overview = page.querySelector('.overview');
var externalLinksElem = page.querySelector('.itemExternalLinks');
renderOverview([overview], item);
var i;
var itemMiscInfo;
itemMiscInfo = page.querySelectorAll('.itemMiscInfo-primary');
for (i = 0; i < itemMiscInfo.length; i++) {
mediaInfo.fillPrimaryMediaInfo(itemMiscInfo[i], item, {
interactive: true,
episodeTitle: false,
subtitles: false
});
if (itemMiscInfo[i].innerHTML && 'SeriesTimer' !== item.Type) {
itemMiscInfo[i].classList.remove('hide');
} else {
itemMiscInfo[i].classList.add('hide');
}
}
itemMiscInfo = page.querySelectorAll('.itemMiscInfo-secondary');
for (i = 0; i < itemMiscInfo.length; i++) {
mediaInfo.fillSecondaryMediaInfo(itemMiscInfo[i], item, {
interactive: true
});
if (itemMiscInfo[i].innerHTML && 'SeriesTimer' !== item.Type) {
itemMiscInfo[i].classList.remove('hide');
} else {
itemMiscInfo[i].classList.add('hide');
}
}
reloadUserDataButtons(page, item);
renderLinks(externalLinksElem, item);
renderTags(page, item);
renderSeriesAirTime(page, item, isStatic);
}
function enableScrollX() {
return browser.mobile && screen.availWidth <= 1000;
}
function getPortraitShape(scrollX) {
if (null == scrollX) {
scrollX = enableScrollX();
}
return scrollX ? 'overflowPortrait' : 'portrait';
}
function getSquareShape(scrollX) {
if (null == scrollX) {
scrollX = enableScrollX();
}
return scrollX ? 'overflowSquare' : 'square';
}
function renderMoreFromSeason(view, item, apiClient) {
var section = view.querySelector('.moreFromSeasonSection');
if (section) {
if ('Episode' !== item.Type || !item.SeasonId || !item.SeriesId) {
return void section.classList.add('hide');
}
var userId = apiClient.getCurrentUserId();
apiClient.getEpisodes(item.SeriesId, {
SeasonId: item.SeasonId,
UserId: userId,
Fields: 'ItemCounts,PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount'
}).then(function (result) {
if (result.Items.length < 2) {
return void section.classList.add('hide');
}
section.classList.remove('hide');
section.querySelector('h2').innerHTML = globalize.translate('MoreFromValue', item.SeasonName);
var itemsContainer = section.querySelector('.itemsContainer');
cardBuilder.buildCards(result.Items, {
parentContainer: section,
itemsContainer: itemsContainer,
shape: 'autooverflow',
sectionTitleTagName: 'h2',
scalable: true,
showTitle: true,
overlayText: false,
centerText: true,
includeParentInfoInTitle: false,
allowBottomPadding: false
});
var card = itemsContainer.querySelector('.card[data-id="' + item.Id + '"]');
if (card) {
setTimeout(function () {
section.querySelector('.emby-scroller').toStart(card.previousSibling || card, true);
}, 100);
}
});
}
}
function renderMoreFromArtist(view, item, apiClient) {
var section = view.querySelector('.moreFromArtistSection');
if (section) {
if ('MusicArtist' === item.Type) {
if (!apiClient.isMinServerVersion('3.4.1.19')) {
return void section.classList.add('hide');
}
} else if ('MusicAlbum' !== item.Type || !item.AlbumArtists || !item.AlbumArtists.length) {
return void section.classList.add('hide');
}
var query = {
IncludeItemTypes: 'MusicAlbum',
Recursive: true,
ExcludeItemIds: item.Id,
SortBy: 'ProductionYear,SortName',
SortOrder: 'Descending'
};
if ('MusicArtist' === item.Type) {
query.ContributingArtistIds = item.Id;
} else if (apiClient.isMinServerVersion('3.4.1.18')) {
query.AlbumArtistIds = item.AlbumArtists[0].Id;
} else {
query.ArtistIds = item.AlbumArtists[0].Id;
}
apiClient.getItems(apiClient.getCurrentUserId(), query).then(function (result) {
if (!result.Items.length) {
return void section.classList.add('hide');
}
section.classList.remove('hide');
if ('MusicArtist' === item.Type) {
section.querySelector('h2').innerHTML = globalize.translate('HeaderAppearsOn');
} else {
section.querySelector('h2').innerHTML = globalize.translate('MoreFromValue', item.AlbumArtists[0].Name);
}
cardBuilder.buildCards(result.Items, {
parentContainer: section,
itemsContainer: section.querySelector('.itemsContainer'),
shape: 'autooverflow',
sectionTitleTagName: 'h2',
scalable: true,
coverImage: 'MusicArtist' === item.Type || 'MusicAlbum' === item.Type,
showTitle: true,
showParentTitle: false,
centerText: true,
overlayText: false,
overlayPlayButton: true,
showYear: true
});
});
}
}
function renderSimilarItems(page, item, context) {
var similarCollapsible = page.querySelector('#similarCollapsible');
if (similarCollapsible) {
if ('Movie' != item.Type && 'Trailer' != item.Type && 'Series' != item.Type && 'Program' != item.Type && 'Recording' != item.Type && 'MusicAlbum' != item.Type && 'MusicArtist' != item.Type && 'Playlist' != item.Type) {
return void similarCollapsible.classList.add('hide');
}
similarCollapsible.classList.remove('hide');
var apiClient = connectionManager.getApiClient(item.ServerId);
var options = {
userId: apiClient.getCurrentUserId(),
limit: 12,
fields: 'PrimaryImageAspectRatio,UserData,CanDelete'
};
if ('MusicAlbum' == item.Type && item.AlbumArtists && item.AlbumArtists.length) {
options.ExcludeArtistIds = item.AlbumArtists[0].Id;
}
apiClient.getSimilarItems(item.Id, options).then(function (result) {
if (!result.Items.length) {
return void similarCollapsible.classList.add('hide');
}
similarCollapsible.classList.remove('hide');
var html = '';
html += cardBuilder.getCardsHtml({
items: result.Items,
shape: 'autooverflow',
showParentTitle: 'MusicAlbum' == item.Type,
centerText: true,
showTitle: true,
context: context,
lazy: true,
showDetailsMenu: true,
coverImage: 'MusicAlbum' == item.Type || 'MusicArtist' == item.Type,
overlayPlayButton: true,
overlayText: false,
showYear: 'Movie' === item.Type || 'Trailer' === item.Type || 'Series' === item.Type
});
var similarContent = similarCollapsible.querySelector('.similarContent');
similarContent.innerHTML = html;
imageLoader.lazyChildren(similarContent);
});
}
}
function renderSeriesAirTime(page, item, isStatic) {
var seriesAirTime = page.querySelector('#seriesAirTime');
if ('Series' != item.Type) {
seriesAirTime.classList.add('hide');
return;
}
var html = '';
if (item.AirDays && item.AirDays.length) {
if (7 == item.AirDays.length) {
html += 'daily';
} else {
html += item.AirDays.map(function (a) {
return a + 's';
}).join(',');
}
}
if (item.AirTime) {
html += ' at ' + item.AirTime;
}
if (item.Studios.length) {
if (isStatic) {
html += ' on ' + item.Studios[0].Name;
} else {
var context = inferContext(item);
var href = appRouter.getRouteUrl(item.Studios[0], {
context: context,
itemType: 'Studio',
serverId: item.ServerId
});
html += ' on <a class="textlink button-link" is="emby-linkbutton" href="' + href + '">' + item.Studios[0].Name + '</a>';
}
}
if (html) {
html = ('Ended' == item.Status ? 'Aired ' : 'Airs ') + html;
seriesAirTime.innerHTML = html;
seriesAirTime.classList.remove('hide');
} else {
seriesAirTime.classList.add('hide');
}
}
function renderTags(page, item) {
var itemTags = page.querySelector('.itemTags');
var tagElements = [];
var tags = item.Tags || [];
if ('Program' === item.Type) {
tags = [];
}
for (var i = 0, length = tags.length; i < length; i++) {
tagElements.push(tags[i]);
}
if (tagElements.length) {
itemTags.innerHTML = globalize.translate('TagsValue', tagElements.join(', '));
itemTags.classList.remove('hide');
} else {
itemTags.innerHTML = '';
itemTags.classList.add('hide');
}
}
function renderChildren(page, item) {
var fields = 'ItemCounts,PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount';
var query = {
ParentId: item.Id,
Fields: fields
};
if ('BoxSet' !== item.Type) {
query.SortBy = 'SortName';
}
var promise;
var apiClient = connectionManager.getApiClient(item.ServerId);
var userId = apiClient.getCurrentUserId();
if ('Series' == item.Type) {
promise = apiClient.getSeasons(item.Id, {
userId: userId,
Fields: fields
});
} else if ('Season' == item.Type) {
fields += ',Overview';
promise = apiClient.getEpisodes(item.SeriesId, {
seasonId: item.Id,
userId: userId,
Fields: fields
});
} else if ('MusicArtist' == item.Type) {
query.SortBy = 'ProductionYear,SortName';
}
promise = promise || apiClient.getItems(apiClient.getCurrentUserId(), query);
promise.then(function (result) {
var html = '';
var scrollX = false;
var isList = false;
var childrenItemsContainer = page.querySelector('.childrenItemsContainer');
if ('MusicAlbum' == item.Type) {
html = listView.getListViewHtml({
items: result.Items,
smallIcon: true,
showIndex: true,
index: 'disc',
showIndexNumberLeft: true,
playFromHere: true,
action: 'playallfromhere',
image: false,
artist: 'auto',
containerAlbumArtists: item.AlbumArtists,
addToListButton: true
});
isList = true;
} else if ('Series' == item.Type) {
scrollX = enableScrollX();
html = cardBuilder.getCardsHtml({
items: result.Items,
shape: 'overflowPortrait',
showTitle: true,
centerText: true,
lazy: true,
overlayPlayButton: true,
allowBottomPadding: !scrollX
});
} else if ('Season' == item.Type || 'Episode' == item.Type) {
if ('Episode' !== item.Type) {
isList = true;
}
scrollX = 'Episode' == item.Type;
if (result.Items.length < 2 && 'Episode' === item.Type) {
return;
}
if ('Episode' === item.Type) {
html = cardBuilder.getCardsHtml({
items: result.Items,
shape: 'overflowBackdrop',
showTitle: true,
displayAsSpecial: 'Season' == item.Type && item.IndexNumber,
playFromHere: true,
overlayText: true,
lazy: true,
showDetailsMenu: true,
overlayPlayButton: true,
allowBottomPadding: !scrollX,
includeParentInfoInTitle: false
});
} else if ('Season' === item.Type) {
html = listView.getListViewHtml({
items: result.Items,
showIndexNumber: false,
enableOverview: true,
imageSize: 'large',
enableSideMediaInfo: false,
highlight: false,
action: layoutManager.tv ? 'resume' : 'none',
infoButton: true,
imagePlayButton: true,
includeParentInfoInTitle: false
});
}
}
if ('BoxSet' !== item.Type) {
page.querySelector('#childrenCollapsible').classList.remove('hide');
}
if (scrollX) {
childrenItemsContainer.classList.add('scrollX');
childrenItemsContainer.classList.add('hiddenScrollX');
childrenItemsContainer.classList.remove('vertical-wrap');
childrenItemsContainer.classList.remove('vertical-list');
} else {
childrenItemsContainer.classList.remove('scrollX');
childrenItemsContainer.classList.remove('hiddenScrollX');
childrenItemsContainer.classList.remove('smoothScrollX');
if (isList) {
childrenItemsContainer.classList.add('vertical-list');
childrenItemsContainer.classList.remove('vertical-wrap');
} else {
childrenItemsContainer.classList.add('vertical-wrap');
childrenItemsContainer.classList.remove('vertical-list');
}
}
childrenItemsContainer.innerHTML = html;
imageLoader.lazyChildren(childrenItemsContainer);
if ('BoxSet' == item.Type) {
var collectionItemTypes = [{
name: globalize.translate('HeaderVideos'),
mediaType: 'Video'
}, {
name: globalize.translate('HeaderSeries'),
type: 'Series'
}, {
name: globalize.translate('HeaderAlbums'),
type: 'MusicAlbum'
}, {
name: globalize.translate('HeaderBooks'),
type: 'Book'
}];
renderCollectionItems(page, item, collectionItemTypes, result.Items);
}
});
if ('Season' == item.Type) {
page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderEpisodes');
} else if ('Series' == item.Type) {
page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderSeasons');
} else if ('MusicAlbum' == item.Type) {
page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderTracks');
} else {
page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderItems');
}
if ('MusicAlbum' == item.Type || 'Season' == item.Type) {
page.querySelector('.childrenSectionHeader').classList.add('hide');
page.querySelector('#childrenCollapsible').classList.add('verticalSection-extrabottompadding');
} else {
page.querySelector('.childrenSectionHeader').classList.remove('hide');
}
}
function renderItemsByName(page, item) {
require(['scripts/itembynamedetailpage'], function () {
window.ItemsByName.renderItems(page, item);
});
}
function renderPlaylistItems(page, item) {
require(['scripts/playlistedit'], function () {
PlaylistViewer.render(page, item);
});
}
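// Groups the channel's programs by calendar day and renders one
// date-titled list section per day.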
function renderProgramsForChannel(page, result) {
var html = '';
var currentItems = [];
var currentStartDate = null;
for (var i = 0, length = result.Items.length; i < length; i++) {
var item = result.Items[i];
var itemStartDate = datetime.parseISO8601Date(item.StartDate);
if (!(currentStartDate && currentStartDate.toDateString() === itemStartDate.toDateString())) {
if (currentItems.length) {
html += '<div class="verticalSection verticalDetailSection">';
html += '<h2 class="sectionTitle padded-left">' + datetime.toLocaleDateString(currentStartDate, {
weekday: 'long',
month: 'long',
day: 'numeric'
}) + '</h2>';
html += '<div is="emby-itemscontainer" class="vertical-list padded-left padded-right">' + listView.getListViewHtml({
items: currentItems,
enableUserDataButtons: false,
showParentTitle: true,
image: false,
showProgramTime: true,
mediaInfo: false,
parentTitleWithTitle: true
}) + '</div></div>';
}
currentStartDate = itemStartDate;
currentItems = [];
}
currentItems.push(item);
}
if (currentItems.length) {
html += '<div class="verticalSection verticalDetailSection">';
html += '<h2 class="sectionTitle padded-left">' + datetime.toLocaleDateString(currentStartDate, {
weekday: 'long',
month: 'long',
day: 'numeric'
}) + '</h2>';
html += '<div is="emby-itemscontainer" class="vertical-list padded-left padded-right">' + listView.getListViewHtml({
items: currentItems,
enableUserDataButtons: false,
showParentTitle: true,
image: false,
showProgramTime: true,
mediaInfo: false,
parentTitleWithTitle: true
}) + '</div></div>';
}
page.querySelector('.programGuide').innerHTML = html;
}
function renderChannelGuide(page, apiClient, item) {
if ('TvChannel' === item.Type) {
page.querySelector('.programGuideSection').classList.remove('hide');
apiClient.getLiveTvPrograms({
ChannelIds: item.Id,
UserId: apiClient.getCurrentUserId(),
HasAired: false,
SortBy: 'StartDate',
EnableTotalRecordCount: false,
EnableImages: false,
ImageTypeLimit: 0,
EnableUserData: false
}).then(function (result) {
renderProgramsForChannel(page, result);
});
}
}
function renderSeriesSchedule(page, item) {
var apiClient = connectionManager.getApiClient(item.ServerId);
apiClient.getLiveTvPrograms({
UserId: apiClient.getCurrentUserId(),
HasAired: false,
SortBy: 'StartDate',
EnableTotalRecordCount: false,
EnableImages: false,
ImageTypeLimit: 0,
Limit: 50,
EnableUserData: false,
LibrarySeriesId: item.Id
}).then(function (result) {
if (result.Items.length) {
page.querySelector('#seriesScheduleSection').classList.remove('hide');
} else {
page.querySelector('#seriesScheduleSection').classList.add('hide');
}
page.querySelector('#seriesScheduleList').innerHTML = listView.getListViewHtml({
items: result.Items,
enableUserDataButtons: false,
showParentTitle: false,
image: false,
showProgramDateTime: true,
mediaInfo: false,
showTitle: true,
moreButton: false,
action: 'programdialog'
});
loading.hide();
});
}
function inferContext(item) {
if ('Movie' === item.Type || 'BoxSet' === item.Type) {
return 'movies';
}
if ('Series' === item.Type || 'Season' === item.Type || 'Episode' === item.Type) {
return 'tvshows';
}
if ('MusicArtist' === item.Type || 'MusicAlbum' === item.Type || 'Audio' === item.Type || 'AudioBook' === item.Type) {
return 'music';
}
if ('Program' === item.Type) {
return 'livetv';
}
return null;
}
function filterItemsByCollectionItemType(items, typeInfo) {
return items.filter(function (item) {
if (typeInfo.mediaType) {
return item.MediaType == typeInfo.mediaType;
}
return item.Type == typeInfo.type;
});
}
function canPlaySomeItemInCollection(items) {
var i = 0;
for (var length = items.length; i < length; i++) {
if (playbackManager.canPlay(items[i])) {
return true;
}
}
return false;
}
function renderCollectionItems(page, parentItem, types, items) {
page.querySelector('.collectionItems').innerHTML = '';
var i;
var length;
for (i = 0, length = types.length; i < length; i++) {
var type = types[i];
var typeItems = filterItemsByCollectionItemType(items, type);
if (typeItems.length) {
renderCollectionItemType(page, parentItem, type, typeItems);
}
}
var otherType = {
name: globalize.translate('HeaderOtherItems')
};
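// Collect items that matched none of the known types so they can be listed under a catch-all "Other Items" section.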
var otherTypeItems = items.filter(function (curr) {
return !types.filter(function (t) {
return filterItemsByCollectionItemType([curr], t).length > 0;
}).length;
});
if (otherTypeItems.length) {
renderCollectionItemType(page, parentItem, otherType, otherTypeItems);
}
if (!items.length) {
renderCollectionItemType(page, parentItem, {
name: globalize.translate('HeaderItems')
}, items);
}
var containers = page.querySelectorAll('.collectionItemsContainer');
var notifyRefreshNeeded = function () {
renderChildren(page, parentItem);
};
for (i = 0, length = containers.length; i < length; i++) {
containers[i].notifyRefreshNeeded = notifyRefreshNeeded;
}
// if nothing in the collection can be played hide play and shuffle buttons
if (!canPlaySomeItemInCollection(items)) {
hideAll(page, 'btnPlay', false);
hideAll(page, 'btnShuffle', false);
}
// HACK: Call autoFocuser again because btnPlay may be hidden, but focused by reloadFromItem
// FIXME: Sometimes focus does not move until all (?) sections are loaded
require(['autoFocuser'], function (autoFocuser) {
autoFocuser.autoFocus(page);
});
}
function renderCollectionItemType(page, parentItem, type, items) {
var html = '';
html += '<div class="verticalSection">';
html += '<div class="sectionTitleContainer sectionTitleContainer-cards padded-left">';
html += '<h2 class="sectionTitle sectionTitle-cards">';
html += '<span>' + type.name + '</span>';
html += '</h2>';
html += '<button class="btnAddToCollection sectionTitleButton" type="button" is="paper-icon-button-light" style="margin-left:1em;"><span class="material-icons add"></span></button>';
html += '</div>';
html += '<div is="emby-itemscontainer" class="itemsContainer collectionItemsContainer vertical-wrap padded-left padded-right">';
var shape = 'MusicAlbum' == type.type ? getSquareShape(false) : getPortraitShape(false);
html += cardBuilder.getCardsHtml({
items: items,
shape: shape,
showTitle: true,
showYear: 'Video' === type.mediaType || 'Series' === type.type,
centerText: true,
lazy: true,
showDetailsMenu: true,
overlayMoreButton: true,
showAddToCollection: false,
showRemoveFromCollection: true,
collectionId: parentItem.Id
});
html += '</div>';
html += '</div>';
var collectionItems = page.querySelector('.collectionItems');
collectionItems.insertAdjacentHTML('beforeend', html);
imageLoader.lazyChildren(collectionItems);
collectionItems.querySelector('.btnAddToCollection').addEventListener('click', function () {
require(['alert'], function (alert) {
alert({
text: globalize.translate('AddItemToCollectionHelp'),
html: globalize.translate('AddItemToCollectionHelp') + '<br/><br/><a is="emby-linkbutton" class="button-link" target="_blank" href="https://web.archive.org/web/20181216120305/https://github.com/MediaBrowser/Wiki/wiki/Collections">' + globalize.translate('ButtonLearnMore') + '</a>'
});
});
});
}
function renderMusicVideos(page, item, user) {
connectionManager.getApiClient(item.ServerId).getItems(user.Id, {
SortBy: 'SortName',
SortOrder: 'Ascending',
IncludeItemTypes: 'MusicVideo',
Recursive: true,
Fields: 'PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount',
AlbumIds: item.Id
}).then(function (result) {
if (result.Items.length) {
page.querySelector('#musicVideosCollapsible').classList.remove('hide');
var musicVideosContent = page.querySelector('.musicVideosContent');
musicVideosContent.innerHTML = getVideosHtml(result.Items, user);
imageLoader.lazyChildren(musicVideosContent);
} else {
page.querySelector('#musicVideosCollapsible').classList.add('hide');
}
});
}
function renderAdditionalParts(page, item, user) {
connectionManager.getApiClient(item.ServerId).getAdditionalVideoParts(user.Id, item.Id).then(function (result) {
if (result.Items.length) {
page.querySelector('#additionalPartsCollapsible').classList.remove('hide');
var additionalPartsContent = page.querySelector('#additionalPartsContent');
additionalPartsContent.innerHTML = getVideosHtml(result.Items, user);
imageLoader.lazyChildren(additionalPartsContent);
} else {
page.querySelector('#additionalPartsCollapsible').classList.add('hide');
}
});
}
function renderScenes(page, item) {
var chapters = item.Chapters || [];
if (chapters.length && !chapters[0].ImageTag) {
chapters = [];
}
if (chapters.length) {
page.querySelector('#scenesCollapsible').classList.remove('hide');
var scenesContent = page.querySelector('#scenesContent');
require(['chaptercardbuilder'], function (chaptercardbuilder) {
chaptercardbuilder.buildChapterCards(item, chapters, {
itemsContainer: scenesContent,
backdropShape: 'overflowBackdrop',
squareShape: 'overflowSquare'
});
});
} else {
page.querySelector('#scenesCollapsible').classList.add('hide');
}
}
function getVideosHtml(items, user, limit, moreButtonClass) {
var html = cardBuilder.getCardsHtml({
items: items,
shape: 'auto',
showTitle: true,
action: 'play',
overlayText: false,
centerText: true,
showRuntime: true
});
if (limit && items.length > limit) {
html += '<p style="margin: 0;padding-left:5px;"><button is="emby-button" type="button" class="raised more ' + moreButtonClass + '">' + globalize.translate('ButtonMore') + '</button></p>';
}
return html;
}
function renderSpecials(page, item, user, limit) {
connectionManager.getApiClient(item.ServerId).getSpecialFeatures(user.Id, item.Id).then(function (specials) {
var specialsContent = page.querySelector('#specialsContent');
specialsContent.innerHTML = getVideosHtml(specials, user, limit, 'moreSpecials');
imageLoader.lazyChildren(specialsContent);
});
}
function renderCast(page, item) {
var people = (item.People || []).filter(function (p) {
return 'Director' !== p.Type;
});
if (!people.length) {
page.querySelector('#castCollapsible').classList.add('hide');
return;
}
page.querySelector('#castCollapsible').classList.remove('hide');
var castContent = page.querySelector('#castContent');
require(['peoplecardbuilder'], function (peoplecardbuilder) {
peoplecardbuilder.buildPeopleCards(people, {
itemsContainer: castContent,
coverImage: true,
serverId: item.ServerId,
shape: 'overflowPortrait',
imageBlurhashes: item.ImageBlurHashes
});
});
}
function itemDetailPage() {
var self = this;
self.setInitialCollapsibleState = setInitialCollapsibleState;
self.renderDetails = renderDetails;
self.renderCast = renderCast;
}
function bindAll(view, selector, eventName, fn) {
var i;
var length;
var elems = view.querySelectorAll(selector);
for (i = 0, length = elems.length; i < length; i++) {
elems[i].addEventListener(eventName, fn);
}
}
function onTrackSelectionsSubmit(e) {
e.preventDefault();
return false;
}
window.ItemDetailPage = new itemDetailPage();
return function (view, params) {
function reload(instance, page, params) {
loading.show();
var apiClient = params.serverId ? connectionManager.getApiClient(params.serverId) : ApiClient;
var promises = [getPromise(apiClient, params), apiClient.getCurrentUser()];
Promise.all(promises).then(function (responses) {
var item = responses[0];
var user = responses[1];
currentItem = item;
reloadFromItem(instance, page, params, item, user);
});
}
function splitVersions(instance, page, apiClient, params) {
require(['confirm'], function (confirm) {
confirm('Are you sure you wish to split the media sources into separate items?', 'Split Media Apart').then(function () {
loading.show();
apiClient.ajax({
type: 'DELETE',
url: apiClient.getUrl('Videos/' + params.id + '/AlternateSources')
}).then(function () {
loading.hide();
reload(instance, page, params);
});
});
});
}
function getPlayOptions(startPosition) {
var audioStreamIndex = view.querySelector('.selectAudio').value || null;
return {
startPositionTicks: startPosition,
mediaSourceId: view.querySelector('.selectSource').value,
audioStreamIndex: audioStreamIndex,
subtitleStreamIndex: view.querySelector('.selectSubtitles').value
};
}
function playItem(item, startPosition) {
var playOptions = getPlayOptions(startPosition);
playOptions.items = [item];
playbackManager.play(playOptions);
}
function playTrailer() {
playbackManager.playTrailers(currentItem);
}
function playCurrentItem(button, mode) {
var item = currentItem;
if ('Program' === item.Type) {
var apiClient = connectionManager.getApiClient(item.ServerId);
return void apiClient.getLiveTvChannel(item.ChannelId, apiClient.getCurrentUserId()).then(function (channel) {
playbackManager.play({
items: [channel]
});
});
}
playItem(item, item.UserData && 'resume' === mode ? item.UserData.PlaybackPositionTicks : 0);
}
function onPlayClick() {
playCurrentItem(this, this.getAttribute('data-mode'));
}
function onInstantMixClick() {
playbackManager.instantMix(currentItem);
}
function onShuffleClick() {
playbackManager.shuffle(currentItem);
}
function onDeleteClick() {
require(['deleteHelper'], function (deleteHelper) {
deleteHelper.deleteItem({
item: currentItem,
navigate: true
});
});
}
function onCancelSeriesTimerClick() {
require(['recordingHelper'], function (recordingHelper) {
recordingHelper.cancelSeriesTimerWithConfirmation(currentItem.Id, currentItem.ServerId).then(function () {
Dashboard.navigate('livetv.html');
});
});
}
function onCancelTimerClick() {
require(['recordingHelper'], function (recordingHelper) {
recordingHelper.cancelTimer(connectionManager.getApiClient(currentItem.ServerId), currentItem.TimerId).then(function () {
reload(self, view, params);
});
});
}
function onPlayTrailerClick() {
playTrailer();
}
function onDownloadClick() {
require(['fileDownloader'], function (fileDownloader) {
var downloadHref = apiClient.getItemDownloadUrl(currentItem.Id);
fileDownloader.download([{
url: downloadHref,
itemId: currentItem.Id,
serverId: currentItem.ServerId
}]);
});
}
function onMoreCommandsClick() {
var button = this;
apiClient.getCurrentUser().then(function (user) {
itemContextMenu.show(getContextMenuOptions(currentItem, user, button)).then(function (result) {
if (result.deleted) {
appRouter.goHome();
} else if (result.updated) {
reload(self, view, params);
}
});
});
}
function onPlayerChange() {
renderTrackSelections(view, self, currentItem);
setTrailerButtonVisibility(view, currentItem);
}
function editImages() {
return new Promise(function (resolve, reject) {
require(['imageEditor'], function (imageEditor) {
imageEditor.show({
itemId: currentItem.Id,
serverId: currentItem.ServerId
}).then(resolve, reject);
});
});
}
function onWebSocketMessage(e, data) {
var msg = data;
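// Keep the cached item's user data in sync when the server reports a change for the current user.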
if ('UserDataChanged' === msg.MessageType && currentItem && msg.Data.UserId == apiClient.getCurrentUserId()) {
var key = currentItem.UserData.Key;
var userData = msg.Data.UserDataList.filter(function (u) {
return u.Key == key;
})[0];
if (userData) {
currentItem.UserData = userData;
reloadPlayButtons(view, currentItem);
refreshImage(view, currentItem);
}
}
}
var currentItem;
var self = this;
var apiClient = params.serverId ? connectionManager.getApiClient(params.serverId) : ApiClient;
view.querySelectorAll('.btnPlay');
bindAll(view, '.btnPlay', 'click', onPlayClick);
bindAll(view, '.btnResume', 'click', onPlayClick);
bindAll(view, '.btnInstantMix', 'click', onInstantMixClick);
bindAll(view, '.btnShuffle', 'click', onShuffleClick);
bindAll(view, '.btnPlayTrailer', 'click', onPlayTrailerClick);
bindAll(view, '.btnCancelSeriesTimer', 'click', onCancelSeriesTimerClick);
bindAll(view, '.btnCancelTimer', 'click', onCancelTimerClick);
bindAll(view, '.btnDeleteItem', 'click', onDeleteClick);
bindAll(view, '.btnDownload', 'click', onDownloadClick);
view.querySelector('.trackSelections').addEventListener('submit', onTrackSelectionsSubmit);
view.querySelector('.btnSplitVersions').addEventListener('click', function () {
splitVersions(self, view, apiClient, params);
});
bindAll(view, '.btnMoreCommands', 'click', onMoreCommandsClick);
view.querySelector('.selectSource').addEventListener('change', function () {
renderVideoSelections(view, self._currentPlaybackMediaSources);
renderAudioSelections(view, self._currentPlaybackMediaSources);
renderSubtitleSelections(view, self._currentPlaybackMediaSources);
});
view.addEventListener('click', function (e) {
if (dom.parentWithClass(e.target, 'moreScenes')) {
renderScenes(view, currentItem);
} else if (dom.parentWithClass(e.target, 'morePeople')) {
renderCast(view, currentItem);
} else if (dom.parentWithClass(e.target, 'moreSpecials')) {
apiClient.getCurrentUser().then(function (user) {
renderSpecials(view, currentItem, user);
});
}
});
view.querySelector('.detailImageContainer').addEventListener('click', function (e) {
if (dom.parentWithClass(e.target, 'itemDetailGalleryLink')) {
editImages().then(function () {
reload(self, view, params);
});
}
});
view.addEventListener('viewshow', function (e) {
var page = this;
if (layoutManager.mobile) {
libraryMenu.setTransparentMenu(true);
}
if (e.detail.isRestored) {
if (currentItem) {
Emby.Page.setTitle('');
renderTrackSelections(page, self, currentItem, true);
}
} else {
reload(self, page, params);
}
events.on(apiClient, 'message', onWebSocketMessage);
events.on(playbackManager, 'playerchange', onPlayerChange);
});
view.addEventListener('viewbeforehide', function () {
events.off(apiClient, 'message', onWebSocketMessage);
events.off(playbackManager, 'playerchange', onPlayerChange);
libraryMenu.setTransparentMenu(false);
});
view.addEventListener('viewdestroy', function () {
currentItem = null;
self._currentPlaybackMediaSources = null;
self.currentRecordingFields = null;
});
};
});
| 1 | 16,008 | I added this here because people cards (which depend on cardBuilder) had this added in blurhash. Not sure when this is used, though; cc @JustAMan | jellyfin-jellyfin-web | js
@@ -509,9 +509,6 @@ func DecryptDecode(ctx context.Context, k *secrets.Keeper, post Decode) Decode {
}
// DecoderByName returns a *Decoder based on decoderName.
-// It is intended to be used by VariableURLOpeners in driver packages.
-var DecoderByName = decoderByName
-
// Supported values include:
// - empty string: Returns the default from the URLOpener.Decoder, or
// BytesDecoder if URLOpener.Decoder is nil (which is true if you're | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package runtimevar provides an easy and portable way to watch runtime
// configuration variables.
//
// It provides a blocking method that returns a Snapshot of the variable value
// whenever a change is detected.
//
// Subpackages contain distinct implementations of runtimevar for various
// providers, including Cloud and on-prem solutions. For example, "etcdvar"
// supports variables stored in etcd. Your application should import one of
// these provider-specific subpackages and use its exported function(s) to
// create a *Variable; do not use the New function in this package. For example:
//
// var v *runtimevar.Variable
// var err error
// v, err = etcdvar.New("my variable", etcdClient, runtimevar.JSONDecode, nil)
// ...
//
// Then, write your application code using the *Variable type. You can
// easily reconfigure your initialization code to choose a different provider.
// You can develop your application locally using filevar or constantvar, and
// deploy it to multiple Cloud providers. You may find
// http://github.com/google/wire useful for managing your initialization code.
//
// Variable implements health.Checker; it reports as healthy when Latest will
// return a value without blocking.
//
// Alternatively, you can construct a *Variable via a URL and OpenVariable.
// See https://godoc.org/gocloud.dev#hdr-URLs for more information.
//
//
// OpenCensus Integration
//
// OpenCensus supports tracing and metric collection for multiple languages and
// backend providers. See https://opencensus.io.
//
// This API collects an OpenCensus metric "gocloud.dev/runtimevar/value_changes",
// a count of the number of times all variables have changed values, by provider.
//
// To enable metric collection in your application, see "Exporting stats" at
// https://opencensus.io/quickstart/go/metrics.
package runtimevar // import "gocloud.dev/runtimevar"
import (
"bytes"
"context"
"encoding/gob"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"reflect"
"strings"
"sync"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/oc"
"gocloud.dev/internal/openurl"
"gocloud.dev/runtimevar/driver"
"gocloud.dev/secrets"
)
// Snapshot contains a snapshot of a variable's value and metadata about it.
// It is intended to be read-only for users.
type Snapshot struct {
// Value contains the value of the variable.
// The type for Value depends on the provider; for most providers, it depends
// on the decoder used when creating Variable.
Value interface{}
// UpdateTime is the time when the last change was detected.
UpdateTime time.Time
asFunc func(interface{}) bool
}
// As converts i to provider-specific types.
// See https://godoc.org/gocloud.dev#hdr-As for background information, the "As"
// examples in this package for examples, and the provider-specific package
// documentation for the specific types supported for that provider.
func (s *Snapshot) As(i interface{}) bool {
if s.asFunc == nil {
return false
}
return s.asFunc(i)
}
const pkgName = "gocloud.dev/runtimevar"
var (
changeMeasure = stats.Int64(pkgName+"/value_changes", "Count of variable value changes",
stats.UnitDimensionless)
// OpenCensusViews are predefined views for OpenCensus metrics.
OpenCensusViews = []*view.View{
{
Name: pkgName + "/value_changes",
Measure: changeMeasure,
Description: "Count of variable value changes by provider.",
TagKeys: []tag.Key{oc.ProviderKey},
Aggregation: view.Count(),
},
}
)
// Variable provides an easy and portable way to watch runtime configuration
// variables. To create a Variable, use constructors found in provider-specific
// subpackages.
type Variable struct {
dw driver.Watcher
provider string // for metric collection
// For cancelling the background goroutine, and noticing when it has exited.
backgroundCancel context.CancelFunc
backgroundDone chan struct{}
// haveGood is closed when we get the first good value for the variable.
haveGood chan struct{}
// A reference to the changed channel as of the last time Watch was called.
// Not protected by mu because it's only referenced in Watch, which is not
// supposed to be called from multiple goroutines.
lastWatch <-chan struct{}
mu sync.RWMutex
changed chan struct{} // closed when changing any of the other variables and replaced with a new channel
last Snapshot
lastErr error
lastGood Snapshot
}
// New is intended for use by provider implementations.
var New = newVar
// newVar creates a new *Variable based on a specific driver implementation.
func newVar(w driver.Watcher) *Variable {
ctx, cancel := context.WithCancel(context.Background())
changed := make(chan struct{})
v := &Variable{
dw: w,
provider: oc.ProviderName(w),
backgroundCancel: cancel,
backgroundDone: make(chan struct{}),
haveGood: make(chan struct{}),
changed: changed,
lastWatch: changed,
lastErr: gcerr.Newf(gcerr.FailedPrecondition, nil, "no value yet"),
}
go v.background(ctx)
return v
}
// ErrClosed is returned from Watch when the Variable has been Closed.
var ErrClosed = gcerr.Newf(gcerr.FailedPrecondition, nil, "Variable has been Closed")
// Watch returns when there is a new Snapshot of the current value of the
// variable.
//
// The first call to Watch will block while reading the variable from the
// provider, and will return the resulting Snapshot or error. If an error is
// returned, the returned Snapshot is a zero value and should be ignored.
// Subsequent calls will block until the variable's value changes or a different
// error occurs.
//
// Watch returns an ErrClosed error if the Variable has been closed.
//
// Watch should not be called on the same variable from multiple goroutines
// concurrently. The typical use case is to call it in a single goroutine in a
// loop.
//
// If the variable does not exist, Watch returns an error for which
// gcerrors.Code will return gcerrors.NotFound.
//
// Alternatively, use Latest to retrieve the latest good value.
func (c *Variable) Watch(ctx context.Context) (Snapshot, error) {
// Block until there's a change since the last Watch call, signaled
// by lastWatch being closed by the background goroutine.
var ctxErr error
select {
case <-c.lastWatch:
case <-ctx.Done():
ctxErr = ctx.Err()
}
c.mu.Lock()
defer c.mu.Unlock()
if c.lastErr == ErrClosed {
return Snapshot{}, ErrClosed
} else if ctxErr != nil {
return Snapshot{}, ctxErr
}
c.lastWatch = c.changed
return c.last, c.lastErr
}
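// exampleWatchLoop is a minimal illustrative sketch of the typical use
// described above: a single goroutine calling Watch in a loop. It is not
// part of the package API, and the apply callback is a hypothetical
// placeholder for application code.
func exampleWatchLoop(ctx context.Context, v *Variable, apply func(interface{})) {
    for {
        snap, err := v.Watch(ctx)
        if err != nil {
            if err == ErrClosed || ctx.Err() != nil {
                return // Variable closed or context cancelled; stop watching.
            }
            // The variable changed to a bad value; keep using the previous
            // good one and wait for the next change.
            continue
        }
        apply(snap.Value)
    }
}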
func (c *Variable) background(ctx context.Context) {
var curState, prevState driver.State
var wait time.Duration
for {
select {
case <-ctx.Done():
// We're shutting down; exit the goroutine.
close(c.backgroundDone)
return
case <-time.After(wait):
// Continue.
}
curState, wait = c.dw.WatchVariable(ctx, prevState)
if curState == nil {
// No change.
continue
}
// There's something new to return!
prevState = curState
_ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(oc.ProviderKey, c.provider)}, changeMeasure.M(1))
// Error from RecordWithTags is not possible.
// Updates under the lock.
c.mu.Lock()
if c.lastErr == ErrClosed {
close(c.backgroundDone)
c.mu.Unlock()
return
}
if val, err := curState.Value(); err == nil {
// We got a good value!
c.last = Snapshot{
Value: val,
UpdateTime: curState.UpdateTime(),
asFunc: curState.As,
}
c.lastErr = nil
c.lastGood = c.last
// Close c.haveGood if it's not already closed.
select {
case <-c.haveGood:
default:
close(c.haveGood)
}
} else {
// We got an error value.
c.last = Snapshot{}
c.lastErr = wrapError(c.dw, err)
}
close(c.changed)
c.changed = make(chan struct{})
c.mu.Unlock()
}
}
// Latest is intended to be called per request, with the request context.
// It returns the latest good Snapshot of the variable value, blocking if no
// good value has ever been received. If ctx is Done, it returns the latest
// error indicating why no good value is available (not the ctx.Err()).
// You can pass an already-Done ctx to make Latest not block.
//
// Latest returns ErrClosed if the Variable has been closed.
func (c *Variable) Latest(ctx context.Context) (Snapshot, error) {
var haveGood bool
select {
case <-c.haveGood:
haveGood = true
case <-ctx.Done():
// We don't return ctx.Err().
}
c.mu.RLock()
defer c.mu.RUnlock()
if haveGood && c.lastErr != ErrClosed {
return c.lastGood, nil
}
return Snapshot{}, c.lastErr
}
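// exampleLatestNonBlocking is a minimal illustrative sketch, not part of the
// package API, of the already-Done-context trick described above for reading
// the latest good value without blocking.
func exampleLatestNonBlocking(v *Variable) (Snapshot, error) {
    ctx, cancel := context.WithCancel(context.Background())
    cancel() // ctx is already Done, so Latest returns immediately.
    return v.Latest(ctx)
}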
// CheckHealth returns an error unless Latest will return a good value
// without blocking.
func (c *Variable) CheckHealth() error {
haveGood := false
select {
case <-c.haveGood:
haveGood = true
default:
}
c.mu.RLock()
defer c.mu.RUnlock()
if haveGood && c.lastErr != ErrClosed {
return nil
}
return c.lastErr
}
// Close closes the Variable. The Variable is unusable after Close returns.
func (c *Variable) Close() error {
// Record that we're closing. Subsequent calls to Watch/Latest will return ErrClosed.
c.mu.Lock()
if c.lastErr == ErrClosed {
c.mu.Unlock()
return ErrClosed
}
c.last = Snapshot{}
c.lastErr = ErrClosed
// Close any remaining channels to wake up any callers that are waiting on them.
close(c.changed)
// Close haveGood if it's not already closed, so that Latest doesn't block.
select {
case <-c.haveGood:
default:
close(c.haveGood)
}
c.mu.Unlock()
// Shut down the background goroutine.
c.backgroundCancel()
<-c.backgroundDone
// Close the driver.
err := c.dw.Close()
return wrapError(c.dw, err)
}
func wrapError(w driver.Watcher, err error) error {
if err == nil {
return nil
}
if gcerr.DoNotWrap(err) {
return err
}
return gcerr.New(w.ErrorCode(err), err, 2, "runtimevar")
}
// ErrorAs converts err to provider-specific types.
// ErrorAs panics if i is nil or not a pointer.
// ErrorAs returns false if err == nil.
// See https://godoc.org/gocloud.dev#hdr-As for background information.
func (c *Variable) ErrorAs(err error, i interface{}) bool {
return gcerr.ErrorAs(err, i, c.dw.ErrorAs)
}
// VariableURLOpener represents types than can open Variables based on a URL.
// The opener must not modify the URL argument. OpenVariableURL must be safe to
// call from multiple goroutines.
//
// This interface is generally implemented by types in driver packages.
type VariableURLOpener interface {
OpenVariableURL(ctx context.Context, u *url.URL) (*Variable, error)
}
// URLMux is a URL opener multiplexer. It matches the scheme of the URLs
// against a set of registered schemes and calls the opener that matches the
// URL's scheme.
// See https://godoc.org/gocloud.dev#hdr-URLs for more information.
//
// The zero value is a multiplexer with no registered schemes.
type URLMux struct {
schemes openurl.SchemeMap
}
// VariableSchemes returns a sorted slice of the registered Variable schemes.
func (mux *URLMux) VariableSchemes() []string { return mux.schemes.Schemes() }
// ValidVariableScheme returns true iff scheme has been registered for Variables.
func (mux *URLMux) ValidVariableScheme(scheme string) bool { return mux.schemes.ValidScheme(scheme) }
// RegisterVariable registers the opener with the given scheme. If an opener
// already exists for the scheme, RegisterVariable panics.
func (mux *URLMux) RegisterVariable(scheme string, opener VariableURLOpener) {
mux.schemes.Register("runtimevar", "Variable", scheme, opener)
}
// OpenVariable calls OpenVariableURL with the URL parsed from urlstr.
// OpenVariable is safe to call from multiple goroutines.
func (mux *URLMux) OpenVariable(ctx context.Context, urlstr string) (*Variable, error) {
opener, u, err := mux.schemes.FromString("Variable", urlstr)
if err != nil {
return nil, err
}
return opener.(VariableURLOpener).OpenVariableURL(ctx, u)
}
// OpenVariableURL dispatches the URL to the opener that is registered with the
// URL's scheme. OpenVariableURL is safe to call from multiple goroutines.
func (mux *URLMux) OpenVariableURL(ctx context.Context, u *url.URL) (*Variable, error) {
opener, err := mux.schemes.FromURL("Variable", u)
if err != nil {
return nil, err
}
return opener.(VariableURLOpener).OpenVariableURL(ctx, u)
}
var defaultURLMux = new(URLMux)
// DefaultURLMux returns the URLMux used by OpenVariable.
//
// Driver packages can use this to register their VariableURLOpener on the mux.
func DefaultURLMux() *URLMux {
return defaultURLMux
}
// OpenVariable opens the variable identified by the URL given.
// See the URLOpener documentation in provider-specific subpackages for
// details on supported URL formats, and https://godoc.org/gocloud.dev#hdr-URLs
// for more information.
func OpenVariable(ctx context.Context, urlstr string) (*Variable, error) {
return defaultURLMux.OpenVariable(ctx, urlstr)
}
// Decode is a function type for unmarshaling/decoding a slice of bytes into
// an arbitrary type. Decode functions are used when creating a Decoder via
// NewDecoder. This package provides common Decode functions including
// GobDecode and JSONDecode.
type Decode func([]byte, interface{}) error
// Decoder decodes a slice of bytes into a particular Go object.
//
// This package provides some common Decoders that you can use directly,
// including StringDecoder and BytesDecoder. You can also use NewDecoder to
// construct other Decoders.
type Decoder struct {
typ reflect.Type
fn Decode
}
// NewDecoder returns a Decoder that uses fn to decode a slice of bytes into
// an object of type obj.
//
// This package provides some common Decode functions, including JSONDecode
// and GobDecode, which can be passed to this function to create Decoders for
// JSON and gob values.
func NewDecoder(obj interface{}, fn Decode) *Decoder {
return &Decoder{
typ: reflect.TypeOf(obj),
fn: fn,
}
}
// Decode decodes b into a new instance of the target type.
func (d *Decoder) Decode(b []byte) (interface{}, error) {
nv := reflect.New(d.typ).Interface()
if err := d.fn(b, nv); err != nil {
return nil, err
}
ptr := reflect.ValueOf(nv)
return ptr.Elem().Interface(), nil
}
var (
// StringDecoder decodes into strings.
StringDecoder = NewDecoder("", StringDecode)
// BytesDecoder passes the slice of bytes through unchanged.
BytesDecoder = NewDecoder([]byte{}, BytesDecode)
// JSONDecode can be passed to NewDecoder when decoding JSON (https://golang.org/pkg/encoding/json/).
JSONDecode = json.Unmarshal
)
// GobDecode can be passed to NewDecoder when decoding gobs (https://golang.org/pkg/encoding/gob/).
func GobDecode(data []byte, obj interface{}) error {
return gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)
}
// StringDecode decodes raw bytes b into a string.
func StringDecode(b []byte, obj interface{}) error {
v := obj.(*string)
*v = string(b)
return nil
}
// BytesDecode stores the slice of bytes b into obj. Note that the bytes are aliased rather than copied.
func BytesDecode(b []byte, obj interface{}) error {
v := obj.(*[]byte)
*v = b[:]
return nil
}
// DecryptDecode returns a decode function that can be passed to NewDecoder when
// decoding an encrypted message (https://godoc.org/gocloud.dev/secrets).
//
// post defaults to BytesDecode. An optional Decode function can be passed in
// to perform further decoding on the decrypted message.
func DecryptDecode(ctx context.Context, k *secrets.Keeper, post Decode) Decode {
return func(b []byte, obj interface{}) error {
decrypted, err := k.Decrypt(ctx, b)
if err != nil {
return err
}
if post == nil {
return BytesDecode(decrypted, obj)
}
return post(decrypted, obj)
}
}
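// exampleDecryptJSONDecoder is a minimal illustrative sketch, not part of
// the package API: it composes DecryptDecode with JSONDecode so an encrypted
// JSON payload is decrypted with keeper k and then unmarshaled into a map,
// mirroring the "jsonmap" pattern used by decoderByName below.
func exampleDecryptJSONDecoder(ctx context.Context, k *secrets.Keeper) *Decoder {
    var m map[string]interface{}
    return NewDecoder(&m, DecryptDecode(ctx, k, JSONDecode))
}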
// DecoderByName returns a *Decoder based on decoderName.
// It is intended to be used by VariableURLOpeners in driver packages.
var DecoderByName = decoderByName
// Supported values include:
// - empty string: Returns the default from the URLOpener.Decoder, or
// BytesDecoder if URLOpener.Decoder is nil (which is true if you're
// using the default URLOpener).
// - "bytes": Returns a BytesDecoder; Snapshot.Value will be of type []byte.
// - "jsonmap": Returns a JSON decoder for a map[string]interface{};
// Snapshot.Value will be of type *map[string]interface{}.
// - "string": Returns StringDecoder; Snapshot.Value will be of type string.
// It also supports using "decrypt+<decoderName>" (or "decrypt" for default
// decoder) to decrypt the data before decoding. It uses the secrets package to
// open a keeper using the URL string stored in the environment variable
// "RUNTIMEVAR_KEEPER_URL". See https://godoc.org/gocloud.dev/secrets#OpenKeeper
// for more details.
func decoderByName(ctx context.Context, decoderName string, dflt *Decoder) (*Decoder, error) {
// Open a *secrets.Keeper if the decoderName contains "decrypt".
k, decoderName, err := decryptByName(ctx, decoderName)
if err != nil {
return nil, err
}
if dflt == nil {
dflt = BytesDecoder
}
switch decoderName {
case "":
return maybeDecrypt(ctx, k, dflt), nil
case "bytes":
return maybeDecrypt(ctx, k, BytesDecoder), nil
case "jsonmap":
var m map[string]interface{}
return maybeDecrypt(ctx, k, NewDecoder(&m, JSONDecode)), nil
case "string":
return maybeDecrypt(ctx, k, StringDecoder), nil
default:
return nil, fmt.Errorf("unsupported decoder %q", decoderName)
}
}
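// exampleOpenDecoder is a minimal illustrative sketch, not part of the
// package API: it shows how a driver's VariableURLOpener might map a
// hypothetical "decoder" URL query parameter to a *Decoder via
// DecoderByName, falling back to BytesDecoder as the default.
func exampleOpenDecoder(ctx context.Context, u *url.URL) (*Decoder, error) {
    // Accepted names include "", "bytes", "string", "jsonmap", and
    // decrypt-prefixed forms such as "decrypt+jsonmap".
    return DecoderByName(ctx, u.Query().Get("decoder"), BytesDecoder)
}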
// decryptByName returns a *secrets.Keeper for decryption when decoderName
// contains "decrypt".
func decryptByName(ctx context.Context, decoderName string) (*secrets.Keeper, string, error) {
if !strings.HasPrefix(decoderName, "decrypt") {
return nil, decoderName, nil
}
keeperURL := os.Getenv("RUNTIMEVAR_KEEPER_URL")
if keeperURL == "" {
return nil, "", errors.New("environment variable RUNTIMEVAR_KEEPER_URL needed to open a *secrets.Keeper for decryption")
}
k, err := secrets.OpenKeeper(ctx, keeperURL)
if err != nil {
return nil, "", err
}
decoderName = strings.TrimPrefix(decoderName, "decrypt")
if decoderName != "" {
decoderName = strings.TrimLeftFunc(decoderName, func(r rune) bool {
return r == ' ' || r == '+'
})
}
// decoderName now has the "decrypt" prefix, plus any '+' or space separator, stripped.
return k, decoderName, nil
}
func maybeDecrypt(ctx context.Context, k *secrets.Keeper, dec *Decoder) *Decoder {
if k == nil {
return dec
}
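    // Wrap the original decode function so the payload is decrypted with k
    // first; reflect.New(dec.typ).Elem() recreates a zero value of the
    // original target type to hand to NewDecoder.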
return NewDecoder(reflect.New(dec.typ).Elem().Interface(), DecryptDecode(ctx, k, dec.fn))
}
| 1 | 16,610 | Let's keep this line in the docstring, to let end users know they shouldn't be using this directly. | google-go-cloud | go |
@@ -39,13 +39,10 @@ func (p *Protocol) validateCreateStake(ctx context.Context, act *action.CreateSt
if act.Amount().Cmp(p.config.MinStakeAmount) == -1 {
return errors.Wrap(ErrInvalidAmount, "stake amount is less than the minimum requirement")
}
- if act.GasPrice().Sign() < 0 {
- return errors.Wrap(action.ErrGasPrice, "negative value")
- }
if !p.inMemCandidates.ContainsName(act.Candidate()) {
return errors.Wrap(ErrInvalidCanName, "cannot find candidate in candidate center")
}
- return nil
+ return act.AbstractAction.SelfCheck()
}
func (p *Protocol) validateUnstake(ctx context.Context, act *action.Unstake) error { | 1 | // Copyright (c) 2020 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package staking
import (
"context"
"math/big"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
)
// Errors
var (
ErrNilAction = errors.New("action is nil")
ErrInvalidAmount = errors.New("invalid staking amount")
ErrInvalidCanName = errors.New("invalid candidate name")
ErrInvalidOwner = errors.New("invalid owner address")
ErrInvalidOperator = errors.New("invalid operator address")
ErrInvalidSelfStkIndex = errors.New("invalid self-staking bucket index")
ErrMissingField = errors.New("missing data field")
)
func (p *Protocol) validateCreateStake(ctx context.Context, act *action.CreateStake) error {
if act == nil {
return ErrNilAction
}
if !IsValidCandidateName(act.Candidate()) {
return ErrInvalidCanName
}
if act.Amount().Cmp(p.config.MinStakeAmount) == -1 {
return errors.Wrap(ErrInvalidAmount, "stake amount is less than the minimum requirement")
}
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
if !p.inMemCandidates.ContainsName(act.Candidate()) {
return errors.Wrap(ErrInvalidCanName, "cannot find candidate in candidate center")
}
return nil
}
func (p *Protocol) validateUnstake(ctx context.Context, act *action.Unstake) error {
if act == nil {
return ErrNilAction
}
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
return nil
}
func (p *Protocol) validateWithdrawStake(ctx context.Context, act *action.WithdrawStake) error {
if act == nil {
return ErrNilAction
}
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
return nil
}
func (p *Protocol) validateChangeCandidate(ctx context.Context, act *action.ChangeCandidate) error {
if act == nil {
return ErrNilAction
}
if !IsValidCandidateName(act.Candidate()) {
return ErrInvalidCanName
}
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
if !p.inMemCandidates.ContainsName(act.Candidate()) {
return errors.Wrap(ErrInvalidCanName, "cannot find candidate in candidate center")
}
return nil
}
func (p *Protocol) validateTransferStake(ctx context.Context, act *action.TransferStake) error {
if act == nil {
return ErrNilAction
}
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
return nil
}
func (p *Protocol) validateDepositToStake(ctx context.Context, act *action.DepositToStake) error {
if act == nil {
return ErrNilAction
}
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
return nil
}
func (p *Protocol) validateRestake(ctx context.Context, act *action.Restake) error {
if act == nil {
return ErrNilAction
}
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
return nil
}
func (p *Protocol) validateCandidateRegister(ctx context.Context, act *action.CandidateRegister) error {
if act == nil {
return ErrNilAction
}
actCtx := protocol.MustGetActionCtx(ctx)
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
if !IsValidCandidateName(act.Name()) {
return ErrInvalidCanName
}
if act.Amount().Cmp(p.config.RegistrationConsts.MinSelfStake) < 0 {
return errors.Wrap(ErrInvalidAmount, "self staking amount is not valid")
}
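    // The owner defaults to the transaction caller unless an explicit owner address is set on the action.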
owner := actCtx.Caller
if act.OwnerAddress() != nil {
owner = act.OwnerAddress()
}
if c := p.inMemCandidates.GetByOwner(owner); c != nil {
// an existing owner may only re-register if its current self-stake is 0
if c.SelfStake.Cmp(big.NewInt(0)) != 0 {
return ErrInvalidOwner
}
if act.Name() != c.Name && p.inMemCandidates.ContainsName(act.Name()) {
return ErrInvalidCanName
}
if !address.Equal(act.OperatorAddress(), c.Operator) && p.inMemCandidates.ContainsOperator(act.OperatorAddress()) {
return ErrInvalidOperator
}
return nil
}
// cannot collide with existing name
if p.inMemCandidates.ContainsName(act.Name()) {
return ErrInvalidCanName
}
// cannot collide with existing operator address
if p.inMemCandidates.ContainsOperator(act.OperatorAddress()) {
return ErrInvalidOperator
}
return nil
}
func (p *Protocol) validateCandidateUpdate(ctx context.Context, act *action.CandidateUpdate) error {
actCtx := protocol.MustGetActionCtx(ctx)
if act == nil {
return ErrNilAction
}
if act.GasPrice().Sign() < 0 {
return errors.Wrap(action.ErrGasPrice, "negative value")
}
if len(act.Name()) != 0 {
if !IsValidCandidateName(act.Name()) {
return ErrInvalidCanName
}
}
// only owner can update candidate
c := p.inMemCandidates.GetByOwner(actCtx.Caller)
if c == nil {
return ErrInvalidOwner
}
// cannot collide with existing name
if len(act.Name()) != 0 && act.Name() != c.Name && p.inMemCandidates.ContainsName(act.Name()) {
return ErrInvalidCanName
}
// cannot collide with existing operator address
if act.OperatorAddress() != nil && !address.Equal(act.OperatorAddress(), c.Operator) && p.inMemCandidates.ContainsOperator(act.OperatorAddress()) {
return ErrInvalidOperator
}
return nil
}
// IsValidCandidateName checks if a candidate name string is valid: 1 to 12 characters, each in [a-z0-9].
func IsValidCandidateName(s string) bool {
if len(s) == 0 || len(s) > 12 {
return false
}
for _, c := range s {
if !(('a' <= c && c <= 'z') || ('0' <= c && c <= '9')) {
return false
}
}
return true
}
| 1 | 21,463 | can do the same for other validateXXX() | iotexproject-iotex-core | go |
@@ -512,7 +512,7 @@ ReturnCode_t PublisherImpl::wait_for_acknowledgments(
const DomainParticipant* PublisherImpl::get_participant() const
{
- return participant_->get_participant();
+ return const_cast<const DomainParticipantImpl*>(participant_)->get_participant();
}
const Publisher* PublisherImpl::get_publisher() const | 1 | // Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* PublisherImpl.cpp
*
*/
#include <fastdds/publisher/PublisherImpl.hpp>
#include <fastdds/publisher/DataWriterImpl.hpp>
#include <fastdds/domain/DomainParticipantImpl.hpp>
#include <fastdds/topic/TopicDescriptionImpl.hpp>
#include <fastdds/dds/publisher/Publisher.hpp>
#include <fastdds/dds/publisher/PublisherListener.hpp>
#include <fastdds/dds/publisher/DataWriter.hpp>
#include <fastdds/dds/publisher/qos/DataWriterQos.hpp>
#include <fastdds/dds/domain/DomainParticipantListener.hpp>
#include <fastdds/dds/topic/TypeSupport.hpp>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastdds/dds/log/Log.hpp>
#include <fastrtps/attributes/PublisherAttributes.h>
#include <fastrtps/xmlparser/XMLProfileManager.h>
#include <functional>
namespace eprosima {
namespace fastdds {
namespace dds {
using fastrtps::xmlparser::XMLProfileManager;
using fastrtps::xmlparser::XMLP_ret;
using fastrtps::rtps::InstanceHandle_t;
using fastrtps::Duration_t;
using fastrtps::PublisherAttributes;
static void set_qos_from_attributes(
DataWriterQos& qos,
const PublisherAttributes& attr)
{
qos.writer_resource_limits().matched_subscriber_allocation = attr.matched_subscriber_allocation;
qos.properties() = attr.properties;
qos.throughput_controller() = attr.throughputController;
qos.endpoint().unicast_locator_list = attr.unicastLocatorList;
qos.endpoint().multicast_locator_list = attr.multicastLocatorList;
qos.endpoint().remote_locator_list = attr.remoteLocatorList;
qos.endpoint().history_memory_policy = attr.historyMemoryPolicy;
qos.endpoint().user_defined_id = attr.getUserDefinedID();
qos.endpoint().entity_id = attr.getEntityID();
qos.reliable_writer_qos().times = attr.times;
qos.reliable_writer_qos().disable_positive_acks = attr.qos.m_disablePositiveACKs;
qos.durability() = attr.qos.m_durability;
qos.durability_service() = attr.qos.m_durabilityService;
qos.deadline() = attr.qos.m_deadline;
qos.latency_budget() = attr.qos.m_latencyBudget;
qos.liveliness() = attr.qos.m_liveliness;
qos.reliability() = attr.qos.m_reliability;
qos.lifespan() = attr.qos.m_lifespan;
qos.user_data().setValue(attr.qos.m_userData);
qos.ownership() = attr.qos.m_ownership;
qos.ownership_strength() = attr.qos.m_ownershipStrength;
qos.destination_order() = attr.qos.m_destinationOrder;
qos.representation() = attr.qos.representation;
qos.publish_mode() = attr.qos.m_publishMode;
qos.history() = attr.topic.historyQos;
qos.resource_limits() = attr.topic.resourceLimitsQos;
}
PublisherImpl::PublisherImpl(
DomainParticipantImpl* p,
const PublisherQos& qos,
PublisherListener* listen)
: participant_(p)
, qos_(&qos == &PUBLISHER_QOS_DEFAULT ? participant_->get_default_publisher_qos() : qos)
, listener_(listen)
, publisher_listener_(this)
, user_publisher_(nullptr)
, rtps_participant_(p->rtps_participant())
, default_datawriter_qos_(DATAWRITER_QOS_DEFAULT)
{
PublisherAttributes pub_attr;
XMLProfileManager::getDefaultPublisherAttributes(pub_attr);
set_qos_from_attributes(default_datawriter_qos_, pub_attr);
}
ReturnCode_t PublisherImpl::enable()
{
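    // With autoenable_created_entities set, enabling the publisher cascades to any writers created while it was still disabled.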
if (qos_.entity_factory().autoenable_created_entities)
{
std::lock_guard<std::mutex> lock(mtx_writers_);
for (auto topic_writers : writers_)
{
for (DataWriterImpl* dw : topic_writers.second)
{
dw->user_datawriter_->enable();
}
}
}
return ReturnCode_t::RETCODE_OK;
}
void PublisherImpl::disable()
{
set_listener(nullptr);
user_publisher_->set_listener(nullptr);
{
std::lock_guard<std::mutex> lock(mtx_writers_);
for (auto it = writers_.begin(); it != writers_.end(); ++it)
{
for (DataWriterImpl* dw : it->second)
{
dw->disable();
}
}
}
}
PublisherImpl::~PublisherImpl()
{
{
std::lock_guard<std::mutex> lock(mtx_writers_);
for (auto it = writers_.begin(); it != writers_.end(); ++it)
{
for (DataWriterImpl* dw : it->second)
{
delete dw;
}
}
writers_.clear();
}
delete user_publisher_;
}
const PublisherQos& PublisherImpl::get_qos() const
{
return qos_;
}
ReturnCode_t PublisherImpl::set_qos(
const PublisherQos& qos)
{
bool enabled = user_publisher_->is_enabled();
const PublisherQos& qos_to_set = (&qos == &PUBLISHER_QOS_DEFAULT) ?
participant_->get_default_publisher_qos() : qos;
if (&qos != &PUBLISHER_QOS_DEFAULT)
{
ReturnCode_t ret_val = check_qos(qos_to_set);
if (!ret_val)
{
return ret_val;
}
}
if (enabled && !can_qos_be_updated(qos_, qos_to_set))
{
return ReturnCode_t::RETCODE_IMMUTABLE_POLICY;
}
set_qos(qos_, qos_to_set, !enabled);
if (enabled)
{
std::lock_guard<std::mutex> lock(mtx_writers_);
for (auto topic_writers : writers_)
{
for (auto writer : topic_writers.second)
{
writer->publisher_qos_updated();
}
}
}
return ReturnCode_t::RETCODE_OK;
}
const PublisherListener* PublisherImpl::get_listener() const
{
return listener_;
}
ReturnCode_t PublisherImpl::set_listener(
PublisherListener* listener)
{
listener_ = listener;
return ReturnCode_t::RETCODE_OK;
}
void PublisherImpl::PublisherWriterListener::on_publication_matched(
DataWriter* writer,
const PublicationMatchedStatus& info)
{
if (publisher_->listener_ != nullptr)
{
publisher_->listener_->on_publication_matched(writer, info);
}
}
void PublisherImpl::PublisherWriterListener::on_liveliness_lost(
DataWriter* writer,
const LivelinessLostStatus& status)
{
if (publisher_->listener_ != nullptr)
{
publisher_->listener_->on_liveliness_lost(writer, status);
}
}
void PublisherImpl::PublisherWriterListener::on_offered_deadline_missed(
DataWriter* writer,
const fastrtps::OfferedDeadlineMissedStatus& status)
{
if (publisher_->listener_ != nullptr)
{
publisher_->listener_->on_offered_deadline_missed(writer, status);
}
}
DataWriter* PublisherImpl::create_datawriter(
Topic* topic,
const DataWriterQos& qos,
DataWriterListener* listener,
const StatusMask& mask)
{
logInfo(PUBLISHER, "CREATING WRITER IN TOPIC: " << topic->get_name());
//Look for the correct type registration
TypeSupport type_support = participant_->find_type(topic->get_type_name());
/// Preconditions
// Check the type was registered.
if (type_support.empty())
{
logError(PUBLISHER, "Type: " << topic->get_type_name() << " Not Registered");
return nullptr;
}
if (!DataWriterImpl::check_qos(qos))
{
return nullptr;
}
topic->get_impl()->reference();
DataWriterImpl* impl = new DataWriterImpl(
this,
type_support,
topic,
qos,
listener);
DataWriter* writer = new DataWriter(impl, mask);
impl->user_datawriter_ = writer;
{
std::lock_guard<std::mutex> lock(mtx_writers_);
writers_[topic->get_name()].push_back(impl);
}
if (user_publisher_->is_enabled() && qos_.entity_factory().autoenable_created_entities)
{
if (ReturnCode_t::RETCODE_OK != writer->enable())
{
delete_datawriter(writer);
return nullptr;
}
}
return writer;
}
DataWriter* PublisherImpl::create_datawriter_with_profile(
Topic* topic,
const std::string& profile_name,
DataWriterListener* listener,
const StatusMask& mask)
{
// TODO (ILG): Change when we have full XML support for DDS QoS profiles
PublisherAttributes attr;
if (XMLP_ret::XML_OK == XMLProfileManager::fillPublisherAttributes(profile_name, attr))
{
DataWriterQos qos = default_datawriter_qos_;
set_qos_from_attributes(qos, attr);
return create_datawriter(topic, qos, listener, mask);
}
return nullptr;
}
ReturnCode_t PublisherImpl::delete_datawriter(
DataWriter* writer)
{
if (user_publisher_ != writer->get_publisher())
{
return ReturnCode_t::RETCODE_PRECONDITION_NOT_MET;
}
std::unique_lock<std::mutex> lock(mtx_writers_);
auto vit = writers_.find(writer->get_topic()->get_name());
if (vit != writers_.end())
{
auto dw_it = std::find(vit->second.begin(), vit->second.end(), writer->impl_);
if (dw_it != vit->second.end())
{
// First extract the writer from the map so the mutex can be released
DataWriterImpl* writer_impl = *dw_it;
ReturnCode_t ret_code = writer_impl->check_delete_preconditions();
if (!ret_code)
{
return ret_code;
}
writer_impl->set_listener(nullptr);
vit->second.erase(dw_it);
if (vit->second.empty())
{
writers_.erase(vit);
}
lock.unlock();
//Now we can delete it
writer_impl->get_topic()->get_impl()->dereference();
delete (writer_impl);
return ReturnCode_t::RETCODE_OK;
}
}
return ReturnCode_t::RETCODE_ERROR;
}
DataWriter* PublisherImpl::lookup_datawriter(
const std::string& topic_name) const
{
std::lock_guard<std::mutex> lock(mtx_writers_);
auto it = writers_.find(topic_name);
if (it != writers_.end() && it->second.size() > 0)
{
return it->second.front()->user_datawriter_;
}
return nullptr;
}
bool PublisherImpl::get_datawriters(
std::vector<DataWriter*>& writers) const
{
std::lock_guard<std::mutex> lock(mtx_writers_);
for (auto vit : writers_)
{
for (DataWriterImpl* dw : vit.second)
{
writers.push_back(dw->user_datawriter_);
}
}
return true;
}
bool PublisherImpl::has_datawriters() const
{
if (writers_.empty())
{
return false;
}
return true;
}
bool PublisherImpl::contains_entity(
const fastrtps::rtps::InstanceHandle_t& handle) const
{
std::lock_guard<std::mutex> lock(mtx_writers_);
for (auto vit : writers_)
{
for (DataWriterImpl* dw : vit.second)
{
InstanceHandle_t h(dw->guid());
if (h == handle)
{
return true;
}
}
}
return false;
}
/* TODO
bool PublisherImpl::suspend_publications()
{
logError(PUBLISHER, "Operation not implemented");
return false;
}
*/
/* TODO
bool PublisherImpl::resume_publications()
{
logError(PUBLISHER, "Operation not implemented");
return false;
}
*/
/* TODO
bool PublisherImpl::begin_coherent_changes()
{
logError(PUBLISHER, "Operation not implemented");
return false;
}
*/
/* TODO
bool PublisherImpl::end_coherent_changes()
{
logError(PUBLISHER, "Operation not implemented");
return false;
}
*/
ReturnCode_t PublisherImpl::set_default_datawriter_qos(
const DataWriterQos& qos)
{
if (&qos == &DATAWRITER_QOS_DEFAULT)
{
reset_default_datawriter_qos();
return ReturnCode_t::RETCODE_OK;
}
ReturnCode_t ret_val = DataWriterImpl::check_qos(qos);
if (!ret_val)
{
return ret_val;
}
DataWriterImpl::set_qos(default_datawriter_qos_, qos, true);
return ReturnCode_t::RETCODE_OK;
}
void PublisherImpl::reset_default_datawriter_qos()
{
// TODO (ILG): Change when we have full XML support for DDS QoS profiles
DataWriterImpl::set_qos(default_datawriter_qos_, DATAWRITER_QOS_DEFAULT, true);
PublisherAttributes attr;
XMLProfileManager::getDefaultPublisherAttributes(attr);
set_qos_from_attributes(default_datawriter_qos_, attr);
}
const DataWriterQos& PublisherImpl::get_default_datawriter_qos() const
{
return default_datawriter_qos_;
}
const ReturnCode_t PublisherImpl::get_datawriter_qos_from_profile(
const std::string& profile_name,
DataWriterQos& qos) const
{
PublisherAttributes attr;
if (XMLP_ret::XML_OK == XMLProfileManager::fillPublisherAttributes(profile_name, attr))
{
qos = default_datawriter_qos_;
set_qos_from_attributes(qos, attr);
return ReturnCode_t::RETCODE_OK;
}
return ReturnCode_t::RETCODE_BAD_PARAMETER;
}
/* TODO
bool PublisherImpl::copy_from_topic_qos(
fastrtps::WriterQos&,
const fastrtps::TopicAttributes&) const
{
logError(PUBLISHER, "Operation not implemented");
return false;
}
*/
ReturnCode_t PublisherImpl::wait_for_acknowledgments(
const Duration_t& max_wait)
{
Duration_t current = max_wait;
Duration_t begin, end;
std::lock_guard<std::mutex> lock(mtx_writers_);
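    // All writers share the same overall timeout: each writer's wait consumes part of max_wait.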
for (auto& vit : writers_)
{
for (DataWriterImpl* dw : vit.second)
{
participant_->get_current_time(begin);
if (!dw->wait_for_acknowledgments(current))
{
return ReturnCode_t::RETCODE_ERROR;
}
// Check elapsed time and decrement the remaining wait
participant_->get_current_time(end);
current = current - (end - begin);
if (current < fastrtps::c_TimeZero)
{
return ReturnCode_t::RETCODE_TIMEOUT;
}
}
}
return ReturnCode_t::RETCODE_OK;
}
const DomainParticipant* PublisherImpl::get_participant() const
{
return participant_->get_participant();
}
const Publisher* PublisherImpl::get_publisher() const
{
return user_publisher_;
}
/* TODO
bool PublisherImpl::delete_contained_entities()
{
logError(PUBLISHER, "Operation not implemented");
return false;
}
*/
const InstanceHandle_t& PublisherImpl::get_instance_handle() const
{
return handle_;
}
bool PublisherImpl::type_in_use(
const std::string& type_name) const
{
for (auto it : writers_)
{
for (DataWriterImpl* writer : it.second)
{
if (writer->get_topic()->get_type_name() == type_name)
{
return true; // Is in use
}
}
}
return false;
}
void PublisherImpl::set_qos(
PublisherQos& to,
const PublisherQos& from,
bool first_time)
{
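    // presentation is immutable, so it is only copied on creation (first_time);
    // the remaining policies are mutable and are copied whenever they differ.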
if (first_time && !(to.presentation() == from.presentation()))
{
to.presentation(from.presentation());
to.presentation().hasChanged = true;
}
if (!(to.partition() == from.partition()))
{
to.partition() = from.partition();
to.partition().hasChanged = true;
}
if (!(to.group_data() == from.group_data()))
{
to.group_data() = from.group_data();
to.group_data().hasChanged = true;
}
if (!(to.entity_factory() == from.entity_factory()))
{
to.entity_factory() = from.entity_factory();
}
}
ReturnCode_t PublisherImpl::check_qos(
const PublisherQos& qos)
{
(void) qos;
return ReturnCode_t::RETCODE_OK;
}
bool PublisherImpl::can_qos_be_updated(
const PublisherQos& to,
const PublisherQos& from)
{
(void) to;
(void) from;
return true;
}
PublisherListener* PublisherImpl::get_listener_for(
const StatusMask& status)
{
if (listener_ != nullptr &&
user_publisher_->get_status_mask().is_active(status))
{
return listener_;
}
return participant_->get_listener_for(status);
}
} // dds
} // fastdds
} // eprosima
| 1 | 20,819 | Why is this necessary? | eProsima-Fast-DDS | cpp |
@@ -21,6 +21,7 @@ import os
import sys
import sphinx
from sphinx.errors import VersionRequirementError
+import datetime
curr_path = os.path.dirname(os.path.realpath(__file__))
libpath = os.path.join(curr_path, '../python-package/') | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# LightGBM documentation build configuration file, created by
# sphinx-quickstart on Thu May 4 14:30:58 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from distutils.version import LooseVersion

import sphinx
from sphinx.errors import VersionRequirementError
curr_path = os.path.dirname(os.path.realpath(__file__))
libpath = os.path.join(curr_path, '../python-package/')
sys.path.insert(0, libpath)
# -- mock out modules
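# Mock heavy runtime dependencies so autodoc can import the package without them being installed.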
try:
from unittest.mock import Mock # Python 3.x
except ImportError:
from mock import Mock # Python 2.x
MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse',
'sklearn', 'matplotlib', 'pandas', 'graphviz',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -- General configuration ------------------------------------------------
os.environ['LIGHTGBM_BUILD_DOC'] = '1'
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3' # Due to sphinx.ext.napoleon
if LooseVersion(sphinx.__version__) < LooseVersion(needs_sphinx):
message = 'This project needs at least Sphinx v%s' % needs_sphinx
raise VersionRequirementError(message)
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'LightGBM'
copyright = '2017, Microsoft Corporation'
author = 'Microsoft Corporation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Both the class' and the __init__ method's docstring are concatenated and inserted.
autoclass_content = 'both'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'includehidden': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LightGBMdoc'
def setup(app):
app.add_javascript("js/script.js")
| 1 | 19,124 | Please move this import upper to other similar imports | microsoft-LightGBM | cpp |
@@ -1749,6 +1749,14 @@ translate_from_synchall_to_dispatch(thread_record_t *tr, thread_synch_state_t sy
arch_mcontext_reset_stolen_reg(dcontext, mc);
}
});
+ IF_AARCHXX({
+ // XXX i#4495: Consider saving stolen reg's application value.
+ set_stolen_reg_val(mc, (reg_t)os_get_dr_tls_base(dcontext));
+ // XXX: This path is tested by linux.thread-reset and linux.clone-reset.
+ // We just haven't run those on ARM yet.
+ IF_ARM(ASSERT_NOT_TESTED());
+ });
+
/* We send all threads, regardless of whether was in DR or not, to
* re-interp from translated cxt, to avoid having to handle stale
* local state problems if we simply resumed. | 1 | /* **********************************************************
* Copyright (c) 2012-2020 Google, Inc. All rights reserved.
* Copyright (c) 2008-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* thread.c - thread synchronization
*/
#include "globals.h"
#include "synch.h"
#include "instrument.h" /* is_in_client_lib() */
#include "hotpatch.h" /* hotp_only_in_tramp() */
#include "fragment.h" /* get_at_syscall() */
#include "fcache.h" /* in_fcache() */
#include "translate.h"
#include "native_exec.h"
extern vm_area_vector_t *fcache_unit_areas; /* from fcache.c */
static bool started_detach = false; /* set before synchall */
bool doing_detach = false; /* set after synchall */
static void
synch_thread_yield(void);
/* Thread-local data
*/
typedef struct _thread_synch_data_t {
/* the following three fields are used to synchronize for detach, suspend
* thread, terminate thread, terminate process */
/* synch_lock and pending_synch_count act as a semaphore */
/* must be a spin_mutex_t for use in check_wait_at_safe_spot() */
spin_mutex_t *synch_lock;
/* we allow pending_synch_count to be read without holding the synch_lock
* so all updates should be ATOMIC as well as holding the lock */
int pending_synch_count;
/* To guarantee that the thread really has this permission you need to hold the
* synch_lock when you read this value. If the target thread is suspended, use a
* trylock, as it could have been suspended while holding synch_lock (i#2805).
*/
thread_synch_permission_t synch_perm;
/* Only valid while holding all_threads_synch_lock and thread_initexit_lock. Set
* to whether synch_with_all_threads was successful in synching this thread.
*/
bool synch_with_success;
/* Case 10101: allows threads waiting_at_safe_spot() to set their own
* contexts. This use sometimes requires a full os-specific context, which
* we hide behind a generic pointer and a size.
*/
priv_mcontext_t *set_mcontext;
void *set_context;
size_t set_context_size;
#ifdef X64
/* PR 263338: we have to pad for alignment */
byte *set_context_alloc;
#endif
} thread_synch_data_t;
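/* Illustrative note, not part of the original file: synch_lock plus
* pending_synch_count behave like a semaphore. A synch initiator pairs
* adjust_wait_at_safe_spot(tr->dcontext, 1);
* ... suspend/translate attempts ...
* adjust_wait_at_safe_spot(tr->dcontext, -1);
* while the target thread polls should_wait_at_safe_spot() and parks itself
* in check_wait_at_safe_spot() until the count drains back to zero.
*/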
/* This lock prevents more than one thread from being in the synch_with_all_
* threads method body at the same time (which would lead to deadlock as they
* tried to synchronize with each other)
*/
DECLARE_CXTSWPROT_VAR(mutex_t all_threads_synch_lock,
INIT_LOCK_FREE(all_threads_synch_lock));
/* pass either mc or both cxt and cxt_size */
static void
free_setcontext(priv_mcontext_t *mc, void *cxt, size_t cxt_size _IF_X64(byte *cxt_alloc))
{
if (mc != NULL) {
ASSERT(cxt == NULL);
global_heap_free(mc, sizeof(*mc) HEAPACCT(ACCT_OTHER));
} else if (cxt != NULL) {
ASSERT(cxt_size > 0);
global_heap_free(IF_X64_ELSE(cxt_alloc, cxt), cxt_size HEAPACCT(ACCT_OTHER));
}
}
static void
synch_thread_free_setcontext(thread_synch_data_t *tsd)
{
free_setcontext(tsd->set_mcontext, tsd->set_context,
tsd->set_context_size _IF_X64(tsd->set_context_alloc));
tsd->set_mcontext = NULL;
tsd->set_context = NULL;
}
void
synch_init(void)
{
}
void
synch_exit(void)
{
ASSERT(uninit_thread_count == 0);
DELETE_LOCK(all_threads_synch_lock);
}
void
synch_thread_init(dcontext_t *dcontext)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)heap_alloc(
dcontext, sizeof(thread_synch_data_t) HEAPACCT(ACCT_OTHER));
dcontext->synch_field = (void *)tsd;
tsd->pending_synch_count = 0;
tsd->synch_perm = THREAD_SYNCH_NONE;
tsd->synch_with_success = false;
tsd->set_mcontext = NULL;
tsd->set_context = NULL;
/* the synch_lock is in unprotected memory so that check_wait_at_safe_spot
* can call the EXITING_DR hook before releasing it */
tsd->synch_lock = HEAP_TYPE_ALLOC(dcontext, spin_mutex_t, ACCT_OTHER, UNPROTECTED);
ASSIGN_INIT_SPINMUTEX_FREE(*tsd->synch_lock, synch_lock);
}
void
synch_thread_exit(dcontext_t *dcontext)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
/* Could be waiting at safe spot when we detach or exit */
synch_thread_free_setcontext(tsd);
DELETE_SPINMUTEX(*tsd->synch_lock);
/* Note that we do need to free this in non-debug builds since, despite
* appearances, UNPROTECTED_LOCAL is actually allocated on a global
* heap. */
HEAP_TYPE_FREE(dcontext, tsd->synch_lock, spin_mutex_t, ACCT_OTHER, UNPROTECTED);
#ifdef DEBUG
/* for non-debug we do fast exit path and don't free local heap */
/* clean up tsd fields here */
heap_free(dcontext, tsd, sizeof(thread_synch_data_t) HEAPACCT(ACCT_OTHER));
#endif
}
/* Check for a no-xfer permission. Currently used only for case 6821,
* where we need to distinguish three groups: unsafe (wait for safe
* point), safe and translatable, and safe but not translatable.
*/
bool
thread_synch_state_no_xfer(dcontext_t *dcontext)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
/* We use a trylock in case the thread is suspended holding synch_lock (i#2805). */
if (spinmutex_trylock(tsd->synch_lock)) {
bool res = (tsd->synch_perm == THREAD_SYNCH_NO_LOCKS_NO_XFER ||
tsd->synch_perm == THREAD_SYNCH_VALID_MCONTEXT_NO_XFER);
spinmutex_unlock(tsd->synch_lock);
return res;
}
return false;
}
bool
thread_synch_check_state(dcontext_t *dcontext, thread_synch_permission_t desired_perm)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
/* We support calling this routine from our signal handler when it has interrupted
* DR and might be holding tsd->synch_lock or other locks.
* We first check synch_perm w/o a lock and if it's not at least
* THREAD_SYNCH_NO_LOCKS we do not attempt to grab synch_lock (we'd hit rank order
* violations). If that check passes, the only problematic lock is if we already
* hold synch_lock, so we use test and trylocks there.
*/
if (desired_perm < THREAD_SYNCH_NO_LOCKS) {
ASSERT(desired_perm == THREAD_SYNCH_NONE);
return true;
}
if (!THREAD_SYNCH_SAFE(tsd->synch_perm, desired_perm))
return false;
/* barrier to keep the 1st check above on this side of the lock below */
#ifdef WINDOWS
MemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");
#endif
/* We use a trylock in case the thread is suspended holding synch_lock (i#2805).
* We start with testlock to avoid recursive lock assertions.
*/
if (!spinmutex_testlock(tsd->synch_lock) && spinmutex_trylock(tsd->synch_lock)) {
bool res = THREAD_SYNCH_SAFE(tsd->synch_perm, desired_perm);
spinmutex_unlock(tsd->synch_lock);
return res;
}
return false;
}
/* Only valid while holding all_threads_synch_lock and thread_initexit_lock. Set to
* whether synch_with_all_threads was successful in synching this thread.
* Cannot be called when THREAD_SYNCH_*_AND_CLEANED was requested as the
* thread-local memory will be freed on success!
*/
bool
thread_synch_successful(thread_record_t *tr)
{
thread_synch_data_t *tsd;
ASSERT(tr != NULL && tr->dcontext != NULL);
ASSERT_OWN_MUTEX(true, &all_threads_synch_lock);
ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
tsd = (thread_synch_data_t *)tr->dcontext->synch_field;
return tsd->synch_with_success;
}
#ifdef UNIX
/* i#2659: the kernel is now doing auto-restart so we have to check for the
* pc being at the syscall.
*/
static bool
is_after_or_restarted_do_syscall(dcontext_t *dcontext, app_pc pc, bool check_vsyscall)
{
if (is_after_do_syscall_addr(dcontext, pc))
return true;
if (check_vsyscall && pc == vsyscall_sysenter_return_pc)
return true;
if (!get_at_syscall(dcontext)) /* rule out having just reached the syscall */
return false;
int syslen = syscall_instr_length(dr_get_isa_mode(dcontext));
if (is_after_do_syscall_addr(dcontext, pc + syslen))
return true;
if (check_vsyscall && pc + syslen == vsyscall_sysenter_return_pc)
return true;
return false;
}
#endif
bool
is_at_do_syscall(dcontext_t *dcontext, app_pc pc, byte *esp)
{
app_pc buf[2];
bool res = d_r_safe_read(esp, sizeof(buf), buf);
if (!res) {
ASSERT(res); /* we expect the stack to always be readable */
return false;
}
if (does_syscall_ret_to_callsite()) {
#ifdef WINDOWS
if (get_syscall_method() == SYSCALL_METHOD_INT && DYNAMO_OPTION(sygate_int)) {
return (pc == after_do_syscall_addr(dcontext) &&
buf[0] == after_do_syscall_code(dcontext));
} else {
return pc == after_do_syscall_code(dcontext);
}
#else
return is_after_or_restarted_do_syscall(dcontext, pc, false /*!vsys*/);
#endif
} else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
#ifdef WINDOWS
if (pc == vsyscall_after_syscall) {
if (DYNAMO_OPTION(sygate_sysenter))
return buf[1] == after_do_syscall_code(dcontext);
else
return buf[0] == after_do_syscall_code(dcontext);
} else {
/* not at a system call, could still have tos match after_do_syscall
* either by chance or because we leak that value on the app's stack
* (a non-transparency) */
ASSERT_CURIOSITY(buf[0] != after_do_syscall_code(dcontext));
return false;
}
#else
/* Even when the main syscall method is sysenter, we also have a
* do_int_syscall and do_clone_syscall that use int, so check both.
* Note that we don't modify the stack, so once we do sysenter syscalls
* inlined in the cache (PR 288101) we'll need some mechanism to
* distinguish those: but for now if a sysenter instruction is used it
* has to be do_syscall since DR's own syscalls are ints.
*/
return is_after_or_restarted_do_syscall(dcontext, pc, true /*vsys*/);
#endif
}
/* we can reach here w/ a fault prior to 1st syscall on Linux */
IF_WINDOWS(ASSERT_NOT_REACHED());
return false;
}
/* Helper function for at_safe_spot(). Note state for client-owned threads isn't
* considered valid since it may be holding client locks and doesn't correspond to
* an actual app state. Caller should handle client-owned threads appropriately. */
static bool
is_native_thread_state_valid(dcontext_t *dcontext, app_pc pc, byte *esp)
{
/* ref case 3675, the assumption is that if we aren't executing
* out of dr memory and our stack isn't in dr memory (to disambiguate
* pc in kernel32, ntdll etc.) then the app has a valid native context.
* However, we can't call is_dynamo_address() as it (and its children)
* grab too many different locks, all of which we would have to check
* here in the same manner as fcache_unit_areas.lock in at_safe_spot(). So
* instead we just check the pc for the dr dll, interception code, and
* do_syscall regions and check the stack against the thread's dr stack
* and the d_r_initstack, all of which we can do without grabbing any locks.
* That should be sufficient at this point, FIXME try to use something
* like is_dynamo_address() to make this more maintainable */
/* For sysenter system calls we also have to check the top of the stack
* for the after_do_syscall_address to catch the do_syscall @ syscall
* itself case. */
ASSERT(esp != NULL);
ASSERT(is_thread_currently_native(dcontext->thread_record));
#ifdef WINDOWS
if (pc == (app_pc)thread_attach_takeover) {
/* We are trying to take over this thread but it has not yet been
* scheduled. It was native, and can't hold any DR locks.
*/
return true;
}
#endif
return (!is_in_dynamo_dll(pc) &&
IF_WINDOWS(!is_part_of_interception(pc) &&)(
!in_generated_routine(dcontext, pc) ||
/* we allow native thread to be at do_syscall - for int syscalls the pc
* (syscall return point) will be in do_syscall (so in generated routine)
* xref case 9333 */
is_at_do_syscall(dcontext, pc, esp)) &&
!is_on_initstack(esp) && !is_on_dstack(dcontext, esp) &&
IF_CLIENT_INTERFACE(!is_in_client_lib(pc) &&)
/* xref PR 200067 & 222812 on client-owned native threads */
IF_CLIENT_INTERFACE(!IS_CLIENT_THREAD(dcontext) &&)
#ifdef HOT_PATCHING_INTERFACE
/* Shouldn't be in the middle of executing a hotp_only patch. The
* check for being in hotp_dll is DR_WHERE_HOTPATCH because the patch can
* change esp.
*/
(dcontext->whereami != DR_WHERE_HOTPATCH &&
/* dynamo dll check has been done */
!hotp_only_in_tramp(pc)) &&
#endif
true /* no effect, simplifies ifdef handling with && above */
);
}
/* Translates the context mcontext for the given thread trec. If
* restore_memory is true, also restores any memory values that were
* shifted (primarily due to clients). If restore_memory is true, the
* caller should always relocate the translated thread, as it may not
* execute properly if left at its current location (it could be in the
* middle of client code in the cache).
* If recreate_app_state() is called, f will be passed through to it.
*
* Like any instance where a thread_record_t is used by a thread other than its
* owner, the caller must hold the thread_initexit_lock to ensure that it
* remains valid.
* Requires thread trec is at_safe_spot().
*/
bool
translate_mcontext(thread_record_t *trec, priv_mcontext_t *mcontext, bool restore_memory,
fragment_t *f)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field;
bool res;
recreate_success_t success;
bool native_translate = false;
ASSERT(tsd->pending_synch_count >= 0);
/* check if native thread */
if (is_thread_currently_native(trec)) {
/* running natively, no need to translate unless at do_syscall for an
* intercepted-via-trampoline syscall which we allow now for case 9333 */
#ifdef CLIENT_INTERFACE
if (IS_CLIENT_THREAD(trec->dcontext)) {
/* don't need to translate anything */
LOG(THREAD_GET, LOG_SYNCH, 1,
"translate context, thread " TIDFMT " is client "
"thread, no translation needed\n",
trec->id);
return true;
}
#endif
if (is_native_thread_state_valid(trec->dcontext, (app_pc)mcontext->pc,
(byte *)mcontext->xsp)) {
#ifdef WINDOWS
if ((app_pc)mcontext->pc == (app_pc)thread_attach_takeover) {
LOG(THREAD_GET, LOG_SYNCH, 1,
"translate context, thread " TIDFMT " at "
"takeover point\n",
trec->id);
thread_attach_translate(trec->dcontext, mcontext, restore_memory);
return true;
}
#endif
if (is_at_do_syscall(trec->dcontext, (app_pc)mcontext->pc,
(byte *)mcontext->xsp)) {
LOG(THREAD_GET, LOG_SYNCH, 1,
"translate context, thread " TIDFMT " running "
"natively, at do_syscall so translation needed\n",
trec->id);
native_translate = true;
} else {
LOG(THREAD_GET, LOG_SYNCH, 1,
"translate context, thread " TIDFMT " running "
"natively, no translation needed\n",
trec->id);
return true;
}
} else {
/* now that do_syscall is a safe spot for native threads we shouldn't get
* here for get context on self, FIXME - is however possible to get here
* via get_context on unsuspended thread (result of which is technically
* undefined according to MS), see get_context post sys comments
* (should prob. synch there in which case can assert here) */
ASSERT(trec->id != d_r_get_thread_id());
ASSERT_CURIOSITY(false &&
"translate failure, likely get context on "
"unsuspended native thread");
/* we'll just try to translate and hope for the best */
native_translate = true;
}
}
if (!native_translate) {
/* check if waiting at a good spot */
spinmutex_lock(tsd->synch_lock);
res = THREAD_SYNCH_SAFE(tsd->synch_perm, THREAD_SYNCH_VALID_MCONTEXT);
spinmutex_unlock(tsd->synch_lock);
if (res) {
LOG(THREAD_GET, LOG_SYNCH, 1,
"translate context, thread " TIDFMT " waiting at "
"valid mcontext point, copying over\n",
trec->id);
DOLOG(2, LOG_SYNCH, {
LOG(THREAD_GET, LOG_SYNCH, 2, "Thread State\n");
dump_mcontext(get_mcontext(trec->dcontext), THREAD_GET, DUMP_NOT_XML);
});
*mcontext = *get_mcontext(trec->dcontext);
#ifdef CLIENT_INTERFACE
if (dr_xl8_hook_exists()) {
if (!instrument_restore_nonfcache_state(trec->dcontext, true, mcontext))
return false;
}
#endif
return true;
}
}
/* In case 4148 we see a thread calling NtGetContextThread on itself, which
* is undefined according to MS but it does get the syscall address, so it's
* fine with us. For other threads the app shouldn't be asking about them
* unless they're suspended, and the same goes for us.
*/
ASSERT_CURIOSITY(trec->dcontext->whereami == DR_WHERE_FCACHE ||
trec->dcontext->whereami == DR_WHERE_SIGNAL_HANDLER ||
native_translate || trec->id == d_r_get_thread_id());
LOG(THREAD_GET, LOG_SYNCH, 2,
"translate context, thread " TIDFMT " at pc_recreatable spot translating\n",
trec->id);
success = recreate_app_state(trec->dcontext, mcontext, restore_memory, f);
if (success != RECREATE_SUCCESS_STATE) {
/* should never happen right?
* actually it does when deciding whether can deliver a signal
* immediately (PR 213040).
*/
LOG(THREAD_GET, LOG_SYNCH, 1,
"translate context, thread " TIDFMT " unable to translate context at pc"
" = " PFX "\n",
trec->id, mcontext->pc);
SYSLOG_INTERNAL_WARNING_ONCE("failed to translate");
return false;
}
return true;
}
static bool
waiting_at_safe_spot(thread_record_t *trec, thread_synch_state_t desired_state)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field;
ASSERT(tsd->pending_synch_count >= 0);
/* Check if waiting at a good spot. We can't spin in case the suspended thread is
* holding this lock (e.g., i#2805). We only need the lock to check synch_perm.
*/
if (spinmutex_trylock(tsd->synch_lock)) {
thread_synch_permission_t perm = tsd->synch_perm;
bool res = THREAD_SYNCH_SAFE(perm, desired_state);
spinmutex_unlock(tsd->synch_lock);
if (res) {
LOG(THREAD_GET, LOG_SYNCH, 2,
"thread " TIDFMT " waiting at safe spot (synch_perm=%d)\n", trec->id,
perm);
return true;
}
} else {
LOG(THREAD_GET, LOG_SYNCH, 2,
"at_safe_spot unable to get locks to test if thread " TIDFMT " is waiting "
"at safe spot\n",
trec->id);
}
return false;
}
#ifdef CLIENT_SIDELINE
static bool
should_suspend_client_thread(dcontext_t *dcontext, thread_synch_state_t desired_state)
{
/* Marking un-suspendable does not apply to cleaning/terminating */
ASSERT(IS_CLIENT_THREAD(dcontext));
return (THREAD_SYNCH_IS_CLEANED(desired_state) || dcontext->client_data->suspendable);
}
#endif
/* Checks whether thread trec is at a spot suitable for the requested
* desired_state.
* Requires that the trec thread is suspended. */
/* Note that since trec is potentially suspended at an arbitrary point,
* this function (and any function it calls) cannot call mutex_lock as
* trec thread may hold a lock. It is ok for at_safe_spot to return false if
* it can't obtain a lock on the first try. FIXME : in the long term we may
* want to go to a locking model that stores the thread id of the owner in
* which case we can check for this situation directly
*/
bool
at_safe_spot(thread_record_t *trec, priv_mcontext_t *mc,
thread_synch_state_t desired_state)
{
bool safe = false;
if (waiting_at_safe_spot(trec, desired_state))
return true;
#ifdef ARM
if (TESTANY(EFLAGS_IT, mc->cpsr)) {
LOG(THREAD_GET, LOG_SYNCH, 2,
"thread " TIDFMT " not at safe spot (pc=" PFX " in an IT block) for %d\n",
trec->id, mc->pc, desired_state);
return false;
}
#endif
/* check if suspended at good spot */
/* FIXME: right now don't distinguish between suspend and term privileges
* even though suspend is stronger requirement, are the checks below
* sufficient */
/* FIXME : check with respect to flush, should be ok */
/* test fcache_unit_areas.lock (from fcache.c) before calling recreate_app_state
* since it calls in_fcache() which uses the lock (if we are in_fcache()
* assume other locks are not a problem (so is_dynamo_address is fine)) */
/* Right now the only dr code that ends up in the cache is our DLL main
* (which we'll reduce/get rid of with libc independence), our takeover
* from preinject return stack, and the callback.c interception code.
* FIXME : test for just these and ASSERT(!is_dynamo_address) otherwise */
if (is_thread_currently_native(trec)) {
/* thread is running native, verify is not in dr code */
#ifdef CLIENT_INTERFACE
/* We treat client-owned threads (such as a client nudge thread) as native and
* consider them safe if they are in the client_lib. Since they might own client
* locks that could block application threads from progressing, we synchronize
* with them last. FIXME - xref PR 231301 - since we can't disambiguate
* client->ntdll/gencode which is safe from client->dr->ntdll/gencode which isn't
* we disallow both. This could hurt synchronization efficiency if the client
* owned thread spent most of its execution time calling out of its lib to ntdll
* routines or generated code. */
if (IS_CLIENT_THREAD(trec->dcontext)) {
safe = (trec->dcontext->client_data->client_thread_safe_for_synch ||
is_in_client_lib(mc->pc)) &&
/* Do not cleanup/terminate a thread holding a client lock (PR 558463) */
/* Actually, don't consider a thread holding a client lock to be safe
* at all (PR 609569): client should use
* dr_client_thread_set_suspendable(false) if its thread spends a lot
* of time holding locks.
*/
(!should_suspend_client_thread(trec->dcontext, desired_state) ||
trec->dcontext->client_data->mutex_count == 0);
}
#endif
if (is_native_thread_state_valid(trec->dcontext, mc->pc, (byte *)mc->xsp)) {
safe = true;
/* We should always be able to translate a valid native state, but be
* sure to check before thread_attach_exit().
*/
ASSERT(translate_mcontext(trec, mc, false /*just querying*/, NULL));
#ifdef WINDOWS
if (mc->pc == (app_pc)thread_attach_takeover &&
THREAD_SYNCH_IS_CLEANED(desired_state)) {
/* The takeover data will be freed at process exit, but we might
* clean up a thread mid-run, so make sure we free the data.
*/
thread_attach_exit(trec->dcontext, mc);
}
#endif
}
#ifdef CLIENT_INTERFACE
} else if (desired_state == THREAD_SYNCH_TERMINATED_AND_CLEANED &&
trec->dcontext->whereami == DR_WHERE_FCACHE &&
trec->dcontext->client_data->at_safe_to_terminate_syscall) {
/* i#1420: At safe to terminate syscall like dr_sleep in a clean call.
* XXX: A thread in dr_sleep might not be safe to terminate for some
* corner cases: for example, a client may hold a lock and then go sleep,
* terminating it may mess the client up for not releasing the lock.
* We limit this to the thread being in fcache (i.e., from a clean call)
* to rule out some corner cases.
*/
safe = true;
#endif
} else if ((!WRITE_LOCK_HELD(&fcache_unit_areas->lock) &&
/* even though we only need the read lock, if our target holds it
* and a 3rd thread requests the write lock, we'll hang if we
* ask for the read lock (case 7493)
*/
!READ_LOCK_HELD(&fcache_unit_areas->lock)) &&
recreate_app_state(trec->dcontext, mc, false /*just query*/, NULL) ==
RECREATE_SUCCESS_STATE &&
/* It's ok to call is_dynamo_address even though it grabs many
* locks because recreate_app_state succeeded.
*/
!is_dynamo_address(mc->pc)) {
safe = true;
}
if (safe) {
ASSERT(trec->dcontext->whereami == DR_WHERE_FCACHE ||
trec->dcontext->whereami == DR_WHERE_SIGNAL_HANDLER ||
is_thread_currently_native(trec));
LOG(THREAD_GET, LOG_SYNCH, 2,
"thread " TIDFMT " suspended at safe spot pc=" PFX "\n", trec->id, mc->pc);
return true;
}
LOG(THREAD_GET, LOG_SYNCH, 2,
"thread " TIDFMT " not at safe spot (pc=" PFX ") for %d\n", trec->id, mc->pc,
desired_state);
return false;
}
/* a fast way to tell a thread if it should call check_wait_at_safe_spot
* if translating context would be expensive */
bool
should_wait_at_safe_spot(dcontext_t *dcontext)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
return (tsd->pending_synch_count != 0);
}
/* use with care! normally check_wait_at_safe_spot() should be called instead */
void
set_synch_state(dcontext_t *dcontext, thread_synch_permission_t state)
{
if (state >= THREAD_SYNCH_NO_LOCKS)
ASSERT_OWN_NO_LOCKS();
thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
/* We have a wart in the settings here (i#2805): a caller can set
* THREAD_SYNCH_NO_LOCKS, yet here we're acquiring locks. In fact if this thread
* is suspended in between the lock and the unset of synch_perm from
* THREAD_SYNCH_NO_LOCKS back to THREAD_SYNCH_NONE, it can cause problems. We
* have everyone who might query in such a state use a trylock and assume
* synch_perm is THREAD_SYNCH_NONE if the lock cannot be acquired.
*/
spinmutex_lock(tsd->synch_lock);
tsd->synch_perm = state;
spinmutex_unlock(tsd->synch_lock);
}
/* checks to see if any threads are waiting to synch with this one and waits
* if they are
* cur_state - a given permission define from above that describes the current
* state of the caller
* NOTE - Requires the caller is !could_be_linking (i.e. not in an
* enter_couldbelinking state)
*/
void
check_wait_at_safe_spot(dcontext_t *dcontext, thread_synch_permission_t cur_state)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
app_pc pc;
byte cxt[MAX(CONTEXT_HEAP_SIZE_OPAQUE, sizeof(priv_mcontext_t))];
bool set_context = false;
bool set_mcontext = false;
if (tsd->pending_synch_count == 0 || cur_state == THREAD_SYNCH_NONE)
return;
ASSERT(tsd->pending_synch_count >= 0);
pc = get_mcontext(dcontext)->pc;
LOG(THREAD, LOG_SYNCH, 2, "waiting for synch with state %d (pc " PFX ")\n", cur_state,
pc);
if (cur_state == THREAD_SYNCH_VALID_MCONTEXT) {
ASSERT(!is_dynamo_address(pc));
/* for detach must set this here and now */
IF_WINDOWS(IF_CLIENT_INTERFACE(set_last_error(dcontext->app_errno)));
}
spinmutex_lock(tsd->synch_lock);
tsd->synch_perm = cur_state;
/* Since can be killed, suspended, etc. must call the exit dr hook. But, to
* avoid races, we must do so before giving up the synch_lock. This is why
* that lock has to be in unprotected memory. FIXME - for single thread in
* dr this will lead to rank order violation between dr exclusivity lock
* and the synch_lock with no easy workaround (real deadlocks possible).
* Luckily we'll prob. never use that option. */
if (INTERNAL_OPTION(single_thread_in_DR)) {
ASSERT_NOT_IMPLEMENTED(false);
}
EXITING_DR();
/* Ref case 5074, for us/app to successfully SetThreadContext at
* this synch point, this thread can NOT be at a system call. So, for
* case 10101, we instead have threads that are waiting_at_safe_spot()
* set their own contexts, allowing us to make system calls here.
* We don't yet handle the detach case, so it still requires no system
* calls, including the act of releasing the synch_lock
* which is why that lock has to be a user mode spin yield lock.
* FIXME: we could change tsd->synch_lock back to a regular lock
* once we have detach handling system calls here.
*/
spinmutex_unlock(tsd->synch_lock);
while (tsd->pending_synch_count > 0 && tsd->synch_perm != THREAD_SYNCH_NONE) {
STATS_INC_DC(dcontext, synch_loops_wait_safe);
#ifdef WINDOWS
if (started_detach) {
/* We spin for any non-detach synchs encountered during detach
* since we have no flag telling us this synch is for detach. */
/* Ref case 5074, can NOT use os_thread_yield here. This must be a user
* mode spin loop. */
SPINLOCK_PAUSE();
} else {
#endif
/* FIXME case 10100: replace this sleep/yield with a wait_for_event() */
synch_thread_yield();
#ifdef WINDOWS
}
#endif
}
/* Regain the synch_lock before ENTERING_DR to avoid races with getting
* suspended/killed in the middle of ENTERING_DR (before synch_perm is
* reset to NONE). */
/* Ref case 5074, for detach we still can NOT use os_thread_yield here (no system
* calls) so don't allow the spinmutex_lock to yield while grabbing the lock. */
spinmutex_lock_no_yield(tsd->synch_lock);
ENTERING_DR();
tsd->synch_perm = THREAD_SYNCH_NONE;
if (tsd->set_mcontext != NULL || tsd->set_context != NULL) {
IF_WINDOWS(ASSERT(!started_detach));
/* Make a local copy */
ASSERT(sizeof(cxt) >= sizeof(priv_mcontext_t));
if (tsd->set_mcontext != NULL) {
set_mcontext = true;
memcpy(cxt, tsd->set_mcontext, sizeof(*tsd->set_mcontext));
} else {
set_context = true;
memcpy(cxt, tsd->set_context, tsd->set_context_size);
}
synch_thread_free_setcontext(tsd); /* sets to NULL for us */
}
spinmutex_unlock(tsd->synch_lock);
LOG(THREAD, LOG_SYNCH, 2, "done waiting for synch with state %d (pc " PFX ")\n",
cur_state, pc);
if (set_mcontext || set_context) {
/* FIXME: see comment in dispatch.c check_wait_at_safe_spot() call
* about problems with KSTART(fcache_* differences bet the target
* being at the synch point vs in the cache.
*/
if (set_mcontext)
thread_set_self_mcontext((priv_mcontext_t *)cxt);
else
thread_set_self_context((void *)cxt);
ASSERT_NOT_REACHED();
}
}
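/* Illustrative call-site sketch, not part of the original file: the cheap
* flag test in should_wait_at_safe_spot() is meant to gate the heavier
* handshake above, i.e. roughly:
* if (should_wait_at_safe_spot(dcontext))
* check_wait_at_safe_spot(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
*/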
/* adjusts the pending synch count */
void
adjust_wait_at_safe_spot(dcontext_t *dcontext, int amt)
{
thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field;
ASSERT(tsd->pending_synch_count >= 0);
spinmutex_lock(tsd->synch_lock);
ATOMIC_ADD(int, tsd->pending_synch_count, amt);
spinmutex_unlock(tsd->synch_lock);
}
/* Case 10101: Safely sets the context for a target thread that may be waiting at a
* safe spot, in which case we do not want to directly do a setcontext as the return
* from the yield or wait system call will mess up the state (case 5074).
* Assumes that cxt was allocated on the global heap, and frees it, rather than
* making its own copy (as an optimization).
* Does not work on the executing thread.
* Caller must hold thread_initexit_lock.
* If used on behalf of the app, it's up to the caller to check for privileges.
*/
bool
set_synched_thread_context(thread_record_t *trec,
/* pass either mc or both cxt and cxt_size */
priv_mcontext_t *mc, void *cxt, size_t cxt_size,
thread_synch_state_t desired_state _IF_X64(byte *cxt_alloc)
_IF_WINDOWS(NTSTATUS *status /*OUT*/))
{
bool res = true;
ASSERT(trec != NULL && trec->dcontext != NULL);
ASSERT(trec->dcontext != get_thread_private_dcontext());
ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
#ifdef WINDOWS
if (status != NULL)
*status = STATUS_SUCCESS;
#endif
if (waiting_at_safe_spot(trec, desired_state)) {
/* case 10101: to allow system calls in check_wait_at_safe_spot() for
* performance reasons we have the waiting thread perform its own setcontext.
*/
thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field;
spinmutex_lock(tsd->synch_lock);
if (tsd->set_mcontext != NULL || tsd->set_context != NULL) {
/* Two synchs in a row while still waiting; 2nd takes precedence */
STATS_INC(wait_multiple_setcxt);
synch_thread_free_setcontext(tsd);
}
#ifdef WINDOWS
LOG(THREAD_GET, LOG_SYNCH, 2,
"set_synched_thread_context %d to pc " PFX " via %s\n", trec->id,
(mc != NULL) ? mc->pc : (app_pc)((CONTEXT *)cxt)->CXT_XIP,
(mc != NULL) ? "mc" : "CONTEXT");
#else
ASSERT_NOT_IMPLEMENTED(mc != NULL); /* XXX: need sigcontext or sig_full_cxt_t */
#endif
if (mc != NULL)
tsd->set_mcontext = mc;
else {
ASSERT(cxt != NULL && cxt_size > 0);
tsd->set_context = cxt;
tsd->set_context_size = cxt_size;
}
IF_X64(tsd->set_context_alloc = cxt_alloc);
ASSERT(THREAD_SYNCH_SAFE(tsd->synch_perm, desired_state));
ASSERT(tsd->pending_synch_count >= 0);
/* Don't need to change pending_synch_count or anything; when thread is
* resumed it will properly reset everything itself */
spinmutex_unlock(tsd->synch_lock);
} else {
if (mc != NULL) {
res = thread_set_mcontext(trec, mc);
} else {
#ifdef WINDOWS
/* sort of ugly: but NtSetContextThread handling needs the status */
if (status != NULL) {
*status = nt_set_context(trec->handle, (CONTEXT *)cxt);
res = NT_SUCCESS(*status);
} else
res = thread_set_context(trec->handle, (CONTEXT *)cxt);
#else
/* currently there are no callers who don't pass mc: presumably
* PR 212090 will change that */
ASSERT_NOT_IMPLEMENTED(false);
#endif
}
free_setcontext(mc, cxt, cxt_size _IF_X64(cxt_alloc));
}
return res;
}
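/* Illustrative cross-reference, not part of the original file: when the
* target is parked in check_wait_at_safe_spot(), the mcontext/CONTEXT stored
* above is consumed at the bottom of that routine via
* thread_set_self_mcontext() or thread_set_self_context(), so the context
* change happens on the target's own thread rather than via a cross-thread
* setcontext system call (cases 10101/5074).
*/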
/* This is used to limit the maximum number of times synch_with_thread or
* synch_with_all_threads spin yield loops while waiting on an exiting thread.
* We assert if we ever break out of the loop because of this limit. FIXME make
* sure this limit is large enough that if it does ever trigger it's because
* of some kind of deadlock situation. Breaking out of the synchronization loop
* early is a correctness issue. Right now the limits are large but arbitrary.
* FIXME : once we are confident about thread synch get rid of these max loop checks.
* N.B.: the THREAD_SYNCH_SMALL_LOOP_MAX flag causes us to divide these by 10.
*/
#define SYNCH_ALL_THREADS_MAXIMUM_LOOPS (DYNAMO_OPTION(synch_all_threads_max_loops))
#define SYNCH_MAXIMUM_LOOPS (DYNAMO_OPTION(synch_thread_max_loops))
/* Amount of time in ms to wait for threads to get to a safe spot per loop;
* see comments in synch_thread_yield() on the value. Our default value is 5ms which,
* depending on the tick resolution could end up being as long as 10 ms. */
#define SYNCH_WITH_WAIT_MS ((int)DYNAMO_OPTION(synch_with_sleep_time))
/* for use by synch_with_* routines to wait for thread(s) */
static void
synch_thread_yield()
{
/* xref 9400, 9488 - os_thread_yield() works ok on an UP machine, but on an MP machine
* yield might not actually do anything (in which case we burn through to the max
* loop counts pretty quickly). We actually do want to wait a reasonable amount of time
* since the target thread might be doing some long latency dr operation (like
* dumping 500kb of registry into a forensics file) so we have the option to sleep
* instead. */
uint num_procs = get_num_processors();
ASSERT(num_procs != 0);
if ((num_procs == 1 && DYNAMO_OPTION(synch_thread_sleep_UP)) ||
(num_procs > 1 && DYNAMO_OPTION(synch_thread_sleep_MP))) {
os_thread_sleep(SYNCH_WITH_WAIT_MS);
} else {
os_thread_yield();
}
}
/* returns a thread_synch_result_t value
* id - the thread you want to synch with
* block - whether or not should spin until synch is successful
* hold_initexit_lock - whether or not the caller holds the thread_initexit_lock
* caller_state - a given permission define from above that describes the
* current state of the caller (note that holding the initexit
* lock is ok with respect to NO_LOCK)
* desired_state - a requested state define from above that describes the
* desired synchronization
* flags - options from THREAD_SYNCH_ bitmask values
* NOTE - if you hold the initexit_lock and block with greater than NONE for
* caller state, then initexit_lock may be released and re-acquired
* NOTE - if any of the nt_ routines fails, it is assumed the thread no longer
* exists and returns true
* NOTE - if called directly (i.e. not through synch_with_all_threads)
* requires THREAD_SYNCH_IS_SAFE(caller_state, desired_state) to avoid deadlock
* NOTE - Requires the caller is !could_be_linking (i.e. not in an
* enter_couldbelinking state)
* NOTE - you can't call this with a thread that you've already suspended
*/
thread_synch_result_t
synch_with_thread(thread_id_t id, bool block, bool hold_initexit_lock,
thread_synch_permission_t caller_state,
thread_synch_state_t desired_state, uint flags)
{
thread_id_t my_id = d_r_get_thread_id();
uint loop_count = 0;
int expect_exiting = 0;
thread_record_t *my_tr = thread_lookup(my_id), *trec = NULL;
dcontext_t *dcontext = NULL;
priv_mcontext_t mc;
thread_synch_result_t res = THREAD_SYNCH_RESULT_NOT_SAFE;
bool first_loop = true;
IF_UNIX(bool actually_suspended = true;)
const uint max_loops = TEST(THREAD_SYNCH_SMALL_LOOP_MAX, flags)
? (SYNCH_MAXIMUM_LOOPS / 10)
: SYNCH_MAXIMUM_LOOPS;
ASSERT(id != my_id);
/* Must set ABORT or IGNORE. Only the caller can RETRY, as a new set of
* threads is needed for that, hoping the problematic one is short-lived.
*/
ASSERT(
TESTANY(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE,
flags) &&
!TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE,
flags));
if (my_tr != NULL) {
dcontext = my_tr->dcontext;
expect_exiting = dcontext->is_exiting ? 1 : 0;
ASSERT(exiting_thread_count >= expect_exiting);
} else {
/* calling thread should always be a known thread */
ASSERT_NOT_REACHED();
}
LOG(THREAD, LOG_SYNCH, 2,
"Synching with thread " TIDFMT ", giving %d, requesting %d, blocking=%d\n", id,
caller_state, desired_state, block);
if (!hold_initexit_lock)
d_r_mutex_lock(&thread_initexit_lock);
while (true) {
/* get thread record */
/* FIXME : thread id recycling is possible that this could be a
* different thread, perhaps we should take handle instead of id
* FIXME: use the new num field of thread_record_t?
*/
LOG(THREAD, LOG_SYNCH, 3, "Looping on synch with thread " TIDFMT "\n", id);
trec = thread_lookup(id);
/* We test the exiting thread count to avoid races between terminate/
* suspend thread (current thread, though we could be here for other
* reasons) and an exiting thread (who might no longer be on the all
* threads list) who is still using shared resources (ref case 3121) */
if ((trec == NULL && exiting_thread_count == expect_exiting) ||
loop_count++ > max_loops) {
/* make sure we didn't exit the loop without synchronizing, FIXME :
* in release builds we assume the synchronization is failing and
* continue without it, but that is dangerous.
* It is now up to the caller to handle this, and some use
* small loop counts and abort on failure, so only a curiosity. */
ASSERT_CURIOSITY(loop_count < max_loops);
LOG(THREAD, LOG_SYNCH, 3,
"Exceeded loop count synching with thread " TIDFMT "\n", id);
goto exit_synch_with_thread;
}
DOSTATS({
if (trec == NULL && exiting_thread_count > expect_exiting) {
LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread\n");
STATS_INC(synch_yields_for_exiting_thread);
}
});
#ifdef UNIX
if (trec != NULL && trec->execve) {
/* i#237/PR 498284: clean up vfork "threads" that invoked execve.
* There should be no race since vfork suspends the parent.
*/
res = THREAD_SYNCH_RESULT_SUCCESS;
actually_suspended = false;
break;
}
#endif
if (trec != NULL) {
if (first_loop) {
adjust_wait_at_safe_spot(trec->dcontext, 1);
first_loop = false;
}
if (!os_thread_suspend(trec)) {
/* FIXME : eventually should be a real assert once we figure out
* how to handle threads with low privilege handles */
/* For dr_api_exit, we may have missed a thread exit. */
ASSERT_CURIOSITY_ONCE(
IF_APP_EXPORTS(dr_api_exit ||)(false &&
"Thead synch unable to suspend target"
" thread, case 2096?"));
res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)
? THREAD_SYNCH_RESULT_SUCCESS
: THREAD_SYNCH_RESULT_SUSPEND_FAILURE);
IF_UNIX(actually_suspended = false);
break;
}
if (!thread_get_mcontext(trec, &mc)) {
/* FIXME : eventually should be a real assert once we figure out
* how to handle threads with low privilege handles */
ASSERT_CURIOSITY_ONCE(false &&
"Thead synch unable to get_context target"
" thread, case 2096?");
res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)
? THREAD_SYNCH_RESULT_SUCCESS
: THREAD_SYNCH_RESULT_SUSPEND_FAILURE);
/* Make sure to not leave suspended if not returning success */
if (!TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags))
os_thread_resume(trec);
break;
}
if (at_safe_spot(trec, &mc, desired_state)) {
/* FIXME: case 5325 for detach handling and testing */
IF_WINDOWS(
ASSERT_NOT_IMPLEMENTED(!dcontext->aslr_context.sys_aslr_clobbered));
LOG(THREAD, LOG_SYNCH, 2, "Thread " TIDFMT " suspended in good spot\n",
id);
LOG(trec->dcontext->logfile, LOG_SYNCH, 2,
"@@@@@@@@@@@@@@@@@@ SUSPENDED BY THREAD " TIDFMT " synch_with_thread "
"@@@@@@@@@@@@@@@@@@\n",
my_id);
res = THREAD_SYNCH_RESULT_SUCCESS;
break;
} else {
RSTATS_INC(synchs_not_at_safe_spot);
}
if (!os_thread_resume(trec)) {
ASSERT_NOT_REACHED();
res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)
? THREAD_SYNCH_RESULT_SUCCESS
: THREAD_SYNCH_RESULT_SUSPEND_FAILURE);
break;
}
}
/* don't loop if !block, before we ever release initexit_lock in case
* caller is holding it and not blocking, (i.e. wants to keep it) */
if (!block)
break;
/* see if someone is waiting for us */
if (dcontext != NULL && caller_state != THREAD_SYNCH_NONE &&
should_wait_at_safe_spot(dcontext)) {
if (trec != NULL)
adjust_wait_at_safe_spot(trec->dcontext, -1);
d_r_mutex_unlock(&thread_initexit_lock);
/* ref case 5552, if we've inc'ed the exiting thread count need to
* adjust it back before calling check_wait_at_safe_spot since we
* may end up being killed there */
if (dcontext->is_exiting) {
ASSERT(exiting_thread_count >= 1);
ATOMIC_DEC(int, exiting_thread_count);
}
check_wait_at_safe_spot(dcontext, caller_state);
if (dcontext->is_exiting) {
ATOMIC_INC(int, exiting_thread_count);
}
d_r_mutex_lock(&thread_initexit_lock);
trec = thread_lookup(id);
/* Like above, we test the exiting thread count to avoid races
* between terminate/suspend thread (current thread, though we
* could be here for other reasons) and an exiting thread (who
* might no longer be on the all threads list) who is still using
* shared resources (ref case 3121) */
if (trec == NULL && exiting_thread_count == expect_exiting) {
if (!hold_initexit_lock)
d_r_mutex_unlock(&thread_initexit_lock);
return THREAD_SYNCH_RESULT_SUCCESS;
}
DOSTATS({
if (trec == NULL && exiting_thread_count > expect_exiting) {
LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread\n");
STATS_INC(synch_yields_for_exiting_thread);
}
});
if (trec != NULL)
adjust_wait_at_safe_spot(trec->dcontext, 1);
}
STATS_INC(synch_yields);
d_r_mutex_unlock(&thread_initexit_lock);
/* Note - we only need call the ENTER/EXIT_DR hooks if single thread
* in dr since we are not really exiting DR here (we just need to give
* up the exclusion lock for a while to let thread we are trying to
* synch with make progress towards a safe synch point). */
if (INTERNAL_OPTION(single_thread_in_DR))
EXITING_DR(); /* give up DR exclusion lock */
synch_thread_yield();
if (INTERNAL_OPTION(single_thread_in_DR))
ENTERING_DR(); /* re-gain DR exclusion lock */
d_r_mutex_lock(&thread_initexit_lock);
}
/* reset this back to before */
adjust_wait_at_safe_spot(trec->dcontext, -1);
/* success!, is suspended (or already exited) put in desired state */
if (res == THREAD_SYNCH_RESULT_SUCCESS) {
LOG(THREAD, LOG_SYNCH, 2,
"Success synching with thread " TIDFMT " performing cleanup\n", id);
if (THREAD_SYNCH_IS_TERMINATED(desired_state)) {
if (IF_UNIX_ELSE(!trec->execve, true))
os_thread_terminate(trec);
#ifdef UNIX
/* We need to ensure the target thread has received the
* signal and is no longer using its sigstack or ostd struct
* before we clean those up.
*/
/* PR 452168: if failed to send suspend signal, do not spin */
if (actually_suspended) {
if (!is_thread_terminated(trec->dcontext)) {
/* i#96/PR 295561: use futex(2) if available. Blocks until
* the thread gets terminated.
*/
os_wait_thread_terminated(trec->dcontext);
}
} else
ASSERT(TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags));
#endif
}
if (THREAD_SYNCH_IS_CLEANED(desired_state)) {
dynamo_other_thread_exit(trec _IF_WINDOWS(false));
}
}
exit_synch_with_thread:
if (!hold_initexit_lock)
d_r_mutex_unlock(&thread_initexit_lock);
return res;
}
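/* Illustrative usage, not part of the original file: this mirrors the call
* made from synch_with_all_threads() below, where the caller already holds
* thread_initexit_lock and performs its own retry loop:
* res = synch_with_thread(threads[i]->id, false, true, THREAD_SYNCH_NONE,
* desired_synch_state, flags_one);
* where flags_one contains exactly one of THREAD_SYNCH_SUSPEND_FAILURE_ABORT
* or THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, per the assert at the top of this
* routine.
*/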
/* desired_synch_state - a requested state define from above that describes
* the synchronization required
* threads, num_threads - must not be NULL, if !THREAD_SYNCH_IS_CLEANED(desired
* synch_state) then will hold a list and num of threads
* cur_state - a given permission from above that describes the state of the
* caller
* flags - options from THREAD_SYNCH_ bitmask values
* NOTE - Requires that the caller doesn't hold the thread_initexit_lock, on
* return caller will hold the thread_initexit_lock
* NOTE - Requires the caller is !could_be_linking (i.e. not in an
* enter_couldbelinking state)
* NOTE - To avoid deadlock this routine should really only be called with
* cur_state giving maximum permissions (currently app_exit and detach could
* conflict, except our routes to app_exit go through a different synch point
* (TermThread or TermProcess) first)
* NOTE - when !all_synched, if desired_synch_state is not cleaned or synch result is
* ignored, the caller is responsible for resuming threads that are suspended,
* freeing allocation for threads array and releasing locks
* Caller should call end_synch_with_all_threads when finished to accomplish that.
*/
bool
synch_with_all_threads(thread_synch_state_t desired_synch_state,
/*OUT*/ thread_record_t ***threads_out,
/*OUT*/ int *num_threads_out, thread_synch_permission_t cur_state,
/* FIXME: turn the ThreadSynch* enums into bitmasks and merge
* into flags param */
uint flags)
{
/* Case 8815: we cannot use the OUT params themselves internally as they
* may be volatile, so we need our own values until we're ready to return
*/
bool threads_are_stale = true;
thread_record_t **threads = NULL;
int num_threads = 0;
/* we record ids from before we gave up thread_initexit_lock */
thread_id_t *thread_ids_temp = NULL;
int num_threads_temp = 0, i, j, expect_self_exiting = 0;
/* synch array contains a SYNCH_WITH_ALL_ value for each thread */
uint *synch_array = NULL, *synch_array_temp = NULL;
enum {
SYNCH_WITH_ALL_NEW = 0,
SYNCH_WITH_ALL_NOTIFIED = 1,
SYNCH_WITH_ALL_SYNCHED = 2,
};
bool all_synched = false;
thread_id_t my_id = d_r_get_thread_id();
uint loop_count = 0;
thread_record_t *tr = thread_lookup(my_id);
dcontext_t *dcontext = NULL;
uint flags_one; /* flags for synch_with_thread() call */
thread_synch_result_t synch_res;
const uint max_loops = TEST(THREAD_SYNCH_SMALL_LOOP_MAX, flags)
? (SYNCH_ALL_THREADS_MAXIMUM_LOOPS / 10)
: SYNCH_ALL_THREADS_MAXIMUM_LOOPS;
#ifdef CLIENT_INTERFACE
/* We treat client-owned threads as native but they don't have a clean native state
* for us to suspend them in (they are always in client or dr code). We need to be
* able to suspend such threads so that they're !couldbelinking and holding no dr
* locks. We make the assumption that client-owned threads that are in the client
* library (or are in a dr routine that has set client_data->client_thread_safe_for_synch)
* meet this requirement (see at_safe_spot()). As such, all we need to worry about
* here are client locks the client-owned thread might hold that could block other
* threads from reaching safe spots. If we only suspend client-owned threads once
* all other threads are taken care of then this is not a problem. FIXME - xref
* PR 231301 on issues that arise if the client thread spends most of its time
* calling out of its lib to dr API, ntdll, or generated code functions. */
bool finished_non_client_threads;
#endif
ASSERT(!dynamo_all_threads_synched);
/* flag any caller who does not give up enough permissions to avoid livelock
* with other synch_with_all_threads callers
*/
ASSERT_CURIOSITY(cur_state >= THREAD_SYNCH_NO_LOCKS_NO_XFER);
/* also flag anyone asking for full mcontext w/o possibility of no_xfer,
* which can also livelock
*/
ASSERT_CURIOSITY(desired_synch_state < THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT
/* detach currently violates this: bug 8942 */
|| started_detach);
/* must set exactly one of these -- FIXME: better way to check? */
ASSERT(
TESTANY(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE |
THREAD_SYNCH_SUSPEND_FAILURE_RETRY,
flags) &&
!TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE,
flags) &&
!TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_RETRY,
flags) &&
!TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE | THREAD_SYNCH_SUSPEND_FAILURE_RETRY,
flags));
flags_one = flags;
/* we'll do the retry */
if (TEST(THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags)) {
flags_one &= ~THREAD_SYNCH_SUSPEND_FAILURE_RETRY;
flags_one |= THREAD_SYNCH_SUSPEND_FAILURE_ABORT;
}
if (tr != NULL) {
dcontext = tr->dcontext;
expect_self_exiting = dcontext->is_exiting ? 1 : 0;
ASSERT(exiting_thread_count >= expect_self_exiting);
} else {
/* calling thread should always be a known thread */
ASSERT_NOT_REACHED();
}
LOG(THREAD, LOG_SYNCH, 1,
"synch with all threads my id = " SZFMT
" Giving %d permission and seeking %d state\n",
my_id, cur_state, desired_synch_state);
/* grab all_threads_synch_lock */
/* since all_threads synch doesn't give any permissions this is necessary
* to prevent deadlock in the case of two threads trying to synch with all
* threads at the same time */
/* FIXME: for DEADLOCK_AVOIDANCE, to preserve LIFO, should we
* exit DR, trylock, then immediately enter DR? introducing any
* race conditions in doing so?
* Ditto on all other os_thread_yields in this file!
*/
while (!d_r_mutex_trylock(&all_threads_synch_lock)) {
LOG(THREAD, LOG_SYNCH, 2, "Spinning on all threads synch lock\n");
STATS_INC(synch_yields);
if (dcontext != NULL && cur_state != THREAD_SYNCH_NONE &&
should_wait_at_safe_spot(dcontext)) {
/* ref case 5552, if we've inc'ed the exiting thread count need to
* adjust it back before calling check_wait_at_safe_spot since we
* may end up being killed there */
if (dcontext->is_exiting) {
ASSERT(exiting_thread_count >= 1);
ATOMIC_DEC(int, exiting_thread_count);
}
check_wait_at_safe_spot(dcontext, cur_state);
if (dcontext->is_exiting) {
ATOMIC_INC(int, exiting_thread_count);
}
}
LOG(THREAD, LOG_SYNCH, 2, "Yielding on all threads synch lock\n");
/* Note - we only need call the ENTER/EXIT_DR hooks if single thread
* in dr since we are not really exiting DR here (we just need to give
* up the exclusion lock for a while to let thread we are trying to
* synch with make progress towards a safe synch point). */
if (INTERNAL_OPTION(single_thread_in_DR))
EXITING_DR(); /* give up DR exclusion lock */
os_thread_yield();
if (INTERNAL_OPTION(single_thread_in_DR))
ENTERING_DR(); /* re-gain DR exclusion lock */
}
d_r_mutex_lock(&thread_initexit_lock);
/* synch with all threads */
/* FIXME: this should be a do/while loop - then we wouldn't have
* to initialize all the variables above
*/
while (threads_are_stale || !all_synched ||
exiting_thread_count > expect_self_exiting || uninit_thread_count > 0) {
if (threads != NULL) {
/* Case 8941: must free here rather than when yield (below) since
* termination condition can change between there and here
*/
ASSERT(num_threads > 0);
global_heap_free(threads,
num_threads *
sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
/* be paranoid */
threads = NULL;
num_threads = 0;
}
get_list_of_threads(&threads, &num_threads);
threads_are_stale = false;
synch_array = (uint *)global_heap_alloc(num_threads *
sizeof(uint) HEAPACCT(ACCT_THREAD_MGT));
for (i = 0; i < num_threads; i++) {
synch_array[i] = SYNCH_WITH_ALL_NEW;
}
/* FIXME: an inefficient algorithm, but it is not as bad as it seems
* since it is very unlikely that many threads have started or ended
* and the list threads routine always puts them in the same order
*/
/* on first loop num_threads_temp == 0 */
for (i = 0; i < num_threads_temp; i++) {
/* care only if we have already notified or synched thread */
if (synch_array_temp[i] != SYNCH_WITH_ALL_NEW) {
for (j = 0; j < num_threads; j++) {
/* FIXME : os recycles thread ids, should have stronger
* check here, could check dcontext equivalence, (but we
* recycle those too), probably should check threads_temp
* handle and be sure thread is still alive since the id
* won't be recycled then */
if (threads[j]->id == thread_ids_temp[i]) {
synch_array[j] = synch_array_temp[i];
break;
}
}
}
}
/* free old synch list, old thread id list */
if (num_threads_temp > 0) {
global_heap_free(thread_ids_temp,
num_threads_temp *
sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT));
global_heap_free(synch_array_temp,
num_threads_temp * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT));
num_threads_temp = 0;
}
all_synched = true;
LOG(THREAD, LOG_SYNCH, 3, "Looping over all threads (%d threads)\n", num_threads);
#ifdef CLIENT_INTERFACE
finished_non_client_threads = true;
for (i = 0; i < num_threads; i++) {
if (threads[i]->id != my_id && synch_array[i] != SYNCH_WITH_ALL_SYNCHED &&
!IS_CLIENT_THREAD(threads[i]->dcontext)) {
finished_non_client_threads = false;
break;
}
}
#endif
/* make a copy of the thread ids (can't just keep the thread list
* since it consists of pointers to live thread_record_t structs).
* we must make the copy before synching b/c cleaning up a thread
* involves freeing its thread_record_t.
*/
thread_ids_temp = (thread_id_t *)global_heap_alloc(
num_threads * sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT));
for (i = 0; i < num_threads; i++)
thread_ids_temp[i] = threads[i]->id;
num_threads_temp = num_threads;
synch_array_temp = synch_array;
for (i = 0; i < num_threads; i++) {
/* do not de-ref threads[i] after synching if it was cleaned up! */
if (synch_array[i] != SYNCH_WITH_ALL_SYNCHED && threads[i]->id != my_id) {
#ifdef CLIENT_INTERFACE
if (!finished_non_client_threads &&
IS_CLIENT_THREAD(threads[i]->dcontext)) {
all_synched = false;
continue; /* skip this thread for now till non-client are finished */
}
if (IS_CLIENT_THREAD(threads[i]->dcontext) &&
(TEST(flags, THREAD_SYNCH_SKIP_CLIENT_THREAD) ||
!should_suspend_client_thread(threads[i]->dcontext,
desired_synch_state))) {
/* PR 609569: do not suspend this thread.
* Avoid races between resume_all_threads() and
* dr_client_thread_set_suspendable() by storing the fact.
*
* For most of our synchall purposes we really want to prevent
* threads from acting on behalf of the application, and make
* sure we can relocate them if in the code cache. DR itself is
* thread-safe, and while a synchall-initiator will touch
* thread-private data for threads it suspends, having some
* threads it does not suspend shouldn't cause any problems so
* long as it doesn't touch their thread-private data.
*/
synch_array[i] = SYNCH_WITH_ALL_SYNCHED;
threads[i]->dcontext->client_data->left_unsuspended = true;
continue;
}
#endif
/* speed things up a tad */
if (synch_array[i] != SYNCH_WITH_ALL_NOTIFIED) {
ASSERT(synch_array[i] == SYNCH_WITH_ALL_NEW);
adjust_wait_at_safe_spot(threads[i]->dcontext, 1);
synch_array[i] = SYNCH_WITH_ALL_NOTIFIED;
}
LOG(THREAD, LOG_SYNCH, 2,
"About to try synch with thread #%d/%d " TIDFMT "\n", i, num_threads,
threads[i]->id);
synch_res =
synch_with_thread(threads[i]->id, false, true, THREAD_SYNCH_NONE,
desired_synch_state, flags_one);
if (synch_res == THREAD_SYNCH_RESULT_SUCCESS) {
LOG(THREAD, LOG_SYNCH, 2, "Synch succeeded!\n");
/* successful synch */
synch_array[i] = SYNCH_WITH_ALL_SYNCHED;
if (!THREAD_SYNCH_IS_CLEANED(desired_synch_state))
adjust_wait_at_safe_spot(threads[i]->dcontext, -1);
} else {
LOG(THREAD, LOG_SYNCH, 2, "Synch failed!\n");
all_synched = false;
if (synch_res == THREAD_SYNCH_RESULT_SUSPEND_FAILURE) {
if (TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags))
goto synch_with_all_abort;
} else
ASSERT(synch_res == THREAD_SYNCH_RESULT_NOT_SAFE);
}
} else {
LOG(THREAD, LOG_SYNCH, 2, "Skipping synch with thread " TIDFMT "\n",
thread_ids_temp[i]);
}
}
if (loop_count++ >= max_loops)
break;
        /* We test the exiting thread count to avoid races between process exit
         * (the current thread, though we could be here for detach or other
         * reasons) and an exiting thread (who might no longer be on the all
         * threads list) who is still using shared resources (ref case 3121) */
if (!all_synched || exiting_thread_count > expect_self_exiting ||
uninit_thread_count > 0) {
DOSTATS({
if (all_synched && exiting_thread_count > expect_self_exiting) {
LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread %d %d %d\n",
all_synched, exiting_thread_count, expect_self_exiting);
STATS_INC(synch_yields_for_exiting_thread);
} else if (all_synched && uninit_thread_count > 0) {
LOG(THREAD, LOG_SYNCH, 2, "Waiting for an uninit thread %d %d\n",
all_synched, uninit_thread_count);
STATS_INC(synch_yields_for_uninit_thread);
}
});
STATS_INC(synch_yields);
            /* release lock in case some other thread is waiting on it */
d_r_mutex_unlock(&thread_initexit_lock);
LOG(THREAD, LOG_SYNCH, 2, "Not all threads synched looping again\n");
            /* Note - we only need to call the ENTER/EXIT_DR hooks if single
             * thread in dr since we are not really exiting DR here (we just
             * need to give up the exclusion lock for a while to let the thread
             * we are trying to synch with make progress towards a safe synch
             * point). */
if (INTERNAL_OPTION(single_thread_in_DR))
EXITING_DR(); /* give up DR exclusion lock */
synch_thread_yield();
if (INTERNAL_OPTION(single_thread_in_DR))
ENTERING_DR(); /* re-gain DR exclusion lock */
d_r_mutex_lock(&thread_initexit_lock);
/* We unlock and lock the thread_initexit_lock, so threads might be stale. */
threads_are_stale = true;
}
}
/* case 9392: callers passing in ABORT expect a return value of failure
* to correspond w/ no suspended threads, a freed threads array, and no
* locks being held, so we go through the abort path
*/
if (!all_synched && TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags))
goto synch_with_all_abort;
synch_with_all_exit:
    /* make sure we didn't exit the loop without synchronizing, FIXME: in
     * release builds we assume the synchronization is failing and continue
     * without it, but that is dangerous.
     * It is now up to the caller to handle this, and some callers use
     * small loop counts and abort on failure, so this is only a curiosity. */
ASSERT_CURIOSITY(loop_count < max_loops);
ASSERT(threads != NULL);
/* Since the set of threads can change we don't set the success field
* until we're passing back the thread list.
     * We would use a tsd field directly instead of synch_array except
     * for THREAD_SYNCH_*_CLEAN where the tsd is freed.
*/
ASSERT(synch_array != NULL);
if (!THREAD_SYNCH_IS_CLEANED(desired_synch_state)) { /* else unsafe to access tsd */
for (i = 0; i < num_threads; i++) {
if (threads[i]->id != my_id) {
thread_synch_data_t *tsd;
ASSERT(threads[i]->dcontext != NULL);
tsd = (thread_synch_data_t *)threads[i]->dcontext->synch_field;
tsd->synch_with_success = (synch_array[i] == SYNCH_WITH_ALL_SYNCHED);
}
}
}
global_heap_free(synch_array, num_threads * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT));
if (num_threads_temp > 0) {
global_heap_free(thread_ids_temp,
num_threads_temp *
sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT));
}
/* FIXME case 9333: on all_synch failure we do not free threads array if
* synch_result is ignored. Callers are responsible for resuming threads that are
* suspended and freeing allocation for threads array
*/
if ((!all_synched && TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags)) ||
THREAD_SYNCH_IS_CLEANED(desired_synch_state)) {
global_heap_free(
threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
threads = NULL;
num_threads = 0;
}
LOG(THREAD, LOG_SYNCH, 1, "Finished synch with all threads: result=%d\n",
all_synched);
DOLOG(1, LOG_SYNCH, {
if (all_synched) {
LOG(THREAD, LOG_SYNCH, 1,
"\treturning holding initexit_lock and all_threads_synch_lock\n");
}
});
*threads_out = threads;
*num_threads_out = num_threads;
dynamo_all_threads_synched = all_synched;
ASSERT(exiting_thread_count - expect_self_exiting == 0);
    /* FIXME case 9392: on all_synch failure we do not release the locks in the
     * non-abort exit path */
return all_synched;
synch_with_all_abort:
/* undo everything! */
for (i = 0; i < num_threads; i++) {
DEBUG_DECLARE(bool ok;)
if (threads[i]->id != my_id) {
if (synch_array[i] == SYNCH_WITH_ALL_SYNCHED) {
bool resume = true;
#ifdef CLIENT_SIDELINE
if (IS_CLIENT_THREAD(threads[i]->dcontext) &&
threads[i]->dcontext->client_data->left_unsuspended) {
/* PR 609569: we did not suspend this thread */
resume = false;
}
#endif
if (resume) {
DEBUG_DECLARE(ok =)
os_thread_resume(threads[i]);
ASSERT(ok);
}
/* ensure synch_with_success is set to false on exit path,
* even though locks are released and not fully valid
*/
synch_array[i] = SYNCH_WITH_ALL_NEW;
} else if (synch_array[i] == SYNCH_WITH_ALL_NOTIFIED) {
adjust_wait_at_safe_spot(threads[i]->dcontext, -1);
}
}
}
d_r_mutex_unlock(&thread_initexit_lock);
d_r_mutex_unlock(&all_threads_synch_lock);
ASSERT(exiting_thread_count - expect_self_exiting == 0);
ASSERT(!all_synched); /* ensure our OUT values will be NULL,0
for THREAD_SYNCH_SUSPEND_FAILURE_ABORT */
goto synch_with_all_exit;
}
/* Assumes that the threads were suspended with synch_with_all_threads()
* and thus even is_thread_currently_native() threads were suspended.
 * Assumes that the caller will free up the threads array if it is
 * dynamically allocated.
*/
void
resume_all_threads(thread_record_t **threads, const uint num_threads)
{
uint i;
thread_id_t my_tid;
bool res;
ASSERT_OWN_MUTEX(true, &all_threads_synch_lock);
ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
if (threads == NULL || num_threads == 0)
return;
my_tid = d_r_get_thread_id();
for (i = 0; i < num_threads; i++) {
if (my_tid == threads[i]->id)
continue;
#ifdef CLIENT_SIDELINE
if (IS_CLIENT_THREAD(threads[i]->dcontext) &&
threads[i]->dcontext->client_data->left_unsuspended) {
/* PR 609569: we did not suspend this thread */
threads[i]->dcontext->client_data->left_unsuspended = false;
continue;
}
#endif
/* This routine assumes that each thread in the array was suspended, so
* each one has to successfully resume.
*/
res = os_thread_resume(threads[i]);
ASSERT(res);
}
}
/* Should be called to clean up after synch_with_all_threads as otherwise
* dynamo_all_threads_synched will be left as true.
* If resume is true, resumes the threads in the threads array.
* Unlocks thread_initexit_lock and all_threads_synch_lock.
* If threads != NULL, frees the threads array.
*/
void
end_synch_with_all_threads(thread_record_t **threads, uint num_threads, bool resume)
{
/* dynamo_all_threads_synched will be false if synch failed */
ASSERT_CURIOSITY(dynamo_all_threads_synched);
ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock));
dynamo_all_threads_synched = false;
if (resume) {
ASSERT(threads != NULL);
resume_all_threads(threads, num_threads);
}
/* if we knew whether THREAD_SYNCH_*_CLEANED was specified we could set
* synch_with_success to false, but it's unsafe otherwise
*/
d_r_mutex_unlock(&thread_initexit_lock);
d_r_mutex_unlock(&all_threads_synch_lock);
if (threads != NULL) {
global_heap_free(
threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
}
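/* A minimal usage sketch for the synch-all API above (hypothetical caller;
 * it mirrors the pattern used by send_all_other_threads_native() and
 * detach_on_permanent_stack() below):
 *
 *   thread_record_t **threads;
 *   int num_threads;
 *   if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT, &threads,
 *                               &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER,
 *                               THREAD_SYNCH_SUSPEND_FAILURE_IGNORE))
 *       // handle failure (the callers below report a fatal error and exit)
 *   ... operate on the suspended threads while holding both synch locks ...
 *   end_synch_with_all_threads(threads, num_threads, true); // resume + unlock + free
 */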
/* Resets a thread's context to start interpreting anew.
* ASSUMPTION: the thread is currently suspended.
* This was moved here from fcache_reset_all_caches_proactively simply to
* get access to win32-private CONTEXT-related routines
*/
void
translate_from_synchall_to_dispatch(thread_record_t *tr, thread_synch_state_t synch_state)
{
bool res;
/* we do not have to align priv_mcontext_t */
priv_mcontext_t *mc = global_heap_alloc(sizeof(*mc) HEAPACCT(ACCT_OTHER));
bool free_cxt = true;
dcontext_t *dcontext = tr->dcontext;
app_pc pre_translation;
ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock));
/* FIXME: would like to assert that suspendcount is > 0 but how? */
ASSERT(thread_synch_successful(tr));
res = thread_get_mcontext(tr, mc);
ASSERT(res);
pre_translation = (app_pc)mc->pc;
LOG(GLOBAL, LOG_CACHE, 2, "\trecreating address for " PFX "\n", mc->pc);
LOG(THREAD, LOG_CACHE, 2,
"translate_from_synchall_to_dispatch: being translated from " PFX "\n", mc->pc);
if (get_at_syscall(dcontext)) {
/* Don't need to do anything as shared_syscall and do_syscall will not
* change due to a reset and will have any inlined ibl updated. If we
         * did try to send these guys back to d_r_dispatch, we would have to set
         * asynch_tag (as well as next_tag since translation looks only at that),
         * restore TOS to asynch_target/esi (unless still at reset state), and
         * figure out how to avoid post-syscall processing for those who never
* did pre-syscall processing (i.e., if at shared_syscall) (else will
* get wrong dcontext->sysnum, etc.)
* Not to mention that after resuming the kernel will finish the
* syscall and clobber several registers, making it hard to set a
* clean state (xref case 6113, case 5074, and notes below)!
* It's just too hard to redirect while at a syscall.
*/
LOG(GLOBAL, LOG_CACHE, 2, "\tat syscall so not translating\n");
/* sanity check */
ASSERT(is_after_syscall_address(dcontext, pre_translation) ||
IF_WINDOWS_ELSE(pre_translation == vsyscall_after_syscall,
is_after_or_restarted_do_syscall(dcontext, pre_translation,
true /*vsys*/)));
#if defined(UNIX) && defined(X86_32)
if (pre_translation == vsyscall_sysenter_return_pc ||
pre_translation + SYSENTER_LENGTH == vsyscall_sysenter_return_pc) {
/* Because we remove the vsyscall hook on a send_all_other_threads_native()
* yet have no barrier to know the threads have run their own go-native
* code, we want to send them away from the hook, to our gencode.
*/
if (pre_translation == vsyscall_sysenter_return_pc)
mc->pc = after_do_shared_syscall_addr(dcontext);
else if (pre_translation + SYSENTER_LENGTH == vsyscall_sysenter_return_pc)
mc->pc = get_do_int_syscall_entry(dcontext);
/* exit stub and subsequent fcache_return will save rest of state */
res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0,
synch_state _IF_X64((void *)mc)
_IF_WINDOWS(NULL));
ASSERT(res);
/* cxt is freed by set_synched_thread_context() or target thread */
free_cxt = false;
}
#endif
IF_ARM({
if (INTERNAL_OPTION(steal_reg_at_reset) != 0) {
/* We don't want to translate, just update the stolen reg values */
arch_mcontext_reset_stolen_reg(dcontext, mc);
res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0,
synch_state _IF_X64((void *)mc)
_IF_WINDOWS(NULL));
ASSERT(res);
/* cxt is freed by set_synched_thread_context() or target thread */
free_cxt = false;
}
});
} else {
res = translate_mcontext(tr, mc, true /*restore memory*/, NULL);
ASSERT(res);
if (!thread_synch_successful(tr) || mc->pc == 0) {
/* Better to risk failure on accessing a freed cache than
* to have a guaranteed crash by sending to NULL.
* FIXME: it's possible the real translation is NULL,
* but if so should be fine to leave it there since the
* current eip should also be NULL.
*/
ASSERT_NOT_REACHED();
goto translate_from_synchall_to_dispatch_exit;
}
LOG(GLOBAL, LOG_CACHE, 2, "\ttranslation pc = " PFX "\n", mc->pc);
ASSERT(!is_dynamo_address((app_pc)mc->pc) && !in_fcache((app_pc)mc->pc));
IF_ARM({
if (INTERNAL_OPTION(steal_reg_at_reset) != 0) {
                /* XXX: do we need this? Will signal.c fix it up prior
                 * to sigreturn from the suspend handler?
*/
arch_mcontext_reset_stolen_reg(dcontext, mc);
}
});
        /* We send all threads, regardless of whether they were in DR or not, to
         * re-interp from the translated cxt, to avoid having to handle stale
* local state problems if we simply resumed.
* We assume no KSTATS or other state issues to deal with.
* FIXME: enter hook w/o an exit?
*/
dcontext->next_tag = (app_pc)mc->pc;
/* FIXME PR 212266: for linux if we're at an inlined syscall
* we may have problems: however, we might be able to rely on the kernel
* not clobbering any registers besides eax (which is ok: reset stub
* handles it), though presumably it's allowed to write to any
* caller-saved registers. We may need to change inlined syscalls
* to set at_syscall (see comments below as well).
*/
if (pre_translation ==
IF_WINDOWS_ELSE(vsyscall_after_syscall, vsyscall_sysenter_return_pc) &&
!waiting_at_safe_spot(dcontext->thread_record, synch_state)) {
/* FIXME case 7827/PR 212266: shouldn't translate for this case, right?
* should have -ignore_syscalls set at_syscall and eliminate
* this whole block of code
*/
/* put the proper retaddr back on the stack, as we won't
* be doing the ret natively to regain control, but rather
* will interpret it
*/
/* FIXME: ensure readable and writable? */
app_pc cur_retaddr = *((app_pc *)mc->xsp);
app_pc native_retaddr;
ASSERT(cur_retaddr != NULL);
/* must be ignore_syscalls (else, at_syscall will be set) */
IF_WINDOWS(ASSERT(DYNAMO_OPTION(ignore_syscalls)));
ASSERT(get_syscall_method() == SYSCALL_METHOD_SYSENTER);
/* For DYNAMO_OPTION(sygate_sysenter) we need to restore both stack
* values and fix up esp, but we can't do it here since the kernel
* will change esp... incompatible w/ -ignore_syscalls anyway
*/
IF_WINDOWS(ASSERT_NOT_IMPLEMENTED(!DYNAMO_OPTION(sygate_sysenter)));
/* may still be at syscall from a prior reset -- don't want to grab
* locks for in_fcache so we determine via the translation
*/
ASSERT_NOT_TESTED();
native_retaddr = recreate_app_pc(dcontext, cur_retaddr, NULL);
if (native_retaddr != cur_retaddr) {
LOG(GLOBAL, LOG_CACHE, 2, "\trestoring TOS to " PFX " from " PFX "\n",
native_retaddr, cur_retaddr);
*((app_pc *)mc->xsp) = native_retaddr;
} else {
LOG(GLOBAL, LOG_CACHE, 2,
"\tnot restoring TOS since still at previous reset state " PFX "\n",
cur_retaddr);
}
}
/* Send back to d_r_dispatch. Rather than setting up last_exit in eax here,
* we point to a special routine to save the correct eax -- in fact it's
* simply a direct exit stub. Originally this was b/c we tried to
* translate threads at system calls, and the kernel clobbers eax (and
* ecx/edx for sysenter, though preserves eip setcontext change: case
* 6113, case 5074) in finishing the system call, but now that we don't
* translate them we've kept the stub approach. It's actually faster
* for the stub itself to save eax and set the linkstub than for us to
* emulate it here, anyway.
* Note that a thread in check_wait_at_safe_spot() spins and will NOT be
* at a syscall, avoiding problems there (case 5074).
*/
mc->pc = (app_pc)get_reset_exit_stub(dcontext);
LOG(GLOBAL, LOG_CACHE, 2, "\tsent to reset exit stub " PFX "\n", mc->pc);
#ifdef WINDOWS
        /* i#25: we could have interrupted the thread in DR, where it has priv fls
         * data in TEB, and fcache_return blindly copies into app fls: so swap to
         * app now, just in case. The DR routine can handle swapping when already app.
*/
swap_peb_pointer(dcontext, false /*to app*/);
#endif
/* exit stub and subsequent fcache_return will save rest of state */
res =
set_synched_thread_context(dcontext->thread_record, mc, NULL, 0,
synch_state _IF_X64((void *)mc) _IF_WINDOWS(NULL));
ASSERT(res);
/* cxt is freed by set_synched_thread_context() or target thread */
free_cxt = false;
}
translate_from_synchall_to_dispatch_exit:
if (free_cxt) {
global_heap_free(mc, sizeof(*mc) HEAPACCT(ACCT_OTHER));
}
}
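/* For reference, the core of the non-syscall path above condenses to the
 * following (error handling and arch-specific fixups omitted):
 *
 *   translate_mcontext(tr, mc, true, NULL);         // recreate native app state
 *   dcontext->next_tag = (app_pc)mc->pc;            // resume interpreting here
 *   mc->pc = (app_pc)get_reset_exit_stub(dcontext); // stub saves eax + linkstub
 *   set_synched_thread_context(dcontext->thread_record, mc, NULL, 0, synch_state
 *                              _IF_X64((void *)mc) _IF_WINDOWS(NULL));
 */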
/***************************************************************************
* Detach and similar operations
*/
/* Atomic variable to prevent multiple threads from trying to detach at
* the same time.
*/
DECLARE_CXTSWPROT_VAR(static volatile int dynamo_detaching_flag, LOCK_FREE_STATE);
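/* Sketch of the guard pattern (as used by detach_on_permanent_stack() below);
 * the flag is not a real lock, so no thread ever blocks on it:
 *
 *   if (!atomic_compare_exchange(&dynamo_detaching_flag, LOCK_FREE_STATE,
 *                                LOCK_SET_STATE))
 *       return; // a detach is already in progress
 *   ... perform the detach ...
 *   dynamo_detaching_flag = LOCK_FREE_STATE; // restored on every exit path
 */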
void
send_all_other_threads_native(void)
{
thread_record_t **threads;
dcontext_t *my_dcontext = get_thread_private_dcontext();
int i, num_threads;
bool waslinking;
/* We're forced to use an asynch model due to not being able to call
* dynamo_thread_not_under_dynamo, which has a bonus of making it easier
* to handle other threads asking for synchall.
* This is why we don't ask for THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT.
*/
const thread_synch_state_t desired_state =
THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER;
ASSERT(dynamo_initialized && !dynamo_exited && my_dcontext != NULL);
LOG(my_dcontext->logfile, LOG_ALL, 1, "%s\n", __FUNCTION__);
LOG(GLOBAL, LOG_ALL, 1, "%s: cur thread " TIDFMT "\n", __FUNCTION__,
d_r_get_thread_id());
waslinking = is_couldbelinking(my_dcontext);
if (waslinking)
enter_nolinking(my_dcontext, NULL, false);
#ifdef WINDOWS
/* Ensure new threads will go straight to native */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
init_apc_go_native_pause = true;
init_apc_go_native = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
# ifdef CLIENT_INTERFACE
wait_for_outstanding_nudges();
# endif
#endif
/* Suspend all threads except those trying to synch with us */
if (!synch_with_all_threads(desired_state, &threads, &num_threads,
THREAD_SYNCH_NO_LOCKS_NO_XFER,
THREAD_SYNCH_SUSPEND_FAILURE_IGNORE)) {
REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_SYNCHRONIZE_THREADS, 2,
get_application_name(), get_application_pid());
}
ASSERT(mutex_testlock(&all_threads_synch_lock) &&
mutex_testlock(&thread_initexit_lock));
#ifdef WINDOWS
/* Let threads waiting at APC point go native */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
init_apc_go_native_pause = false;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
#endif
#ifdef WINDOWS
/* FIXME i#95: handle outstanding callbacks where we've put our retaddr on
* the app stack. This should be able to share
* detach_helper_handle_callbacks() code. Won't the old single-thread
* dr_app_stop() have had this same problem? Since we're not tearing
* everything down, can we solve it by waiting until we hit
* after_shared_syscall_code_ex() in a native thread?
*/
ASSERT_NOT_IMPLEMENTED(get_syscall_method() != SYSCALL_METHOD_SYSENTER);
#endif
for (i = 0; i < num_threads; i++) {
if (threads[i]->dcontext == my_dcontext ||
is_thread_currently_native(threads[i]) ||
/* FIXME i#2784: we should suspend client threads for the duration
* of the app being native to avoid problems with having no
* signal handlers in place.
*/
IS_CLIENT_THREAD(threads[i]->dcontext))
continue;
/* Because dynamo_thread_not_under_dynamo() has to be run by the owning
* thread, the simplest solution is to send everyone back to d_r_dispatch
* with a flag to go native from there, rather than directly setting the
* native context.
*/
threads[i]->dcontext->go_native = true;
if (thread_synch_state_no_xfer(threads[i]->dcontext)) {
/* Another thread trying to synch with us: just let it go. It will
* go native once it gets back to d_r_dispatch which will be before it
* goes into the cache.
*/
continue;
} else {
LOG(my_dcontext->logfile, LOG_ALL, 1, "%s: sending thread %d native\n",
__FUNCTION__, threads[i]->id);
LOG(threads[i]->dcontext->logfile, LOG_ALL, 1,
"**** requested by thread %d to go native\n", my_dcontext->owning_thread);
/* This won't change a thread at a syscall, so we rely on the thread
* going to d_r_dispatch and then going native when its syscall exits.
*
* FIXME i#95: That means the time to go native is, unfortunately,
* unbounded. This means that dr_app_cleanup() needs to synch the
* threads and force-xl8 these. We should share code with detach.
* Right now we rely on the app joining all its threads *before*
* calling dr_app_cleanup(), or using dr_app_stop_and_cleanup[_with_stats]().
* This also means we have a race with unhook_vsyscall in
* os_process_not_under_dynamorio(), which we solve by redirecting
* threads at syscalls to our gencode.
*/
translate_from_synchall_to_dispatch(threads[i], desired_state);
}
}
end_synch_with_all_threads(threads, num_threads, true /*resume*/);
os_process_not_under_dynamorio(my_dcontext);
if (waslinking)
enter_couldbelinking(my_dcontext, NULL, false);
return;
}
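/* Detach routine; per its name it is expected to run on a stack that survives
 * DR's cleanup. Parameter summary (inferred from the body below):
 *   internal - whether the detach was initiated internally; if not, the
 *     allow_detach option must be set.
 *   do_cleanup - if false, we return right after sending all threads native,
 *     without freeing DR state.
 *   drstats - if non-NULL, receives a final statistics snapshot.
 */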
void
detach_on_permanent_stack(bool internal, bool do_cleanup, dr_stats_t *drstats)
{
dcontext_t *my_dcontext;
thread_record_t **threads;
thread_record_t *my_tr = NULL;
int i, num_threads, my_idx = -1;
thread_id_t my_id;
#ifdef WINDOWS
bool detach_stacked_callbacks;
bool *cleanup_tpc;
#endif
DEBUG_DECLARE(bool ok;)
DEBUG_DECLARE(int exit_res;)
/* synch-all flags: */
uint flags = 0;
#ifdef WINDOWS
/* For Windows we may fail to suspend a thread (e.g., privilege
* problems), and in that case we want to just ignore the failure.
*/
flags |= THREAD_SYNCH_SUSPEND_FAILURE_IGNORE;
#elif defined(UNIX)
/* For Unix, such privilege problems are rarer but we would still prefer to
* continue if we hit a problem.
*/
flags |= THREAD_SYNCH_SUSPEND_FAILURE_IGNORE;
#endif
/* i#297: we only synch client threads after process exit event. */
flags |= THREAD_SYNCH_SKIP_CLIENT_THREAD;
ENTERING_DR();
/* dynamo_detaching_flag is not really a lock, and since no one ever waits
* on it we can't deadlock on it either.
*/
if (!atomic_compare_exchange(&dynamo_detaching_flag, LOCK_FREE_STATE, LOCK_SET_STATE))
return;
/* Unprotect .data for exit cleanup.
* XXX: more secure to not do this until we've synched, but then need
* alternative prot for started_detach and init_apc_go_native*
*/
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
ASSERT(!started_detach);
started_detach = true;
if (!internal) {
synchronize_dynamic_options();
if (!DYNAMO_OPTION(allow_detach)) {
started_detach = false;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
dynamo_detaching_flag = LOCK_FREE_STATE;
SYSLOG_INTERNAL_ERROR("Detach called without the allow_detach option set");
EXITING_DR();
return;
}
}
ASSERT(dynamo_initialized);
ASSERT(!dynamo_exited);
my_id = d_r_get_thread_id();
my_dcontext = get_thread_private_dcontext();
if (my_dcontext == NULL) {
/* We support detach after just dr_app_setup() with no start. */
ASSERT(!dynamo_started);
my_tr = thread_lookup(my_id);
ASSERT(my_tr != NULL);
my_dcontext = my_tr->dcontext;
os_process_under_dynamorio_initiate(my_dcontext);
os_process_under_dynamorio_complete(my_dcontext);
dynamo_thread_under_dynamo(my_dcontext);
ASSERT(get_thread_private_dcontext() == my_dcontext);
}
ASSERT(my_dcontext != NULL);
LOG(GLOBAL, LOG_ALL, 1, "Detach: thread %d starting detach process\n", my_id);
SYSLOG(SYSLOG_INFORMATION, INFO_DETACHING, 2, get_application_name(),
get_application_pid());
/* synch with flush */
if (my_dcontext != NULL)
enter_threadexit(my_dcontext);
#ifdef WINDOWS
/* Signal to go native at APC init here. Set pause first so that threads
* will wait till we are ready for them to go native (after ntdll unpatching).
* (To avoid races these must be set in this order!)
*/
init_apc_go_native_pause = true;
init_apc_go_native = true;
/* XXX i#2611: there is still a race for threads caught between init_apc_go_native
* and dynamo_thread_init adding to all_threads: this just reduces the risk.
* Unfortunately we can't easily use the UNIX solution of uninit_thread_count
* since we can't distinguish internally vs externally created threads.
*/
os_thread_yield();
# ifdef CLIENT_INTERFACE
wait_for_outstanding_nudges();
# endif
#endif
#ifdef UNIX
/* i#2270: we ignore alarm signals during detach to reduce races. */
signal_remove_alarm_handlers(my_dcontext);
#endif
/* suspend all DR-controlled threads at safe locations */
if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT, &threads,
&num_threads,
/* Case 6821: allow other synch-all-thread uses
* that beat us to not wait on us. We still have
* a problem if we go first since we must xfer
* other threads.
*/
THREAD_SYNCH_NO_LOCKS_NO_XFER, flags)) {
REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_SYNCHRONIZE_THREADS, 2,
get_application_name(), get_application_pid());
}
/* Now we own the thread_initexit_lock. We'll release the locks grabbed in
* synch_with_all_threads below after cleaning up all the threads in case we
* need to grab it during process exit cleanup.
*/
ASSERT(mutex_testlock(&all_threads_synch_lock) &&
mutex_testlock(&thread_initexit_lock));
ASSERT(!doing_detach);
doing_detach = true;
#ifdef HOT_PATCHING_INTERFACE
/* In hotp_only mode, we must remove patches when detaching; we don't want
* to leave in all our hooks and detach; that will definitely crash the app.
*/
if (DYNAMO_OPTION(hotp_only))
hotp_only_detach_helper();
#endif
#ifdef WINDOWS
/* XXX: maybe we should re-check for additional threads that passed the init_apc
* lock but weren't yet initialized and so didn't show up on the list?
*/
LOG(GLOBAL, LOG_ALL, 1,
"Detach : about to unpatch ntdll.dll and fix memory permissions\n");
detach_remove_image_entry_hook(num_threads, threads);
if (!INTERNAL_OPTION(noasynch)) {
/* We have to do this here, before client exit events, as we're letting
* threads go native next. We thus will not detect crashes during client
* exit during detach.
*/
callback_interception_unintercept();
}
#endif
if (!DYNAMO_OPTION(thin_client))
revert_memory_regions();
#ifdef UNIX
unhook_vsyscall();
#endif
LOG(GLOBAL, LOG_ALL, 1,
"Detach : unpatched ntdll.dll and fixed memory permissions\n");
#ifdef WINDOWS
/* Release the APC init lock and let any threads waiting there go native */
LOG(GLOBAL, LOG_ALL, 1, "Detach : Releasing init_apc_go_native_pause\n");
init_apc_go_native_pause = false;
#endif
/* perform exit tasks that require full thread data structs */
dynamo_process_exit_with_thread_info();
#ifdef WINDOWS
/* We need to record a bool indicating whether we can free each thread's
* resources fully or whether we need them for callback cleanup.
*/
cleanup_tpc =
(bool *)global_heap_alloc(num_threads * sizeof(bool) HEAPACCT(ACCT_OTHER));
/* Handle any outstanding callbacks */
detach_stacked_callbacks = detach_handle_callbacks(num_threads, threads, cleanup_tpc);
#endif
LOG(GLOBAL, LOG_ALL, 1, "Detach: starting to translate contexts\n");
for (i = 0; i < num_threads; i++) {
priv_mcontext_t mc;
if (threads[i]->dcontext == my_dcontext) {
my_idx = i;
my_tr = threads[i];
continue;
} else if (IS_CLIENT_THREAD(threads[i]->dcontext)) {
/* i#297 we will kill client-owned threads later after app exit events
* in dynamo_shared_exit().
*/
continue;
} else if (detach_do_not_translate(threads[i])) {
LOG(GLOBAL, LOG_ALL, 2, "Detach: not translating " TIDFMT "\n",
threads[i]->id);
} else {
LOG(GLOBAL, LOG_ALL, 2, "Detach: translating " TIDFMT "\n", threads[i]->id);
DEBUG_DECLARE(ok =)
thread_get_mcontext(threads[i], &mc);
ASSERT(ok);
/* For a thread at a syscall, we use SA_RESTART for our suspend signal,
* so the kernel will adjust the restart point back to the syscall for us
             * where expected. This is an artificial signal we're introducing, so an
* app that assumes no signals and assumes its non-auto-restart syscalls
* don't need loops could be broken.
*/
LOG(GLOBAL, LOG_ALL, 3,
/* Having the code bytes can help diagnose post-detach where the code
* cache is gone.
*/
"Detach: pre-xl8 pc=%p (%02x %02x %02x %02x %02x), xsp=%p "
"for thread " TIDFMT "\n",
mc.pc, *mc.pc, *(mc.pc + 1), *(mc.pc + 2), *(mc.pc + 3), *(mc.pc + 4),
mc.xsp, threads[i]->id);
DEBUG_DECLARE(ok =)
translate_mcontext(threads[i], &mc, true /*restore mem*/, NULL /*f*/);
ASSERT(ok);
if (!threads[i]->under_dynamo_control) {
LOG(GLOBAL, LOG_ALL, 1,
"Detach : thread " TIDFMT " already running natively\n",
threads[i]->id);
/* we do need to restore the app ret addr, for native_exec */
if (!DYNAMO_OPTION(thin_client) && DYNAMO_OPTION(native_exec) &&
!vmvector_empty(native_exec_areas)) {
put_back_native_retaddrs(threads[i]->dcontext);
}
}
detach_finalize_translation(threads[i], &mc);
LOG(GLOBAL, LOG_ALL, 1, "Detach: pc=" PFX " for thread " TIDFMT "\n", mc.pc,
threads[i]->id);
ASSERT(!is_dynamo_address(mc.pc) && !in_fcache(mc.pc));
/* XXX case 7457: if the thread is suspended after it received a fault
* but before the kernel copied the faulting context to the user mode
* structures for the handler, it could result in a codemod exception
* that wouldn't happen natively!
*/
DEBUG_DECLARE(ok =)
thread_set_mcontext(threads[i], &mc);
ASSERT(ok);
/* i#249: restore app's PEB/TEB fields */
IF_WINDOWS(restore_peb_pointer_for_thread(threads[i]->dcontext));
}
/* Resumes the thread, which will do kernel-visible cleanup of
* signal state. Resume happens within the synch_all region where
* the thread_initexit_lock is held so that we can clean up thread
* data later.
*/
#ifdef UNIX
os_signal_thread_detach(threads[i]->dcontext);
#endif
LOG(GLOBAL, LOG_ALL, 1, "Detach: thread " TIDFMT " is being resumed as native\n",
threads[i]->id);
os_thread_resume(threads[i]);
}
ASSERT(my_idx != -1 || !internal);
#ifdef UNIX
LOG(GLOBAL, LOG_ALL, 1, "Detach: waiting for threads to fully detach\n");
for (i = 0; i < num_threads; i++) {
if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext))
os_wait_thread_detached(threads[i]->dcontext);
}
#endif
if (!do_cleanup)
return;
/* Clean up each thread now that everyone has gone native. Needs to be
* done with the thread_initexit_lock held, which is true within a synched
* region.
*/
for (i = 0; i < num_threads; i++) {
if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext)) {
LOG(GLOBAL, LOG_ALL, 1, "Detach: cleaning up thread " TIDFMT " %s\n",
threads[i]->id, IF_WINDOWS_ELSE(cleanup_tpc[i] ? "and its TPC" : "", ""));
dynamo_other_thread_exit(threads[i] _IF_WINDOWS(!cleanup_tpc[i]));
}
}
if (my_idx != -1) {
/* pre-client thread cleanup (PR 536058) */
dynamo_thread_exit_pre_client(my_dcontext, my_tr->id);
}
LOG(GLOBAL, LOG_ALL, 1, "Detach: Letting slave threads go native\n");
#ifdef WINDOWS
global_heap_free(cleanup_tpc, num_threads * sizeof(bool) HEAPACCT(ACCT_OTHER));
/* XXX: there's a possible race if a thread waiting at APC is still there
* when we unload our dll.
*/
os_thread_yield();
#endif
end_synch_with_all_threads(threads, num_threads, false /*don't resume */);
threads = NULL;
LOG(GLOBAL, LOG_ALL, 1, "Detach: Entering final cleanup and unload\n");
SYSLOG_INTERNAL_INFO("Detaching from process, entering final cleanup");
if (drstats != NULL)
stats_get_snapshot(drstats);
DEBUG_DECLARE(exit_res =)
dynamo_shared_exit(my_tr _IF_WINDOWS(detach_stacked_callbacks));
ASSERT(exit_res == SUCCESS);
detach_finalize_cleanup();
stack_free(d_r_initstack, DYNAMORIO_STACK_SIZE);
dynamo_exit_post_detach();
doing_detach = false;
started_detach = false;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
dynamo_detaching_flag = LOCK_FREE_STATE;
EXITING_DR();
}
| 1 | 21,948 | Include the key word "translated". Otherwise the reader will find this confusing. | DynamoRIO-dynamorio | c |