patch (stringlengths 17-31.2k) | y (int64, 1-1) | oldf (stringlengths 0-2.21M) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (stringlengths 8-843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values)
---|---|---|---|---|---|---|---
@@ -0,0 +1,19 @@
+import { always } from 'ramda';
@@ -0,0 +1,19 @@
+import { always } from 'ramda';
+
+/**
+ * A function that returns empty string.
+ *
+ * @func stubString
+ * @memberOf RA
+ * @since {@link https://char0n.github.io/ramda-adjunct/2.1.0|v2.1.0}
+ * @category Function
+ * @sig ... -> ''
+ * @return {''}
+ * @example
+ *
+ * RA.stubString(); //=> ''
+ * RA.stubString(1, 2, 3); //=> ''
+ */
+const stubString = always('');
+
+export default stubString; | 1 | 1 | 4,976 | Hindley-Milner signatures don't use literal types. They exclusively use types. Let's deal with that | char0n-ramda-adjunct | js
|
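For context, a minimal sketch of the signature style the reviewer is asking for: the Hindley-Milner `@sig` line names a type (e.g. `String`), while the literal `''` belongs only in the examples. The exact replacement text is an assumption, not part of the patch, and ramda's `always('')` is swapped for a plain arrow function just to keep the snippet dependency-free.

```js
/**
 * @sig ... -> String   // a type name, rather than the literal type '' used in the patch
 * @return {string}
 * @example
 *
 * RA.stubString(); //=> ''
 */
const stubString = () => '';

export default stubString;
```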
@@ -61,3 +61,11 @@ type Priority struct {
PolicyPriority float64
RulePriority int32
}
+
+// OFPolicyRule groups all configurations that the openflow module needs to install flow for PolicyRule.
+type OFPolicyRule struct {
+ OfID uint32
+ OfRule *PolicyRule
+ NpName string
+ NpNamespace string
+} | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"github.com/vmware-tanzu/antrea/pkg/apis/networking/v1beta1"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
)
type AddressCategory uint8
const (
IPAddr AddressCategory = iota
IPNetAddr
OFPortAddr
)
type AddressType int
const (
SrcAddress AddressType = iota
DstAddress
)
type Address interface {
GetMatchValue() string
GetMatchKey(addrType AddressType) int
GetValue() interface{}
}
// PolicyRule groups configurations to set up conjunctive match for egress/ingress policy rules.
type PolicyRule struct {
Direction v1beta1.Direction
From []Address
To []Address
Service []v1beta1.Service
Action *secv1alpha1.RuleAction
Priority *uint16
}
func (r *PolicyRule) IsAntreaNetworkPolicyRule() bool {
return r.Priority != nil
}
// Priority is a struct that is composed of CNP priority, rule priority and
// tier/category priority in the future. It is used as the basic unit for
// priority sorting.
type Priority struct {
PolicyPriority float64
RulePriority int32
}
| 1 | 19,781 | Could we just extend PolicyRule with more fields? It was originally designed as the struct required by the openflow client, and nested structs don't seem to help. As for the field names, initialisms should be uppercase; OFID and NPName don't look good, so maybe FlowID, PolicyName.. | antrea-io-antrea | go
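To illustrate, a minimal sketch of the change the reviewer is suggesting: fold the openflow-related fields into PolicyRule itself rather than adding a nested OFPolicyRule wrapper. The names FlowID and PolicyName come from the comment; PolicyNamespace is an assumed analogue of NpNamespace, and the existing fields are elided.

```go
package types

// Sketch of the reviewer's suggestion, not code from the patch: the openflow
// module's extra data lives directly on PolicyRule, with uppercase initialisms.
type PolicyRule struct {
	// ... existing PolicyRule fields (Direction, From, To, Service, Action, Priority) ...

	FlowID          uint32 // was OFPolicyRule.OfID
	PolicyName      string // was OFPolicyRule.NpName
	PolicyNamespace string // was OFPolicyRule.NpNamespace
}
```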
@@ -485,9 +485,13 @@ class Functions
// bcmath
'bcscale',
-
+
// json
'json_last_error',
+
+ // opcache
+ 'opcache_compile_file', 'opcache_get_configuration', 'opcache_get_status',
+ 'opcache_invalidate', 'opcache_is_script_cached', 'opcache_reset',
];
if (\in_array(strtolower($function_id), $impure_functions, true)) { | 1 | <?php
namespace Psalm\Internal\Codebase;
use function array_shift;
use function explode;
use function implode;
use Psalm\Codebase;
use Psalm\Internal\Analyzer\StatementsAnalyzer;
use Psalm\Internal\Provider\FileStorageProvider;
use Psalm\Internal\Provider\FunctionExistenceProvider;
use Psalm\Internal\Provider\FunctionParamsProvider;
use Psalm\Internal\Provider\FunctionReturnTypeProvider;
use Psalm\Internal\Type\Comparator\CallableTypeComparator;
use Psalm\StatementsSource;
use Psalm\Storage\FunctionStorage;
use function strpos;
use function strtolower;
use function substr;
use Psalm\Type\Atomic\TNamedObject;
use Psalm\Internal\MethodIdentifier;
use function rtrim;
use function is_bool;
/**
* @internal
*/
class Functions
{
/**
* @var FileStorageProvider
*/
private $file_storage_provider;
/**
* @var array<lowercase-string, FunctionStorage>
*/
private static $stubbed_functions;
/** @var FunctionReturnTypeProvider */
public $return_type_provider;
/** @var FunctionExistenceProvider */
public $existence_provider;
/** @var FunctionParamsProvider */
public $params_provider;
/**
* @var Reflection
*/
private $reflection;
public function __construct(FileStorageProvider $storage_provider, Reflection $reflection)
{
$this->file_storage_provider = $storage_provider;
$this->reflection = $reflection;
$this->return_type_provider = new FunctionReturnTypeProvider();
$this->existence_provider = new FunctionExistenceProvider();
$this->params_provider = new FunctionParamsProvider();
self::$stubbed_functions = [];
}
/**
* @param non-empty-lowercase-string $function_id
*/
public function getStorage(
?StatementsAnalyzer $statements_analyzer,
string $function_id,
?string $root_file_path = null,
?string $checked_file_path = null
) : FunctionStorage {
if ($function_id[0] === '\\') {
$function_id = substr($function_id, 1);
}
if (isset(self::$stubbed_functions[$function_id])) {
return self::$stubbed_functions[$function_id];
}
$file_storage = null;
if ($statements_analyzer) {
$root_file_path = $statements_analyzer->getRootFilePath();
$checked_file_path = $statements_analyzer->getFilePath();
$file_storage = $this->file_storage_provider->get($root_file_path);
$function_analyzers = $statements_analyzer->getFunctionAnalyzers();
if (isset($function_analyzers[$function_id])) {
$function_id = $function_analyzers[$function_id]->getFunctionId();
if (isset($file_storage->functions[$function_id])) {
return $file_storage->functions[$function_id];
}
}
// closures can be returned here
if (isset($file_storage->functions[$function_id])) {
return $file_storage->functions[$function_id];
}
}
if (!$root_file_path || !$checked_file_path) {
if ($this->reflection->hasFunction($function_id)) {
return $this->reflection->getFunctionStorage($function_id);
}
throw new \UnexpectedValueException(
'Expecting non-empty $root_file_path and $checked_file_path'
);
}
if ($this->reflection->hasFunction($function_id)) {
return $this->reflection->getFunctionStorage($function_id);
}
if (!isset($file_storage->declaring_function_ids[$function_id])) {
if ($checked_file_path !== $root_file_path) {
$file_storage = $this->file_storage_provider->get($checked_file_path);
if (isset($file_storage->functions[$function_id])) {
return $file_storage->functions[$function_id];
}
}
throw new \UnexpectedValueException(
'Expecting ' . $function_id . ' to have storage in ' . $checked_file_path
);
}
$declaring_file_path = $file_storage->declaring_function_ids[$function_id];
$declaring_file_storage = $this->file_storage_provider->get($declaring_file_path);
if (!isset($declaring_file_storage->functions[$function_id])) {
throw new \UnexpectedValueException(
'Not expecting ' . $function_id . ' to not have storage in ' . $declaring_file_path
);
}
return $declaring_file_storage->functions[$function_id];
}
public function addGlobalFunction(string $function_id, FunctionStorage $storage): void
{
self::$stubbed_functions[strtolower($function_id)] = $storage;
}
public function hasStubbedFunction(string $function_id): bool
{
return isset(self::$stubbed_functions[strtolower($function_id)]);
}
/**
* @return array<string, FunctionStorage>
*/
public function getAllStubbedFunctions(): array
{
return self::$stubbed_functions;
}
/**
* @param lowercase-string $function_id
*/
public function functionExists(
StatementsAnalyzer $statements_analyzer,
string $function_id
): bool {
if ($this->existence_provider->has($function_id)) {
$function_exists = $this->existence_provider->doesFunctionExist($statements_analyzer, $function_id);
if ($function_exists !== null) {
return $function_exists;
}
}
$file_storage = $this->file_storage_provider->get($statements_analyzer->getRootFilePath());
if (isset($file_storage->declaring_function_ids[$function_id])) {
return true;
}
if ($this->reflection->hasFunction($function_id)) {
return true;
}
if (isset(self::$stubbed_functions[$function_id])) {
return true;
}
if (isset($statements_analyzer->getFunctionAnalyzers()[$function_id])) {
return true;
}
$predefined_functions = $statements_analyzer->getCodebase()->config->getPredefinedFunctions();
if (isset($predefined_functions[$function_id])) {
/** @psalm-suppress ArgumentTypeCoercion */
if ($this->reflection->registerFunction($function_id) === false) {
return false;
}
return true;
}
return false;
}
/**
* @param non-empty-string $function_name
*
* @return non-empty-string
*/
public function getFullyQualifiedFunctionNameFromString(string $function_name, StatementsSource $source): string
{
if ($function_name[0] === '\\') {
$function_name = substr($function_name, 1);
if ($function_name === '') {
throw new \UnexpectedValueException('Malformed function name');
}
return $function_name;
}
$function_name_lcase = strtolower($function_name);
$aliases = $source->getAliases();
$imported_function_namespaces = $aliases->functions;
$imported_namespaces = $aliases->uses;
if (strpos($function_name, '\\') !== false) {
$function_name_parts = explode('\\', $function_name);
$first_namespace = array_shift($function_name_parts);
$first_namespace_lcase = strtolower($first_namespace);
if (isset($imported_namespaces[$first_namespace_lcase])) {
return $imported_namespaces[$first_namespace_lcase] . '\\' . implode('\\', $function_name_parts);
}
if (isset($imported_function_namespaces[$first_namespace_lcase])) {
return $imported_function_namespaces[$first_namespace_lcase] . '\\' .
implode('\\', $function_name_parts);
}
} elseif (isset($imported_function_namespaces[$function_name_lcase])) {
return $imported_function_namespaces[$function_name_lcase];
}
$namespace = $source->getNamespace();
return ($namespace ? $namespace . '\\' : '') . $function_name;
}
/**
* @return array<lowercase-string,FunctionStorage>
*/
public function getMatchingFunctionNames(
string $stub,
int $offset,
string $file_path,
Codebase $codebase
) : array {
if ($stub[0] === '*') {
$stub = substr($stub, 1);
}
$fully_qualified = false;
if ($stub[0] === '\\') {
$fully_qualified = true;
$stub = substr($stub, 1);
$stub_namespace = '';
} else {
// functions can reference either the current namespace or root-namespaced
// equivalents. We therefore want to make both candidates.
[$stub_namespace, $stub] = explode('-', $stub);
}
/** @var array<lowercase-string, FunctionStorage> */
$matching_functions = [];
$file_storage = $this->file_storage_provider->get($file_path);
$current_namespace_aliases = null;
foreach ($file_storage->namespace_aliases as $namespace_start => $namespace_aliases) {
if ($namespace_start < $offset) {
$current_namespace_aliases = $namespace_aliases;
break;
}
}
// We will search all functions for several patterns. This will
// be for all used namespaces, the global namespace and matched
// used functions.
$match_function_patterns = [
$stub . '*',
];
if ($stub_namespace) {
$match_function_patterns[] = $stub_namespace . '\\' . $stub . '*';
}
if ($current_namespace_aliases) {
foreach ($current_namespace_aliases->functions as $alias_name => $function_name) {
if (strpos($alias_name, $stub) === 0) {
try {
$match_function_patterns[] = $function_name;
} catch (\Exception $e) {
}
}
}
if (!$fully_qualified) {
foreach ($current_namespace_aliases->uses as $namespace_name) {
$match_function_patterns[] = $namespace_name . '\\' . $stub . '*';
}
}
}
$function_map = $file_storage->functions
+ $this->getAllStubbedFunctions()
+ $this->reflection->getFunctions()
+ $codebase->config->getPredefinedFunctions();
foreach ($function_map as $function_name => $function) {
foreach ($match_function_patterns as $pattern) {
$pattern_lc = \strtolower($pattern);
if (substr($pattern, -1, 1) === '*') {
if (strpos($function_name, rtrim($pattern_lc, '*')) !== 0) {
continue;
}
} elseif ($function_name !== $pattern) {
continue;
}
if (is_bool($function)) {
/** @var callable-string $function_name */
if ($this->reflection->registerFunction($function_name) === false) {
continue;
}
$function = $this->reflection->getFunctionStorage($function_name);
}
if ($function->cased_name) {
$cased_name_parts = \explode('\\', $function->cased_name);
$pattern_parts = \explode('\\', $pattern);
if (\end($cased_name_parts)[0] !== \end($pattern_parts)[0]) {
continue;
}
}
/** @var lowercase-string $function_name */
$matching_functions[$function_name] = $function;
}
}
return $matching_functions;
}
public static function isVariadic(Codebase $codebase, string $function_id, string $file_path): bool
{
$file_storage = $codebase->file_storage_provider->get($file_path);
if (!isset($file_storage->declaring_function_ids[$function_id])) {
return false;
}
$declaring_file_path = $file_storage->declaring_function_ids[$function_id];
$file_storage = $declaring_file_path === $file_path
? $file_storage
: $codebase->file_storage_provider->get($declaring_file_path);
return isset($file_storage->functions[$function_id]) && $file_storage->functions[$function_id]->variadic;
}
/**
* @param ?list<\PhpParser\Node\Arg> $args
*/
public function isCallMapFunctionPure(
Codebase $codebase,
?\Psalm\NodeTypeProvider $type_provider,
string $function_id,
?array $args,
bool &$must_use = true
) : bool {
$impure_functions = [
// file io
'chdir', 'chgrp', 'chmod', 'chown', 'chroot', 'copy', 'file_get_contents', 'file_put_contents',
'opendir', 'readdir', 'closedir', 'rewinddir', 'scandir',
'fopen', 'fread', 'fwrite', 'fclose', 'touch', 'fpassthru', 'fputs', 'fscanf', 'fseek', 'flock',
'ftruncate', 'fprintf', 'symlink', 'mkdir', 'unlink', 'rename', 'rmdir', 'popen', 'pclose',
'fgetcsv', 'fputcsv', 'umask', 'finfo_open', 'finfo_close', 'finfo_file', 'readline_add_history',
'stream_set_timeout', 'fgets', 'fflush', 'move_uploaded_file', 'file_exists', 'realpath', 'glob',
'is_readable', 'is_dir', 'is_file',
// stream/socket io
'stream_context_set_option', 'socket_write', 'stream_set_blocking', 'socket_close',
'socket_set_option', 'stream_set_write_buffer', 'stream_socket_enable_crypto', 'stream_copy_to_stream',
'stream_wrapper_register',
// meta calls
'call_user_func', 'call_user_func_array', 'define', 'create_function',
// http
'header', 'header_remove', 'http_response_code', 'setcookie',
// output buffer
'ob_start', 'ob_end_clean', 'readfile', 'printf', 'var_dump', 'phpinfo',
'ob_implicit_flush', 'vprintf',
// mcrypt
'mcrypt_generic_init', 'mcrypt_generic_deinit', 'mcrypt_module_close',
// internal optimisation
'opcache_compile_file', 'clearstatcache',
// process-related
'pcntl_signal', 'posix_kill', 'cli_set_process_title', 'pcntl_async_signals', 'proc_close',
'proc_nice', 'proc_open', 'proc_terminate',
// curl
'curl_setopt', 'curl_close', 'curl_multi_add_handle', 'curl_multi_remove_handle',
'curl_multi_select', 'curl_multi_close', 'curl_setopt_array',
// apc, apcu
'apc_store', 'apc_delete', 'apc_clear_cache', 'apc_add', 'apc_inc', 'apc_dec', 'apc_cas',
'apcu_store', 'apcu_delete', 'apcu_clear_cache', 'apcu_add', 'apcu_inc', 'apcu_dec', 'apcu_cas',
// gz
'gzwrite', 'gzrewind', 'gzseek', 'gzclose',
// newrelic
'newrelic_start_transaction', 'newrelic_name_transaction', 'newrelic_add_custom_parameter',
'newrelic_add_custom_tracer', 'newrelic_background_job', 'newrelic_end_transaction',
'newrelic_set_appname',
// execution
'shell_exec', 'exec', 'system', 'passthru', 'pcntl_exec',
// well-known functions
'libxml_use_internal_errors', 'libxml_disable_entity_loader', 'curl_exec',
'mt_srand', 'openssl_pkcs7_sign',
'mt_rand', 'rand', 'random_int', 'random_bytes',
'wincache_ucache_delete', 'wincache_ucache_set', 'wincache_ucache_inc',
'class_alias',
'class_exists', // impure by virtue of triggering autoloader
// php environment
'ini_set', 'sleep', 'usleep', 'register_shutdown_function',
'error_reporting', 'register_tick_function', 'unregister_tick_function',
'set_error_handler', 'user_error', 'trigger_error', 'restore_error_handler',
'date_default_timezone_set', 'assert_options', 'setlocale',
'set_exception_handler', 'set_time_limit', 'putenv', 'spl_autoload_register',
'spl_autoload_unregister', 'microtime', 'array_rand',
// logging
'openlog', 'syslog', 'error_log', 'define_syslog_variables',
// session
'session_id', 'session_decode', 'session_name', 'session_set_cookie_params',
'session_set_save_handler', 'session_regenerate_id', 'mb_internal_encoding',
'session_start', 'session_cache_limiter',
// ldap
'ldap_set_option',
// iterators
'rewind', 'iterator_apply', 'iterator_to_array',
// mysqli
'mysqli_select_db', 'mysqli_dump_debug_info', 'mysqli_kill', 'mysqli_multi_query',
'mysqli_next_result', 'mysqli_options', 'mysqli_ping', 'mysqli_query', 'mysqli_report',
'mysqli_rollback', 'mysqli_savepoint', 'mysqli_set_charset', 'mysqli_ssl_set', 'mysqli_close',
// script execution
'ignore_user_abort',
// ftp
'ftp_close',
// bcmath
'bcscale',
// json
'json_last_error',
];
if (\in_array(strtolower($function_id), $impure_functions, true)) {
return false;
}
if (strpos($function_id, 'image') === 0) {
return false;
}
if (($function_id === 'var_export' || $function_id === 'print_r') && !isset($args[1])) {
return false;
}
if ($function_id === 'assert') {
$must_use = false;
return true;
}
if ($function_id === 'func_num_args' || $function_id === 'func_get_args') {
return true;
}
if ($function_id === 'count' && isset($args[0]) && $type_provider) {
$count_type = $type_provider->getType($args[0]->value);
if ($count_type) {
foreach ($count_type->getAtomicTypes() as $atomic_count_type) {
if ($atomic_count_type instanceof TNamedObject) {
$count_method_id = new MethodIdentifier(
$atomic_count_type->value,
'count'
);
try {
$method_storage = $codebase->methods->getStorage($count_method_id);
return $method_storage->mutation_free;
} catch (\Exception $e) {
// do nothing
}
}
}
}
}
$function_callable = InternalCallMapHandler::getCallableFromCallMapById(
$codebase,
$function_id,
$args ?: [],
null
);
if (!$function_callable->params
|| ($args !== null && \count($args) === 0)
|| ($function_callable->return_type && $function_callable->return_type->isVoid())
) {
return false;
}
$must_use = $function_id !== 'array_map'
|| (isset($args[0]) && !$args[0]->value instanceof \PhpParser\Node\Expr\Closure);
foreach ($function_callable->params as $i => $param) {
if ($type_provider && $param->type && $param->type->hasCallableType() && isset($args[$i])) {
$arg_type = $type_provider->getType($args[$i]->value);
if ($arg_type) {
foreach ($arg_type->getAtomicTypes() as $possible_callable) {
$possible_callable = CallableTypeComparator::getCallableFromAtomic(
$codebase,
$possible_callable
);
if ($possible_callable && !$possible_callable->is_pure) {
return false;
}
}
}
}
if ($param->by_ref && isset($args[$i])) {
$must_use = false;
}
}
return true;
}
public static function clearCache() : void
{
self::$stubbed_functions = [];
}
}
| 1 | 10,413 | I really dislike this alignment approach, as it makes identifying individual functions quite hard, but I kept it consistent with the previous style. Since no sorting was evident, I pushed it to the end of the block. Meanwhile, is this supposed to be tested or meaningfully testable somehow? | vimeo-psalm | php
@@ -127,6 +127,10 @@ class StepDelegatingExecutor(Executor):
running_steps[step.key] = step
last_check_step_health_time = pendulum.now("UTC")
+
+    # Order of events is important here. During an iteration, we call handle_event, then get_steps_to_execute,
+ # then is_complete. get_steps_to_execute updates the state of ActiveExecution, and without it
+ # is_complete can return true when we're just between steps.
while not active_execution.is_complete:
if active_execution.check_for_interrupts(): | 1 | import time
from typing import Dict, List, Optional, cast
import pendulum
from dagster import check
from dagster.core.events import DagsterEvent, EngineEventData, EventMetadataEntry, log_step_event
from dagster.core.execution.context.system import PlanOrchestrationContext
from dagster.core.execution.plan.plan import ExecutionPlan
from dagster.core.execution.plan.step import ExecutionStep
from dagster.core.execution.retries import RetryMode
from dagster.core.executor.step_delegating.step_handler.base import StepHandler, StepHandlerContext
from dagster.grpc.types import ExecuteStepArgs
from ..base import Executor
class StepDelegatingExecutor(Executor):
def __init__(
self,
step_handler: StepHandler,
retries: RetryMode,
sleep_seconds: Optional[float] = None,
check_step_health_interval_seconds: Optional[int] = None,
):
self._step_handler = step_handler
self._retries = retries
self._sleep_seconds = cast(
float, check.opt_float_param(sleep_seconds, "sleep_seconds", default=0.1)
)
self._check_step_health_interval_seconds = cast(
int,
check.opt_int_param(
check_step_health_interval_seconds, "check_step_health_interval_seconds", default=20
),
)
@property
def retries(self):
return self._retries
def _pop_events(self, instance, run_id) -> List[DagsterEvent]:
events = instance.logs_after(run_id, self._event_cursor)
self._event_cursor += len(events)
return [event.dagster_event for event in events if event.is_dagster_event]
def _get_step_handler_context(
self, plan_context, steps, active_execution
) -> StepHandlerContext:
return StepHandlerContext(
instance=plan_context.plan_data.instance,
execute_step_args=ExecuteStepArgs(
pipeline_origin=plan_context.reconstructable_pipeline.get_python_origin(),
pipeline_run_id=plan_context.pipeline_run.run_id,
step_keys_to_execute=[step.key for step in steps],
instance_ref=plan_context.plan_data.instance.get_ref(),
retry_mode=self.retries.for_inner_plan(),
known_state=active_execution.get_known_state(),
),
step_tags={step.key: step.tags for step in steps},
pipeline_run=plan_context.pipeline_run,
)
def _log_new_events(self, events, plan_context, running_steps):
# Note: this could lead to duplicated events if the returned events were already logged
# (they shouldn't be)
for event in events:
log_step_event(
plan_context.for_step(running_steps[event.step_key]),
event,
)
def execute(self, plan_context: PlanOrchestrationContext, execution_plan: ExecutionPlan):
check.inst_param(plan_context, "plan_context", PlanOrchestrationContext)
check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
self._event_cursor = -1 # pylint: disable=attribute-defined-outside-init
yield DagsterEvent.engine_event(
plan_context,
f"Starting execution with step handler {self._step_handler.name}",
EngineEventData(),
)
with execution_plan.start(retry_mode=self.retries) as active_execution:
running_steps: Dict[str, ExecutionStep] = {}
if plan_context.resume_from_failure:
yield DagsterEvent.engine_event(
plan_context,
"Resuming execution from failure",
EngineEventData(),
)
prior_events = self._pop_events(
plan_context.instance,
plan_context.run_id,
)
for dagster_event in prior_events:
yield dagster_event
possibly_in_flight_steps = active_execution.rebuild_from_events(prior_events)
for step in possibly_in_flight_steps:
yield DagsterEvent.engine_event(
plan_context,
"Checking on status of possibly launched steps",
EngineEventData(),
step.handle,
)
# TODO: check if failure event included. For now, hacky assumption that
# we don't log anything on successful check
if self._step_handler.check_step_health(
self._get_step_handler_context(plan_context, [step], active_execution)
):
# health check failed, launch the step
self._log_new_events(
self._step_handler.launch_step(
self._get_step_handler_context(
plan_context, [step], active_execution
)
),
plan_context,
{step.key: step for step in possibly_in_flight_steps},
)
running_steps[step.key] = step
last_check_step_health_time = pendulum.now("UTC")
while not active_execution.is_complete:
if active_execution.check_for_interrupts():
if not plan_context.instance.run_will_resume(plan_context.run_id):
yield DagsterEvent.engine_event(
plan_context,
"Executor received termination signal, forwarding to steps",
EngineEventData.interrupted(list(running_steps.keys())),
)
active_execution.mark_interrupted()
for _, step in running_steps.items():
self._log_new_events(
self._step_handler.terminate_step(
self._get_step_handler_context(
plan_context, [step], active_execution
)
),
plan_context,
running_steps,
)
else:
yield DagsterEvent.engine_event(
plan_context,
"Executor received termination signal, not forwarding to steps because "
"run will be resumed",
EngineEventData(
metadata_entries=[
EventMetadataEntry.text(
str(running_steps.keys()), "steps_in_flight"
)
]
),
)
active_execution.mark_interrupted()
return
curr_time = pendulum.now("UTC")
if (
curr_time - last_check_step_health_time
).total_seconds() >= self._check_step_health_interval_seconds:
last_check_step_health_time = curr_time
for _, step in running_steps.items():
self._log_new_events(
self._step_handler.check_step_health(
self._get_step_handler_context(
plan_context, [step], active_execution
)
),
plan_context,
running_steps,
)
for step in active_execution.get_steps_to_execute():
running_steps[step.key] = step
self._log_new_events(
self._step_handler.launch_step(
self._get_step_handler_context(plan_context, [step], active_execution)
),
plan_context,
running_steps,
)
# process skips from failures or uncovered inputs
for event in active_execution.plan_events_iterator(plan_context):
yield event
for dagster_event in self._pop_events(
plan_context.instance,
plan_context.run_id,
): # type: ignore
yield dagster_event
active_execution.handle_event(dagster_event)
if (
dagster_event.is_step_success
or dagster_event.is_step_failure
or dagster_event.is_step_skipped
):
assert isinstance(dagster_event.step_key, str)
del running_steps[dagster_event.step_key]
active_execution.verify_complete(plan_context, dagster_event.step_key)
time.sleep(self._sleep_seconds)
| 1 | 16,961 | Could also consider modifying the ActiveExecution... | dagster-io-dagster | py |
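To make the ordering comment in the patch above concrete, here is a small self-contained toy. It is a deliberately simplified stand-in, not dagster's ActiveExecution: a completion check that only looks at in-flight work reports "complete" in the gap between an upstream step finishing and its downstream step being claimed by get_steps_to_execute.

```python
class ToyExecution:
    """Toy stand-in for an active execution; not dagster's implementation."""

    def __init__(self, deps):
        self._deps = {step: set(ups) for step, ups in deps.items()}
        self._finished = set()
        self._in_flight = set()

    def handle_event(self, finished_step):
        # A "step succeeded" event arrives: the step is no longer in flight.
        self._in_flight.discard(finished_step)
        self._finished.add(finished_step)

    def get_steps_to_execute(self):
        # Claim every step whose upstream dependencies have all finished.
        ready = [
            step
            for step, ups in self._deps.items()
            if step not in self._finished
            and step not in self._in_flight
            and ups <= self._finished
        ]
        self._in_flight.update(ready)
        return ready

    @property
    def looks_complete(self):
        # Naive check based only on in-flight work: the kind of check the
        # patch comment warns about when it runs before get_steps_to_execute().
        return not self._in_flight


toy = ToyExecution({"A": set(), "B": {"A"}})
toy.get_steps_to_execute()   # claims "A"
toy.handle_event("A")        # "A" finishes; we are "just between steps"
print(toy.looks_complete)    # True  - wrong, "B" has not run yet
toy.get_steps_to_execute()   # claims "B", updating the state
print(toy.looks_complete)    # False - the check is meaningful again
```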
@@ -1522,3 +1522,19 @@ def get_iterating_dictionary_name(
return node.iter.as_string()
return None
+
+
+def get_subscript_const_value(node: astroid.Subscript) -> astroid.Const:
+ """
+ Returns the value (subscript.slice) of a Subscript node,
+ also supports python <3.9 windows where node.slice might be an Index
+ node
+ """
+ value = node.slice
+ if isinstance(value, astroid.Index):
+ value = value.value
+ inferred = safe_infer(value)
+ if not isinstance(inferred, astroid.Const):
+ raise ValueError("Subscript.slice cannot be inferred as an astroid.Const")
+
+ return inferred | 1 | # Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016-2017 Moises Lopez <[email protected]>
# Copyright (c) 2016 Brian C. Lane <[email protected]>
# Copyright (c) 2017-2018, 2020 hippo91 <[email protected]>
# Copyright (c) 2017 ttenhoeve-aa <[email protected]>
# Copyright (c) 2018 Alan Chan <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Yury Gribov <[email protected]>
# Copyright (c) 2018 Caio Carrara <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Brian Shaginaw <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Matthijs Blom <[email protected]>
# Copyright (c) 2019 Djailla <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2019 Nathan Marrow <[email protected]>
# Copyright (c) 2019 Svet <[email protected]>
# Copyright (c) 2019 Pascal Corpet <[email protected]>
# Copyright (c) 2020 Batuhan Taskaya <[email protected]>
# Copyright (c) 2020 Luigi <[email protected]>
# Copyright (c) 2020 ethan-leba <[email protected]>
# Copyright (c) 2020 Damien Baty <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Ram Rachum <[email protected]>
# Copyright (c) 2020 Slavfox <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""some functions that may be useful for various checkers
"""
import builtins
import itertools
import numbers
import re
import string
from functools import lru_cache, partial
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Match,
Optional,
Set,
Tuple,
Union,
)
import _string
import astroid
BUILTINS_NAME = builtins.__name__
COMP_NODE_TYPES = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
EXCEPTIONS_MODULE = "builtins"
ABC_MODULES = {"abc", "_py_abc"}
ABC_METHODS = {
"abc.abstractproperty",
"abc.abstractmethod",
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
}
TYPING_PROTOCOLS = frozenset(
{"typing.Protocol", "typing_extensions.Protocol", ".Protocol"}
)
ITER_METHOD = "__iter__"
AITER_METHOD = "__aiter__"
NEXT_METHOD = "__next__"
GETITEM_METHOD = "__getitem__"
CLASS_GETITEM_METHOD = "__class_getitem__"
SETITEM_METHOD = "__setitem__"
DELITEM_METHOD = "__delitem__"
CONTAINS_METHOD = "__contains__"
KEYS_METHOD = "keys"
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: this are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ("__new__", "__init__", "__call__"),
0: (
"__del__",
"__repr__",
"__str__",
"__bytes__",
"__hash__",
"__bool__",
"__dir__",
"__len__",
"__length_hint__",
"__iter__",
"__reversed__",
"__neg__",
"__pos__",
"__abs__",
"__invert__",
"__complex__",
"__int__",
"__float__",
"__index__",
"__trunc__",
"__floor__",
"__ceil__",
"__enter__",
"__aenter__",
"__getnewargs_ex__",
"__getnewargs__",
"__getstate__",
"__reduce__",
"__copy__",
"__unicode__",
"__nonzero__",
"__await__",
"__aiter__",
"__anext__",
"__fspath__",
),
1: (
"__format__",
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
"__getattr__",
"__getattribute__",
"__delattr__",
"__delete__",
"__instancecheck__",
"__subclasscheck__",
"__getitem__",
"__missing__",
"__delitem__",
"__contains__",
"__add__",
"__sub__",
"__mul__",
"__truediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__divmod__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__radd__",
"__rsub__",
"__rmul__",
"__rtruediv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__rlshift__",
"__rrshift__",
"__rand__",
"__rxor__",
"__ror__",
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
"__ipow__",
"__setstate__",
"__reduce_ex__",
"__deepcopy__",
"__cmp__",
"__matmul__",
"__rmatmul__",
"__imatmul__",
"__div__",
),
2: ("__setattr__", "__get__", "__set__", "__setitem__", "__set_name__"),
3: ("__exit__", "__aexit__"),
(0, 1): ("__round__",),
(1, 2): ("__pow__",),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods # type: ignore
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
SUBSCRIPTABLE_CLASSES_PEP585 = frozenset(
(
"builtins.tuple",
"builtins.list",
"builtins.dict",
"builtins.set",
"builtins.frozenset",
"builtins.type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"_collections_abc.Awaitable",
"_collections_abc.Coroutine",
"_collections_abc.AsyncIterable",
"_collections_abc.AsyncIterator",
"_collections_abc.AsyncGenerator",
"_collections_abc.Iterable",
"_collections_abc.Iterator",
"_collections_abc.Generator",
"_collections_abc.Reversible",
"_collections_abc.Container",
"_collections_abc.Collection",
"_collections_abc.Callable",
"_collections_abc.Set",
"_collections_abc.MutableSet",
"_collections_abc.Mapping",
"_collections_abc.MutableMapping",
"_collections_abc.Sequence",
"_collections_abc.MutableSequence",
"_collections_abc.ByteString",
"_collections_abc.MappingView",
"_collections_abc.KeysView",
"_collections_abc.ItemsView",
"_collections_abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
)
class NoSuchArgumentError(Exception):
pass
def is_inside_except(node):
"""Returns true if node is inside the name of an except handler."""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
return current and current is current.parent.name
def is_inside_lambda(node: astroid.node_classes.NodeNG) -> bool:
"""Return true if given node is inside lambda"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Lambda):
return True
parent = parent.parent
return False
def get_all_elements(
node: astroid.node_classes.NodeNG,
) -> Iterable[astroid.node_classes.NodeNG]:
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
yield from get_all_elements(child)
else:
yield node
def clobber_in_except(
node: astroid.node_classes.NodeNG,
) -> Tuple[bool, Optional[Tuple[str, str]]]:
"""Checks if an assignment node in an except handler clobbers an existing
variable.
Returns (True, args for W0623) if assignment clobbers an existing variable,
(False, None) otherwise.
"""
if isinstance(node, astroid.AssignAttr):
return True, (node.attrname, f"object {node.expr.as_string()!r}")
if isinstance(node, astroid.AssignName):
name = node.name
if is_builtin(name):
return True, (name, "builtins")
stmts = node.lookup(name)[1]
if stmts and not isinstance(
stmts[0].assign_type(),
(astroid.Assign, astroid.AugAssign, astroid.ExceptHandler),
):
return True, (name, "outer scope (line %s)" % stmts[0].fromlineno)
return False, None
def is_super(node: astroid.node_classes.NodeNG) -> bool:
"""return True if the node is referencing the "super" builtin function"""
if getattr(node, "name", None) == "super" and node.root().name == BUILTINS_NAME:
return True
return False
def is_error(node: astroid.scoped_nodes.FunctionDef) -> bool:
"""Return true if the given function node only raises an exception"""
return len(node.body) == 1 and isinstance(node.body[0], astroid.Raise)
builtins = builtins.__dict__.copy() # type: ignore
SPECIAL_BUILTINS = ("__builtins__",) # '__path__', '__file__')
def is_builtin_object(node: astroid.node_classes.NodeNG) -> bool:
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS_NAME
def is_builtin(name: str) -> bool:
"""return true if <name> could be considered as a builtin defined by python"""
return name in builtins or name in SPECIAL_BUILTINS # type: ignore
def is_defined_in_scope(
var_node: astroid.node_classes.NodeNG,
varname: str,
scope: astroid.node_classes.NodeNG,
) -> bool:
if isinstance(scope, astroid.If):
for node in scope.body:
if (
isinstance(node, astroid.Assign)
and any(
isinstance(target, astroid.AssignName) and target.name == varname
for target in node.targets
)
) or (isinstance(node, astroid.Nonlocal) and varname in node.names):
return True
elif isinstance(scope, (COMP_NODE_TYPES, astroid.For)):
for ass_node in scope.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(scope, astroid.With):
for expr, ids in scope.items:
if expr.parent_of(var_node):
break
if ids and isinstance(ids, astroid.AssignName) and ids.name == varname:
return True
elif isinstance(scope, (astroid.Lambda, astroid.FunctionDef)):
if scope.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if scope.args.parent_of(var_node):
try:
scope.args.default_value(varname)
scope = scope.parent
is_defined_in_scope(var_node, varname, scope)
except astroid.NoDefault:
pass
return True
if getattr(scope, "name", None) == varname:
return True
elif isinstance(scope, astroid.ExceptHandler):
if isinstance(scope.name, astroid.AssignName):
ass_node = scope.name
if ass_node.name == varname:
return True
return False
def is_defined_before(var_node: astroid.Name) -> bool:
"""Check if the given variable node is defined before
Verify that the variable node is defined by a parent node
(list, set, dict, or generator comprehension, lambda)
or in a previous sibling node on the same line
(statement_defining ; statement_using).
"""
varname = var_node.name
_node = var_node.parent
while _node:
if is_defined_in_scope(var_node, varname, _node):
return True
_node = _node.parent
# possibly multiple statements on the same line using semi colon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for assign_node in _node.nodes_of_class(astroid.AssignName):
if assign_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_default_argument(
node: astroid.node_classes.NodeNG,
scope: Optional[astroid.node_classes.NodeNG] = None,
) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
if not scope:
scope = node.scope()
if isinstance(scope, (astroid.FunctionDef, astroid.Lambda)):
for default_node in scope.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if parent.is_statement or isinstance(
parent,
(
astroid.Lambda,
astroid.scoped_nodes.ComprehensionScope,
astroid.scoped_nodes.ListComp,
),
):
break
parent = parent.parent
return False
def is_ancestor_name(
frame: astroid.ClassDef, node: astroid.node_classes.NodeNG
) -> bool:
"""return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
if not isinstance(frame, astroid.ClassDef):
return False
for base in frame.bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node: astroid.node_classes.NodeNG) -> astroid.node_classes.NodeNG:
"""return the higher parent which is not an AssignName, Tuple or List node"""
while node and isinstance(node, (astroid.AssignName, astroid.Tuple, astroid.List)):
node = node.parent
return node
def overrides_a_method(class_node: astroid.ClassDef, name: str) -> bool:
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.FunctionDef):
return True
return False
def check_messages(*messages: str) -> Callable:
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(
format_string: str,
) -> Tuple[Set[str], int, Dict[str, str], List[str]]:
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
key_types = dict()
pos_types = []
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == "%":
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == "(":
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == "(":
depth += 1
elif char == ")":
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in "#0- +":
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == ".":
i, char = next_char(i)
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in "hlL":
i, char = next_char(i)
# Parse the conversion type (mandatory).
flags = "diouxXeEfFgGcrs%a"
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
key_types[key] = char
elif char != "%":
num_args += 1
pos_types.append(char)
i += 1
return keys, num_args, key_types, pos_types
def split_format_field_names(format_string) -> Tuple[str, Iterable[Tuple[bool, str]]]:
try:
return _string.formatter_field_name_split(format_string)
except ValueError as e:
raise IncompleteFormatString() from e
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
"""Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
yield from collect_string_fields(nested)
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string) from exc
def parse_format_method_string(
format_string: str,
) -> Tuple[List[Tuple[str, List[Tuple[bool, str]]]], int, int]:
"""
Parses a PEP 3101 format string, returning a tuple of
(keyword_arguments, implicit_pos_args_cnt, explicit_pos_args),
where keyword_arguments is the set of mapping keys in the format string, implicit_pos_args_cnt
is the number of arguments required by the format string and
explicit_pos_args is the number of arguments passed with the position.
"""
keyword_arguments = []
implicit_pos_args_cnt = 0
explicit_pos_args = set()
for name in collect_string_fields(format_string):
if name and str(name).isdigit():
explicit_pos_args.add(str(name))
elif name:
keyname, fielditerator = split_format_field_names(name)
if isinstance(keyname, numbers.Number):
explicit_pos_args.add(str(keyname))
try:
keyword_arguments.append((keyname, list(fielditerator)))
except ValueError as e:
raise IncompleteFormatString() from e
else:
implicit_pos_args_cnt += 1
return keyword_arguments, implicit_pos_args_cnt, len(explicit_pos_args)
def is_attr_protected(attrname: str) -> bool:
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return (
attrname[0] == "_"
and attrname != "_"
and not (attrname.startswith("__") and attrname.endswith("__"))
)
def node_frame_class(node: astroid.node_classes.NodeNG) -> Optional[astroid.ClassDef]:
"""Return the class that is wrapping the given node
The function returns a class for a method node (or a staticmethod or a
classmethod), otherwise it returns `None`.
"""
klass = node.frame()
nodes_to_check = (
astroid.node_classes.NodeNG,
astroid.UnboundMethod,
astroid.BaseInstance,
)
while (
klass
and isinstance(klass, nodes_to_check)
and not isinstance(klass, astroid.ClassDef)
):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_attr_private(attrname: str) -> Optional[Match[str]]:
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile("^_{2,}.*[^_]+_?$")
return regex.match(attrname)
def get_argument_from_call(
call_node: astroid.Call, position: int = None, keyword: str = None
) -> astroid.Name:
"""Returns the specified argument from a function call.
:param astroid.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError("Must specify at least one of: position or keyword.")
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
def inherit_from_std_ex(node: astroid.node_classes.NodeNG) -> bool:
"""
Return true if the given class node is subclass of
exceptions.Exception.
"""
ancestors = node.ancestors() if hasattr(node, "ancestors") else []
for ancestor in itertools.chain([node], ancestors):
if (
ancestor.name in ("Exception", "BaseException")
and ancestor.root().name == EXCEPTIONS_MODULE
):
return True
return False
def error_of_type(handler: astroid.ExceptHandler, error_type) -> bool:
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, str):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type,) # type: ignore
expected_errors = {stringify_error(error) for error in error_type} # type: ignore
if not handler.type:
return False
return handler.catch(expected_errors)
def decorated_with_property(node: astroid.FunctionDef) -> bool:
"""Detect if the given function node is decorated with a property."""
if not node.decorators:
return False
for decorator in node.decorators.nodes:
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_kind(node, *kinds):
if not isinstance(node, (astroid.UnboundMethod, astroid.FunctionDef)):
return False
if node.decorators:
for decorator in node.decorators.nodes:
if isinstance(decorator, astroid.Attribute) and decorator.attrname in kinds:
return True
return False
def is_property_setter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is a property setter"""
return _is_property_kind(node, "setter")
def is_property_deleter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is a property deleter"""
return _is_property_kind(node, "deleter")
def is_property_setter_or_deleter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is either a property setter or a deleter"""
return _is_property_kind(node, "setter", "deleter")
def _is_property_decorator(decorator: astroid.Name) -> bool:
for inferred in decorator.infer():
if isinstance(inferred, astroid.ClassDef):
if inferred.root().name == BUILTINS_NAME and inferred.name == "property":
return True
for ancestor in inferred.ancestors():
if (
ancestor.name == "property"
and ancestor.root().name == BUILTINS_NAME
):
return True
return False
def decorated_with(
func: Union[astroid.FunctionDef, astroid.BoundMethod, astroid.UnboundMethod],
qnames: Iterable[str],
) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
if isinstance(decorator_node, astroid.Call):
# We only want to infer the function name
decorator_node = decorator_node.func
try:
if any(
i is not None and i.qname() in qnames or i.name in qnames
for i in decorator_node.infer()
):
return True
except astroid.InferenceError:
continue
return False
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(
node: astroid.ClassDef, is_abstract_cb: astroid.FunctionDef = None
) -> Dict[str, astroid.node_classes.NodeNG]:
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
visited: Dict[str, astroid.node_classes.NodeNG] = {}
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
# Probably inconsistent hierarchy, don'try
# to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
inferred = obj
if isinstance(obj, astroid.AssignName):
inferred = safe_infer(obj)
if not inferred:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(inferred, astroid.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(inferred, astroid.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(inferred)
if abstract:
visited[obj.name] = inferred
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
def find_try_except_wrapper_node(
node: astroid.node_classes.NodeNG,
) -> Optional[Union[astroid.ExceptHandler, astroid.TryExcept]]:
"""Return the ExceptHandler or the TryExcept node in which the node is."""
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def find_except_wrapper_node_in_scope(
node: astroid.node_classes.NodeNG,
) -> Optional[Union[astroid.ExceptHandler, astroid.TryExcept]]:
"""Return the ExceptHandler in which the node is, without going out of scope."""
current = node
while current.parent is not None:
current = current.parent
if isinstance(current, astroid.scoped_nodes.LocalsDictNodeNG):
# If we're inside a function/class definition, we don't want to keep checking
# higher ancestors for `except` clauses, because if these exist, it means our
# function/class was defined in an `except` clause, rather than the current code
# actually running in an `except` clause.
return None
if isinstance(current, astroid.ExceptHandler):
return current
return None
def is_from_fallback_block(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the given node is from a fallback import block."""
context = find_try_except_wrapper_node(node)
if not context:
return False
if isinstance(context, astroid.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers
)
handlers = context.handlers
has_fallback_imports = any(
isinstance(import_node, (astroid.ImportFrom, astroid.Import))
for import_node in other_body
)
ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(
handlers: astroid.ExceptHandler, exception
) -> bool:
func = partial(error_of_type, error_type=(exception,))
return any(func(handler) for handler in handlers)
def get_exception_handlers(
node: astroid.node_classes.NodeNG, exception=Exception
) -> Optional[List[astroid.ExceptHandler]]:
"""Return the collections of handlers handling the exception in arguments.
Args:
node (astroid.NodeNG): A node that is potentially wrapped in a try except.
exception (builtin.Exception or str): exception or name of the exception.
Returns:
list: the collection of handlers that are handling the exception or None.
"""
context = find_try_except_wrapper_node(node)
if isinstance(context, astroid.TryExcept):
return [
handler for handler in context.handlers if error_of_type(handler, exception)
]
return []
def is_node_inside_try_except(node: astroid.Raise) -> bool:
"""Check if the node is directly under a Try/Except statement.
(but not under an ExceptHandler!)
Args:
node (astroid.Raise): the node raising the exception.
Returns:
bool: True if the node is inside a try/except statement, False otherwise.
"""
context = find_try_except_wrapper_node(node)
return isinstance(context, astroid.TryExcept)
def node_ignores_exception(
node: astroid.node_classes.NodeNG, exception=Exception
) -> bool:
"""Check if the node is in a TryExcept which handles the given exception.
If the exception is not given, the function is going to look for bare
excepts.
"""
managing_handlers = get_exception_handlers(node, exception)
if not managing_handlers:
return False
return any(managing_handlers)
def class_is_abstract(node: astroid.ClassDef) -> bool:
"""return true if the given class node should be considered as an abstract
class
"""
# Only check for explicit metaclass=ABCMeta on this specific class
meta = node.declared_metaclass()
if meta is not None:
if meta.name == "ABCMeta" and meta.root().name in ABC_MODULES:
return True
for ancestor in node.ancestors():
if ancestor.name == "ABC" and ancestor.root().name in ABC_MODULES:
# abc.ABC inheritance
return True
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value: astroid.node_classes.NodeNG, attr: str) -> bool:
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, astroid.AssignName):
if isinstance(first.parent.value, astroid.Const):
return False
return True
def is_comprehension(node: astroid.node_classes.NodeNG) -> bool:
comprehensions = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(
value, GETITEM_METHOD
) and _supports_protocol_method(value, KEYS_METHOD)
def _supports_membership_test_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(
value, GETITEM_METHOD
)
def _supports_async_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, AITER_METHOD)
def _supports_getitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name: str) -> bool:
lname = name.lower()
is_mixin = lname.endswith("mixin")
is_abstract = lname.startswith("abstract")
is_base = lname.startswith("base") or lname.endswith("base")
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node: astroid.node_classes.NodeNG) -> bool:
while node is not None:
if isinstance(node, astroid.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, "name", None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(
value: astroid.node_classes.NodeNG, protocol_callback: astroid.FunctionDef
) -> bool:
if isinstance(value, astroid.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if value.has_dynamic_getattr():
return True
if protocol_callback(value):
return True
if (
isinstance(value, astroid.bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)
):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value: astroid.node_classes.NodeNG, check_async: bool = False) -> bool:
if check_async:
protocol_check = _supports_async_iteration_protocol
else:
protocol_check = _supports_iteration_protocol
return _supports_protocol(value, protocol_check)
def is_mapping(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value: astroid.node_classes.NodeNG) -> bool:
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(
value: astroid.node_classes.NodeNG, node: astroid.node_classes.NodeNG
) -> bool:
if isinstance(value, astroid.ClassDef):
if _supports_protocol_method(value, CLASS_GETITEM_METHOD):
return True
if is_class_subscriptable_pep585_with_postponed_evaluation_enabled(value, node):
return True
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value: astroid.node_classes.NodeNG, *_: Any) -> bool:
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value: astroid.node_classes.NodeNG, *_: Any) -> bool:
return _supports_protocol(value, _supports_delitem_protocol)
def _get_python_type_of_node(node):
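    """Return the result of the node's pytype() when it is callable, otherwise None."""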
pytype = getattr(node, "pytype", None)
if callable(pytype):
return pytype()
return None
@lru_cache(maxsize=1024)
def safe_infer(
node: astroid.node_classes.NodeNG, context=None
) -> Optional[astroid.node_classes.NodeNG]:
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred of different types).
"""
inferred_types = set()
try:
infer_gen = node.infer(context=context)
value = next(infer_gen)
except astroid.InferenceError:
return None
if value is not astroid.Uninferable:
inferred_types.add(_get_python_type_of_node(value))
try:
for inferred in infer_gen:
inferred_type = _get_python_type_of_node(inferred)
if inferred_type not in inferred_types:
return None # If there is ambiguity on the inferred node.
except astroid.InferenceError:
return None # There is some kind of ambiguity
except StopIteration:
return value
return value if len(inferred_types) <= 1 else None
def has_known_bases(klass: astroid.ClassDef, context=None) -> bool:
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
if (
not isinstance(result, astroid.ClassDef)
or result is klass
or not has_known_bases(result, context=context)
):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node: astroid.node_classes.NodeNG) -> bool:
return (
node is None
or (isinstance(node, astroid.Const) and node.value is None)
or (isinstance(node, astroid.Name) and node.name == "None")
)
def node_type(node: astroid.node_classes.NodeNG) -> Optional[type]:
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is Uninferable or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types = set()
try:
for var_type in node.infer():
if var_type == astroid.Uninferable or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return None
except astroid.InferenceError:
return None
return types.pop() if types else None
def is_registered_in_singledispatch_function(node: astroid.FunctionDef) -> bool:
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
"functools.singledispatch",
"singledispatch.singledispatch",
)
if not isinstance(node, astroid.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, astroid.Call):
continue
func = decorator.func
if not isinstance(func, astroid.Attribute) or func.attrname != "register":
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, astroid.FunctionDef):
# pylint: disable=redundant-keyword-arg; some flow inference goes wrong here
return decorated_with(func_def, singledispatch_qnames)
return False
def get_node_last_lineno(node: astroid.node_classes.NodeNG) -> int:
"""
Get the last lineno of the given node. For a simple statement this will just be node.lineno,
but for a node that has child statements (e.g. a method) this will be the lineno of the last
child statement recursively.
"""
# 'finalbody' is always the last clause in a try statement, if present
if getattr(node, "finalbody", False):
return get_node_last_lineno(node.finalbody[-1])
# For if, while, and for statements 'orelse' is always the last clause.
# For try statements 'orelse' is the last in the absence of a 'finalbody'
if getattr(node, "orelse", False):
return get_node_last_lineno(node.orelse[-1])
# try statements have the 'handlers' last if there is no 'orelse' or 'finalbody'
if getattr(node, "handlers", False):
return get_node_last_lineno(node.handlers[-1])
# All compound statements have a 'body'
if getattr(node, "body", False):
return get_node_last_lineno(node.body[-1])
# Not a compound statement
return node.lineno
def is_postponed_evaluation_enabled(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the postponed evaluation of annotations is enabled"""
module = node.root()
return "annotations" in module.future_imports
def is_class_subscriptable_pep585_with_postponed_evaluation_enabled(
value: astroid.ClassDef, node: astroid.node_classes.NodeNG
) -> bool:
"""Check if class is subscriptable with PEP 585 and
postponed evaluation enabled.
"""
return (
is_postponed_evaluation_enabled(node)
and value.qname() in SUBSCRIPTABLE_CLASSES_PEP585
and is_node_in_type_annotation_context(node)
)
def is_node_in_type_annotation_context(node: astroid.node_classes.NodeNG) -> bool:
"""Check if node is in type annotation context.
Check for 'AnnAssign', function 'Arguments',
    or part of a function return type annotation.
"""
# pylint: disable=too-many-boolean-expressions
current_node, parent_node = node, node.parent
while True:
if (
isinstance(parent_node, astroid.AnnAssign)
and parent_node.annotation == current_node
or isinstance(parent_node, astroid.Arguments)
and current_node
in (
*parent_node.annotations,
*parent_node.posonlyargs_annotations,
*parent_node.kwonlyargs_annotations,
parent_node.varargannotation,
parent_node.kwargannotation,
)
or isinstance(parent_node, astroid.FunctionDef)
and parent_node.returns == current_node
):
return True
current_node, parent_node = parent_node, parent_node.parent
if isinstance(parent_node, astroid.Module):
return False
def is_subclass_of(child: astroid.ClassDef, parent: astroid.ClassDef) -> bool:
"""
Check if first node is a subclass of second node.
:param child: Node to check for subclass.
:param parent: Node to check for superclass.
:returns: True if child is derived from parent. False otherwise.
"""
if not all(isinstance(node, astroid.ClassDef) for node in (child, parent)):
return False
for ancestor in child.ancestors():
try:
if astroid.helpers.is_subtype(ancestor, parent):
return True
except astroid.exceptions._NonDeducibleTypeHierarchy:
continue
return False
@lru_cache(maxsize=1024)
def is_overload_stub(node: astroid.node_classes.NodeNG) -> bool:
"""Check if a node if is a function stub decorated with typing.overload.
:param node: Node to check.
:returns: True if node is an overload function stub. False otherwise.
"""
decorators = getattr(node, "decorators", None)
return bool(decorators and decorated_with(node, ["typing.overload", "overload"]))
def is_protocol_class(cls: astroid.node_classes.NodeNG) -> bool:
"""Check if the given node represents a protocol class
:param cls: The node to check
:returns: True if the node is a typing protocol class, false otherwise.
"""
if not isinstance(cls, astroid.ClassDef):
return False
# Use .ancestors() since not all protocol classes can have
# their mro deduced.
return any(parent.qname() in TYPING_PROTOCOLS for parent in cls.ancestors())
def is_call_of_name(node: astroid.node_classes.NodeNG, name: str) -> bool:
"""Checks if node is a function call with the given name"""
return (
isinstance(node, astroid.Call)
and isinstance(node.func, astroid.Name)
and node.func.name == name
)
def is_test_condition(
node: astroid.node_classes.NodeNG,
parent: Optional[astroid.node_classes.NodeNG] = None,
) -> bool:
"""Returns true if the given node is being tested for truthiness"""
parent = parent or node.parent
if isinstance(parent, (astroid.While, astroid.If, astroid.IfExp, astroid.Assert)):
return node is parent.test or parent.test.parent_of(node)
if isinstance(parent, astroid.Comprehension):
return node in parent.ifs
return is_call_of_name(parent, "bool") and parent.parent_of(node)
def is_classdef_type(node: astroid.ClassDef) -> bool:
"""Test if ClassDef node is Type."""
if node.name == "type":
return True
for base in node.bases:
if isinstance(base, astroid.Name) and base.name == "type":
return True
return False
def is_attribute_typed_annotation(
node: Union[astroid.ClassDef, astroid.Instance], attr_name: str
) -> bool:
"""Test if attribute is typed annotation in current node
or any base nodes.
"""
attribute = node.locals.get(attr_name, [None])[0]
if (
attribute
and isinstance(attribute, astroid.AssignName)
and isinstance(attribute.parent, astroid.AnnAssign)
):
return True
for base in node.bases:
inferred = safe_infer(base)
if (
inferred
and isinstance(inferred, astroid.ClassDef)
and is_attribute_typed_annotation(inferred, attr_name)
):
return True
return False
def is_assign_name_annotated_with(node: astroid.AssignName, typing_name: str) -> bool:
"""Test if AssignName node has `typing_name` annotation.
Especially useful to check for `typing._SpecialForm` instances
like: `Union`, `Optional`, `Literal`, `ClassVar`, `Final`.
"""
if not isinstance(node.parent, astroid.AnnAssign):
return False
annotation = node.parent.annotation
if isinstance(annotation, astroid.Subscript):
annotation = annotation.value
if (
isinstance(annotation, astroid.Name)
and annotation.name == typing_name
or isinstance(annotation, astroid.Attribute)
and annotation.attrname == typing_name
):
return True
return False
def get_iterating_dictionary_name(
node: Union[astroid.For, astroid.Comprehension]
) -> Optional[str]:
"""Get the name of the dictionary which keys are being iterated over on
a `astroid.For` or `astroid.Comprehension` node.
If the iterating object is not either the keys method of a dictionary
or a dictionary itself, this returns None.
"""
# Is it a proper keys call?
if (
isinstance(node.iter, astroid.Call)
and isinstance(node.iter.func, astroid.Attribute)
and node.iter.func.attrname == "keys"
):
inferred = safe_infer(node.iter.func)
if not isinstance(inferred, astroid.BoundMethod):
return None
return node.iter.as_string().rpartition(".keys")[0]
# Is it a dictionary?
if isinstance(node.iter, (astroid.Name, astroid.Attribute)):
inferred = safe_infer(node.iter)
if not isinstance(inferred, astroid.Dict):
return None
return node.iter.as_string()
return None
| 1 | 13,882 | Does it make sense to change that to a custom error instead of `ValueError`? | PyCQA-pylint | py |
@@ -259,7 +259,7 @@ func (vm *VM) ApplyTipSetMessages(blocks []interpreter.BlockMessagesInfo, head b
// Pay block reward.
// Dragons: missing final protocol design on if/how to determine the nominal power
- rewardMessage := makeBlockRewardMessage(blk.Miner, minerPenaltyTotal, minerGasRewardTotal, blk.TicketCount)
+ rewardMessage := makeBlockRewardMessage(blk.Miner, minerPenaltyTotal, minerGasRewardTotal, 1)
if _, err := vm.applyImplicitMessage(rewardMessage, rnd); err != nil {
return nil, err
} | 1 | package vmcontext
import (
"context"
"fmt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/account"
init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
specsruntime "github.com/filecoin-project/specs-actors/actors/runtime"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/internal/pkg/block"
"github.com/filecoin-project/go-filecoin/internal/pkg/crypto"
"github.com/filecoin-project/go-filecoin/internal/pkg/encoding"
"github.com/filecoin-project/go-filecoin/internal/pkg/types"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/gas"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/dispatch"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/gascost"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/interpreter"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/message"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/storage"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/state"
)
var vmlog = logging.Logger("vm.context")
// VM holds the state and executes messages over the state.
type VM struct {
context context.Context
actorImpls ActorImplLookup
store *storage.VMStorage
state state.Tree
syscalls SyscallsImpl
currentHead block.TipSetKey
currentEpoch abi.ChainEpoch
pricelist gascost.Pricelist
}
// ActorImplLookup provides access to upgradeable actor code.
type ActorImplLookup interface {
GetActorImpl(code cid.Cid) (dispatch.Dispatcher, error)
}
type minerPenaltyFIL = abi.TokenAmount
type gasRewardFIL = abi.TokenAmount
type internalMessage struct {
from address.Address
to address.Address
value abi.TokenAmount
method abi.MethodNum
params interface{}
}
// NewVM creates a new runtime for executing messages.
// Dragons: change to take a root and the store, build the tree internally
func NewVM(actorImpls ActorImplLookup, store *storage.VMStorage, st state.Tree, syscalls SyscallsImpl) VM {
return VM{
context: context.Background(),
actorImpls: actorImpls,
store: store,
state: st,
syscalls: syscalls,
// loaded during execution
// currentEpoch: ..,
}
}
// ApplyGenesisMessage forces the execution of a message in the vm actor.
//
// This method is intended to be used in the generation of the genesis block only.
func (vm *VM) ApplyGenesisMessage(from address.Address, to address.Address, method abi.MethodNum, value abi.TokenAmount, params interface{}, rnd crypto.RandomnessSource) (interface{}, error) {
vm.pricelist = gascost.PricelistByEpoch(vm.currentEpoch)
// normalize from addr
var ok bool
if from, ok = vm.normalizeAddress(from); !ok {
runtime.Abort(exitcode.SysErrSenderInvalid)
}
// build internal message
imsg := internalMessage{
from: from,
to: to,
value: value,
method: method,
params: params,
}
ret, err := vm.applyImplicitMessage(imsg, rnd)
if err != nil {
return ret, err
}
// commit
if _, err := vm.commit(); err != nil {
return nil, err
}
return ret, nil
}
func (vm *VM) rollback(root state.Root) error {
return vm.state.Rollback(vm.context, root)
}
func (vm *VM) checkpoint() (state.Root, error) {
root, err := vm.state.Commit(vm.context)
if err != nil {
return cid.Undef, err
}
return root, nil
}
func (vm *VM) commit() (state.Root, error) {
	// Note: the following assumes the state commits into the store;
// unless the store is flushed, the state is not persisted.
// commit the vm state
root, err := vm.state.Commit(vm.context)
if err != nil {
return cid.Undef, err
}
// flush all blocks out of the store
if err := vm.store.Flush(); err != nil {
return cid.Undef, err
}
return root, nil
}
// ContextStore provides access to specs-actors adt library.
//
// This type of store is used to access some internal actor state.
func (vm *VM) ContextStore() adt.Store {
return &contextStore{context: vm.context, store: vm.store}
}
func (vm *VM) normalizeAddress(addr address.Address) (address.Address, bool) {
// short-circuit if the address is already an ID address
if addr.Protocol() == address.ID {
return addr, true
}
// resolve the target address via the InitActor, and attempt to load state.
initActorEntry, found, err := vm.state.GetActor(vm.context, builtin.InitActorAddr)
if err != nil {
panic(errors.Wrapf(err, "failed to load init actor"))
}
if !found {
panic(errors.Wrapf(err, "no init actor"))
}
// get a view into the actor state
var state init_.State
if _, err := vm.store.Get(vm.context, initActorEntry.Head.Cid, &state); err != nil {
panic(err)
}
idAddr, err := state.ResolveAddress(vm.ContextStore(), addr)
if err == init_.ErrAddressNotFound {
return address.Undef, false
} else if err != nil {
panic(err)
}
return idAddr, true
}
func (vm *VM) stateView() SyscallsStateView {
// The state tree's root is not committed until the end of a tipset, so we can't use the external state view
// type for this implementation.
// Maybe we could re-work it to use a root HAMT node rather than root CID.
return &syscallsStateView{vm}
}
// implement VMInterpreter for VM
var _ interpreter.VMInterpreter = (*VM)(nil)
// ApplyTipSetMessages implements interpreter.VMInterpreter
func (vm *VM) ApplyTipSetMessages(blocks []interpreter.BlockMessagesInfo, head block.TipSetKey, epoch abi.ChainEpoch, rnd crypto.RandomnessSource) ([]message.Receipt, error) {
receipts := []message.Receipt{}
// update current tipset
vm.currentHead = head
vm.currentEpoch = epoch
vm.pricelist = gascost.PricelistByEpoch(epoch)
// create message tracker
// Note: the same message could have been included by more than one miner
seenMsgs := make(map[cid.Cid]struct{})
// process messages on each block
for _, blk := range blocks {
if blk.Miner.Protocol() != address.ID {
panic("precond failure: block miner address must be an IDAddress")
}
// initial miner penalty and gas rewards
// Note: certain msg execution failures can cause the miner to pay for the gas
minerPenaltyTotal := big.Zero()
minerGasRewardTotal := big.Zero()
// Process BLS messages from the block
for _, m := range blk.BLSMessages {
// do not recompute already seen messages
mcid := msgCID(m)
if _, found := seenMsgs[mcid]; found {
continue
}
// apply message
receipt, minerPenaltyCurr, minerGasRewardCurr := vm.applyMessage(m, m.OnChainLen(), rnd)
// accumulate result
minerPenaltyTotal = big.Add(minerPenaltyTotal, minerPenaltyCurr)
minerGasRewardTotal = big.Add(minerGasRewardTotal, minerGasRewardCurr)
receipts = append(receipts, receipt)
// flag msg as seen
seenMsgs[mcid] = struct{}{}
}
// Process SECP messages from the block
for _, sm := range blk.SECPMessages {
// extract unsigned message part
m := sm.Message
// do not recompute already seen messages
mcid := msgCID(&m)
if _, found := seenMsgs[mcid]; found {
continue
}
// apply message
// Note: the on-chain size for SECP messages is different
receipt, minerPenaltyCurr, minerGasRewardCurr := vm.applyMessage(&m, sm.OnChainLen(), rnd)
// accumulate result
minerPenaltyTotal = big.Add(minerPenaltyTotal, minerPenaltyCurr)
minerGasRewardTotal = big.Add(minerGasRewardTotal, minerGasRewardCurr)
receipts = append(receipts, receipt)
// flag msg as seen
seenMsgs[mcid] = struct{}{}
}
// Pay block reward.
// Dragons: missing final protocol design on if/how to determine the nominal power
rewardMessage := makeBlockRewardMessage(blk.Miner, minerPenaltyTotal, minerGasRewardTotal, blk.TicketCount)
if _, err := vm.applyImplicitMessage(rewardMessage, rnd); err != nil {
return nil, err
}
}
// cron tick
cronMessage := makeCronTickMessage()
if _, err := vm.applyImplicitMessage(cronMessage, rnd); err != nil {
return nil, err
}
// commit state
if _, err := vm.commit(); err != nil {
return nil, err
}
return receipts, nil
}
// applyImplicitMessage applies messages automatically generated by the vm itself.
//
// These messages do not consume client gas and must not fail.
func (vm *VM) applyImplicitMessage(imsg internalMessage, rnd crypto.RandomnessSource) (specsruntime.CBORMarshaler, error) {
	// implicit message gas is tracked separately and not paid by the miner
gasTank := NewGasTracker(gas.SystemGasLimit)
// the execution of the implicit messages is simpler than full external/actor-actor messages
// execution:
// 1. load from actor
// 2. increment seqnumber (only for accounts)
// 3. build new context
// 4. invoke message
// 1. load from actor
fromActor, found, err := vm.state.GetActor(vm.context, imsg.from)
if err != nil {
return nil, err
}
if !found {
return nil, fmt.Errorf("implicit message `from` field actor not found, addr: %s", imsg.from)
}
originatorIsAccount := fromActor.Code.Equals(builtin.AccountActorCodeID)
// Compute the originator address. Unlike real messages, implicit ones can be originated by
// singleton non-account actors. Singleton addresses are reorg-proof so ok to use here.
var originator address.Address
if originatorIsAccount {
// Load sender account state to obtain stable pubkey address.
var senderState account.State
_, err = vm.store.Get(vm.context, fromActor.Head.Cid, &senderState)
if err != nil {
panic(err)
}
originator = senderState.Address
} else if builtin.IsBuiltinActor(fromActor.Code.Cid) {
originator = imsg.from // Cannot resolve non-account actor to pubkey addresses.
} else {
runtime.Abortf(exitcode.SysErrInternal, "implicit message from non-account or -singleton actor code %s", fromActor.Code.Cid)
}
// 2. increment seq number (only for account actors).
// The account actor distinction only makes a difference for genesis state construction via messages, where
// some messages are sent from non-account actors (e.g. fund transfers from the reward actor).
if originatorIsAccount {
fromActor.IncrementSeqNum()
if err := vm.state.SetActor(vm.context, imsg.from, fromActor); err != nil {
return nil, err
}
}
// 3. build context
topLevel := topLevelContext{
originatorStableAddress: originator,
originatorCallSeq: fromActor.CallSeqNum, // Implied CallSeqNum is that of the actor before incrementing.
newActorAddressCount: 0,
}
ctx := newInvocationContext(vm, &topLevel, imsg, fromActor, &gasTank, rnd)
// 4. invoke message
ret, code := ctx.invoke()
if code.IsError() {
return nil, fmt.Errorf("Invalid exit code during implicit message execution (code: %d)", code)
}
return ret.inner, nil
}
// applyMessage applies the message to the current state.
func (vm *VM) applyMessage(msg *types.UnsignedMessage, onChainMsgSize int, rnd crypto.RandomnessSource) (message.Receipt, minerPenaltyFIL, gasRewardFIL) {
// This method does not actually execute the message itself,
// but rather deals with the pre/post processing of a message.
// (see: `invocationContext.invoke()` for the dispatch and execution)
// initiate gas tracking
gasTank := NewGasTracker(msg.GasLimit)
// pre-send
// 1. charge for message existence
// 2. load sender actor
// 3. check message seq number
// 4. check if _sender_ has enough funds
// 5. increment message seq number
// 6. withheld maximum gas from _sender_
// 7. checkpoint state
// 1. charge for bytes used in chain
msgGasCost := vm.pricelist.OnChainMessage(onChainMsgSize)
ok := gasTank.TryCharge(msgGasCost)
if !ok {
// Invalid message; insufficient gas limit to pay for the on-chain message size.
// Note: the miner needs to pay the full msg cost, not what might have been partially consumed
return message.Failure(exitcode.SysErrOutOfGas, gas.Zero), msgGasCost.ToTokens(msg.GasPrice), big.Zero()
}
// 2. load actor from global state
if msg.From, ok = vm.normalizeAddress(msg.From); !ok {
return message.Failure(exitcode.SysErrSenderInvalid, gas.Zero), gasTank.GasConsumed().ToTokens(msg.GasPrice), big.Zero()
}
fromActor, found, err := vm.state.GetActor(vm.context, msg.From)
if err != nil {
panic(err)
}
if !found {
// Execution error; sender does not exist at time of message execution.
return message.Failure(exitcode.SysErrSenderInvalid, gas.Zero), gasTank.GasConsumed().ToTokens(msg.GasPrice), big.Zero()
}
if !fromActor.Code.Equals(builtin.AccountActorCodeID) {
// Execution error; sender is not an account.
return message.Failure(exitcode.SysErrSenderInvalid, gas.Zero), gasTank.gasConsumed.ToTokens(msg.GasPrice), big.Zero()
}
// 3. make sure this is the right message order for fromActor
if msg.CallSeqNum != fromActor.CallSeqNum {
// Execution error; invalid seq number.
return message.Failure(exitcode.SysErrSenderStateInvalid, gas.Zero), gasTank.GasConsumed().ToTokens(msg.GasPrice), big.Zero()
}
// 4. Check sender balance (gas + value being sent)
gasLimitCost := msg.GasLimit.ToTokens(msg.GasPrice)
totalCost := big.Add(msg.Value, gasLimitCost)
if fromActor.Balance.LessThan(totalCost) {
// Execution error; sender does not have sufficient funds to pay for the gas limit.
return message.Failure(exitcode.SysErrSenderStateInvalid, gas.Zero), gasTank.GasConsumed().ToTokens(msg.GasPrice), big.Zero()
}
// 5. Increment sender CallSeqNum
fromActor.IncrementSeqNum()
// update actor
if err := vm.state.SetActor(vm.context, msg.From, fromActor); err != nil {
panic(err)
}
// 6. Deduct gas limit funds from sender first
// Note: this should always succeed, due to the sender balance check above
	// Note: after this point, we need to return these funds before exiting
vm.transfer(msg.From, builtin.RewardActorAddr, gasLimitCost)
// reload from actor
// Note: balance might have changed
fromActor, found, err = vm.state.GetActor(vm.context, msg.From)
if err != nil {
panic(err)
}
if !found {
panic("unreachable: actor cannot possibly not exist")
}
// Load sender account state to obtain stable pubkey address.
var senderState account.State
_, err = vm.store.Get(vm.context, fromActor.Head.Cid, &senderState)
if err != nil {
panic(err)
}
// 7. checkpoint state
// Even if the message fails, the following accumulated changes will be applied:
// - CallSeqNumber increment
// - sender balance withheld
priorRoot, err := vm.checkpoint()
if err != nil {
panic(err)
}
// send
// 1. build internal message
// 2. build invocation context
// 3. process the msg
topLevel := topLevelContext{
originatorStableAddress: senderState.Address,
originatorCallSeq: msg.CallSeqNum,
newActorAddressCount: 0,
}
// 1. build internal msg
imsg := internalMessage{
from: msg.From,
to: msg.To,
value: msg.Value,
method: msg.Method,
params: msg.Params,
}
// 2. build invocation context
ctx := newInvocationContext(vm, &topLevel, imsg, fromActor, &gasTank, rnd)
// 3. invoke
ret, code := ctx.invoke()
// build receipt
receipt := message.Receipt{
ExitCode: code,
}
// encode value
receipt.ReturnValue, err = ret.ToCbor()
if err != nil {
// failed to encode object returned by actor
receipt.ReturnValue = []byte{}
receipt.ExitCode = exitcode.SysErrorIllegalActor
}
// post-send
// 1. charge gas for putting the return value on the chain
// 2. settle gas money around (unused_gas -> sender)
// 3. success!
// 1. charge for the space used by the return value
// Note: the GasUsed in the message receipt does not
ok = gasTank.TryCharge(vm.pricelist.OnChainReturnValue(&receipt))
if !ok {
// Insufficient gas remaining to cover the on-chain return value; proceed as in the case
// of method execution failure.
receipt.ExitCode = exitcode.SysErrOutOfGas
receipt.ReturnValue = []byte{}
}
// Roll back all state if the receipt's exit code is not ok.
// This is required in addition to rollback within the invocation context since top level messages can fail for
// more reasons than internal ones. Invocation context still needs its own rollback so actors can recover and
// proceed from a nested call failure.
if receipt.ExitCode != exitcode.Ok {
if err := vm.rollback(priorRoot); err != nil {
panic(err)
}
}
// 2. settle gas money around (unused_gas -> sender)
receipt.GasUsed = gasTank.GasConsumed()
refundGas := msg.GasLimit - receipt.GasUsed
vm.transfer(builtin.RewardActorAddr, msg.From, refundGas.ToTokens(msg.GasPrice))
// 3. Success!
return receipt, big.Zero(), gasTank.GasConsumed().ToTokens(msg.GasPrice)
}
// transfer debits money from one account and credits it to another.
//
// WARNING: this method will panic if the amount is negative, accounts don't exist, or have insufficient funds.
//
// Note: this is not idiomatic, it follows the Spec expectations for this method.
func (vm *VM) transfer(debitFrom address.Address, creditTo address.Address, amount abi.TokenAmount) {
// allow only for positive amounts
if amount.LessThan(abi.NewTokenAmount(0)) {
panic("unreachable: negative funds transfer not allowed")
}
if amount.Nil() || amount.IsZero() {
// nothing to transfer
return
}
ctx := context.Background()
// retrieve debit account
fromActor, found, err := vm.state.GetActor(ctx, debitFrom)
if err != nil {
panic(err)
}
if !found {
panic(fmt.Errorf("unreachable: debit account not found. %s", err))
}
// check that account has enough balance for transfer
if fromActor.Balance.LessThan(amount) {
panic("unreachable: insufficient balance on debit account")
}
// debit funds
fromActor.Balance = big.Sub(fromActor.Balance, amount)
if err := vm.state.SetActor(ctx, debitFrom, fromActor); err != nil {
panic(err)
}
// retrieve credit account
toActor, found, err := vm.state.GetActor(ctx, creditTo)
if err != nil {
panic(err)
}
if !found {
panic(fmt.Errorf("unreachable: credit account not found. %s", err))
}
// credit funds
toActor.Balance = big.Add(toActor.Balance, amount)
if err := vm.state.SetActor(ctx, creditTo, toActor); err != nil {
panic(err)
}
}
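// getActorImpl resolves the dispatcher for the given actor code CID, aborting with SysErrInvalidReceiver when the lookup fails.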
func (vm *VM) getActorImpl(code cid.Cid) dispatch.Dispatcher {
actorImpl, err := vm.actorImpls.GetActorImpl(code)
if err != nil {
runtime.Abort(exitcode.SysErrInvalidReceiver)
}
return actorImpl
}
//
// implement runtime.Runtime for VM
//
var _ runtime.Runtime = (*VM)(nil)
// CurrentEpoch implements runtime.Runtime.
func (vm *VM) CurrentEpoch() abi.ChainEpoch {
return vm.currentEpoch
}
//
// implement runtime.MessageInfo for internalMessage
//
var _ specsruntime.Message = (*internalMessage)(nil)
// ValueReceived implements runtime.MessageInfo.
func (msg internalMessage) ValueReceived() abi.TokenAmount {
return msg.value
}
// Caller implements runtime.MessageInfo.
func (msg internalMessage) Caller() address.Address {
return msg.from
}
// Receiver implements runtime.MessageInfo.
func (msg internalMessage) Receiver() address.Address {
return msg.to
}
//
// implement syscalls state view
//
type syscallsStateView struct {
*VM
}
func (vm *syscallsStateView) AccountSignerAddress(ctx context.Context, accountAddr address.Address) (address.Address, error) {
// Short-circuit when given a pubkey address.
if accountAddr.Protocol() == address.SECP256K1 || accountAddr.Protocol() == address.BLS {
return accountAddr, nil
}
actor, found, err := vm.state.GetActor(vm.context, accountAddr)
if err != nil {
return address.Undef, errors.Wrapf(err, "signer resolution failed to find actor %s", accountAddr)
}
if !found {
return address.Undef, fmt.Errorf("signer resolution found no such actor %s", accountAddr)
}
var state account.State
if _, err := vm.store.Get(vm.context, actor.Head.Cid, &state); err != nil {
// This error is internal, shouldn't propagate as on-chain failure
panic(fmt.Errorf("signer resolution failed to lost state for %s ", accountAddr))
}
return state.Address, nil
}
func (vm *syscallsStateView) MinerControlAddresses(ctx context.Context, maddr address.Address) (owner, worker address.Address, err error) {
actor, found, err := vm.state.GetActor(vm.context, maddr)
if err != nil {
return address.Undef, address.Undef, errors.Wrapf(err, "miner resolution failed to find actor %s", maddr)
}
if !found {
return address.Undef, address.Undef, fmt.Errorf("miner resolution found no such actor %s", maddr)
}
var state miner.State
if _, err := vm.store.Get(vm.context, actor.Head.Cid, &state); err != nil {
// This error is internal, shouldn't propagate as on-chain failure
panic(fmt.Errorf("signer resolution failed to lost state for %s ", maddr))
}
return state.Info.Owner, state.Info.Worker, nil
}
//
// utils
//
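// msgCID returns the CID of the given unsigned message, aborting with SysErrInternal if it cannot be computed.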
func msgCID(msg *types.UnsignedMessage) cid.Cid {
cid, err := msg.Cid()
if err != nil {
runtime.Abortf(exitcode.SysErrInternal, "Could not compute CID for message")
}
return cid
}
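// makeBlockRewardMessage builds the implicit AwardBlockReward message sent from the system actor to the reward actor on behalf of the given miner.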
func makeBlockRewardMessage(blockMiner address.Address, penalty abi.TokenAmount, gasReward abi.TokenAmount, ticketCount int64) internalMessage {
params := &reward.AwardBlockRewardParams{
Miner: blockMiner,
Penalty: penalty,
GasReward: gasReward,
TicketCount: ticketCount,
}
encoded, err := encoding.Encode(params)
if err != nil {
panic(fmt.Errorf("failed to encode built-in block reward. %s", err))
}
return internalMessage{
from: builtin.SystemActorAddr,
to: builtin.RewardActorAddr,
value: big.Zero(),
method: builtin.MethodsReward.AwardBlockReward,
params: encoded,
}
}
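// makeCronTickMessage builds the implicit message that triggers the cron actor's epoch tick.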
func makeCronTickMessage() internalMessage {
return internalMessage{
from: builtin.SystemActorAddr,
to: builtin.CronActorAddr,
value: big.Zero(),
method: builtin.MethodsCron.EpochTick,
params: []byte{},
}
}
| 1 | 23,565 | I guess this will need to change soon. | filecoin-project-venus | go |
@@ -37,10 +37,9 @@ import (
// The header directive goes second so that headers
// can be manipulated before doing redirects.
var directiveOrder = []string{
+ "map",
"root",
-
"header",
-
"redir",
"rewrite",
| 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"encoding/json"
"net"
"sort"
"strconv"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
// directiveOrder specifies the order
// to apply directives in HTTP routes.
//
// The root directive goes first in case rewrites or
// redirects depend on existence of files, i.e. the
// file matcher, which must know the root first.
//
// The header directive goes second so that headers
// can be manipulated before doing redirects.
var directiveOrder = []string{
"root",
"header",
"redir",
"rewrite",
// URI manipulation
"uri",
"try_files",
// middleware handlers; some wrap responses
"basicauth",
"request_header",
"encode",
"templates",
// special routing directives
"handle",
"handle_path",
"route",
// handlers that typically respond to requests
"respond",
"reverse_proxy",
"php_fastcgi",
"file_server",
"acme_server",
}
// directiveIsOrdered returns true if dir is
// a known, ordered (sorted) directive.
func directiveIsOrdered(dir string) bool {
for _, d := range directiveOrder {
if d == dir {
return true
}
}
return false
}
// RegisterDirective registers a unique directive dir with an
// associated unmarshaling (setup) function. When directive dir
// is encountered in a Caddyfile, setupFunc will be called to
// unmarshal its tokens.
func RegisterDirective(dir string, setupFunc UnmarshalFunc) {
if _, ok := registeredDirectives[dir]; ok {
panic("directive " + dir + " already registered")
}
registeredDirectives[dir] = setupFunc
}
// RegisterHandlerDirective is like RegisterDirective, but for
// directives which specifically output only an HTTP handler.
// Directives registered with this function will always have
// an optional matcher token as the first argument.
func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) {
if !h.Next() {
return nil, h.ArgErr()
}
matcherSet, ok, err := h.MatcherToken()
if err != nil {
return nil, err
}
if ok {
// strip matcher token; we don't need to
// use the return value here because a
// new dispenser should have been made
// solely for this directive's tokens,
// with no other uses of same slice
h.Dispenser.Delete()
}
h.Dispenser.Reset() // pretend this lookahead never happened
val, err := setupFunc(h)
if err != nil {
return nil, err
}
return h.NewRoute(matcherSet, val), nil
})
}
// RegisterGlobalOption registers a unique global option opt with
// an associated unmarshaling (setup) function. When the global
// option opt is encountered in a Caddyfile, setupFunc will be
// called to unmarshal its tokens.
func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) {
if _, ok := registeredGlobalOptions[opt]; ok {
panic("global option " + opt + " already registered")
}
registeredGlobalOptions[opt] = setupFunc
}
// Helper is a type which helps setup a value from
// Caddyfile tokens.
type Helper struct {
*caddyfile.Dispenser
// State stores intermediate variables during caddyfile adaptation.
State map[string]interface{}
options map[string]interface{}
warnings *[]caddyconfig.Warning
matcherDefs map[string]caddy.ModuleMap
parentBlock caddyfile.ServerBlock
groupCounter counter
}
// Option gets the option keyed by name.
func (h Helper) Option(name string) interface{} {
return h.options[name]
}
// Caddyfiles returns the list of config files from
// which tokens in the current server block were loaded.
func (h Helper) Caddyfiles() []string {
// first obtain set of names of files involved
// in this server block, without duplicates
files := make(map[string]struct{})
for _, segment := range h.parentBlock.Segments {
for _, token := range segment {
files[token.File] = struct{}{}
}
}
// then convert the set into a slice
filesSlice := make([]string, 0, len(files))
for file := range files {
filesSlice = append(filesSlice, file)
}
return filesSlice
}
// JSON converts val into JSON. Any errors are added to warnings.
func (h Helper) JSON(val interface{}) json.RawMessage {
return caddyconfig.JSON(val, h.warnings)
}
// MatcherToken assumes the next argument token is (possibly) a matcher,
// and if so, returns the matcher set along with a true value. If the next
// token is not a matcher, nil and false is returned. Note that a true
// value may be returned with a nil matcher set if it is a catch-all.
func (h Helper) MatcherToken() (caddy.ModuleMap, bool, error) {
if !h.NextArg() {
return nil, false, nil
}
return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings)
}
// ExtractMatcherSet is like MatcherToken, except this is a higher-level
// method that returns the matcher set described by the matcher token,
// or nil if there is none, and deletes the matcher token from the
// dispenser and resets it as if this look-ahead never happened. Useful
// when wrapping a route (one or more handlers) in a user-defined matcher.
func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) {
matcherSet, hasMatcher, err := h.MatcherToken()
if err != nil {
return nil, err
}
if hasMatcher {
h.Dispenser.Delete() // strip matcher token
}
h.Dispenser.Reset() // pretend this lookahead never happened
return matcherSet, nil
}
// NewRoute returns config values relevant to creating a new HTTP route.
func (h Helper) NewRoute(matcherSet caddy.ModuleMap,
handler caddyhttp.MiddlewareHandler) []ConfigValue {
mod, err := caddy.GetModule(caddy.GetModuleID(handler))
if err != nil {
*h.warnings = append(*h.warnings, caddyconfig.Warning{
File: h.File(),
Line: h.Line(),
Message: err.Error(),
})
return nil
}
var matcherSetsRaw []caddy.ModuleMap
if matcherSet != nil {
matcherSetsRaw = append(matcherSetsRaw, matcherSet)
}
return []ConfigValue{
{
Class: "route",
Value: caddyhttp.Route{
MatcherSetsRaw: matcherSetsRaw,
HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID.Name(), h.warnings)},
},
},
}
}
// GroupRoutes adds the routes (caddyhttp.Route type) in vals to the
// same group, if there is more than one route in vals.
func (h Helper) GroupRoutes(vals []ConfigValue) {
// ensure there's at least two routes; group of one is pointless
var count int
for _, v := range vals {
if _, ok := v.Value.(caddyhttp.Route); ok {
count++
if count > 1 {
break
}
}
}
if count < 2 {
return
}
// now that we know the group will have some effect, do it
groupName := h.groupCounter.nextGroup()
for i := range vals {
if route, ok := vals[i].Value.(caddyhttp.Route); ok {
route.Group = groupName
vals[i].Value = route
}
}
}
// NewBindAddresses returns config values relevant to adding
// listener bind addresses to the config.
func (h Helper) NewBindAddresses(addrs []string) []ConfigValue {
return []ConfigValue{{Class: "bind", Value: addrs}}
}
// ParseSegmentAsSubroute parses the segment such that its subdirectives
// are themselves treated as directives, from which a subroute is built
// and returned.
func ParseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) {
var allResults []ConfigValue
for h.Next() {
// slice the linear list of tokens into top-level segments
var segments []caddyfile.Segment
for nesting := h.Nesting(); h.NextBlock(nesting); {
segments = append(segments, h.NextSegment())
}
// copy existing matcher definitions so we can augment
// new ones that are defined only in this scope
matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
for key, val := range h.matcherDefs {
matcherDefs[key] = val
}
// find and extract any embedded matcher definitions in this scope
for i, seg := range segments {
if strings.HasPrefix(seg.Directive(), matcherPrefix) {
err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs)
if err != nil {
return nil, err
}
segments = append(segments[:i], segments[i+1:]...)
}
}
// with matchers ready to go, evaluate each directive's segment
for _, seg := range segments {
dir := seg.Directive()
dirFunc, ok := registeredDirectives[dir]
if !ok {
return nil, h.Errf("unrecognized directive: %s", dir)
}
subHelper := h
subHelper.Dispenser = caddyfile.NewDispenser(seg)
subHelper.matcherDefs = matcherDefs
results, err := dirFunc(subHelper)
if err != nil {
return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
}
for _, result := range results {
result.directive = dir
allResults = append(allResults, result)
}
}
}
return buildSubroute(allResults, h.groupCounter)
}
// ConfigValue represents a value to be added to the final
// configuration, or a value to be consulted when building
// the final configuration.
type ConfigValue struct {
// The kind of value this is. As the config is
// being built, the adapter will look in the
// "pile" for values belonging to a certain
// class when it is setting up a certain part
// of the config. The associated value will be
// type-asserted and placed accordingly.
Class string
// The value to be used when building the config.
// Generally its type is associated with the
// name of the Class.
Value interface{}
directive string
}
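// sortRoutes sorts the given routes by the global directive order, sub-sorting routes of the same directive by path matcher specificity (longest path first).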
func sortRoutes(routes []ConfigValue) {
dirPositions := make(map[string]int)
for i, dir := range directiveOrder {
dirPositions[dir] = i
}
sort.SliceStable(routes, func(i, j int) bool {
// if the directives are different, just use the established directive order
iDir, jDir := routes[i].directive, routes[j].directive
if iDir != jDir {
return dirPositions[iDir] < dirPositions[jDir]
}
// directives are the same; sub-sort by path matcher length if there's
// only one matcher set and one path (this is a very common case and
// usually -- but not always -- helpful/expected, oh well; user can
// always take manual control of order using handler or route blocks)
iRoute, ok := routes[i].Value.(caddyhttp.Route)
if !ok {
return false
}
jRoute, ok := routes[j].Value.(caddyhttp.Route)
if !ok {
return false
}
// decode the path matchers, if there is just one of them
var iPM, jPM caddyhttp.MatchPath
if len(iRoute.MatcherSetsRaw) == 1 {
_ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM)
}
if len(jRoute.MatcherSetsRaw) == 1 {
_ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM)
}
// sort by longer path (more specific) first; missing path
// matchers or multi-matchers are treated as zero-length paths
var iPathLen, jPathLen int
if len(iPM) > 0 {
iPathLen = len(iPM[0])
}
if len(jPM) > 0 {
jPathLen = len(jPM[0])
}
return iPathLen > jPathLen
})
}
// serverBlock pairs a Caddyfile server block with
// a "pile" of config values, keyed by class name,
// as well as its parsed keys for convenience.
type serverBlock struct {
block caddyfile.ServerBlock
pile map[string][]ConfigValue // config values obtained from directives
keys []Address
}
// hostsFromKeys returns a list of all the non-empty hostnames found in
// the keys of the server block sb. If logger mode is false, a key with
// an empty hostname portion will return an empty slice, since that
// server block is interpreted to effectively match all hosts. An empty
// string is never added to the slice.
//
// If loggerMode is true, then the non-standard ports of keys will be
// joined to the hostnames. This is to effectively match the Host
// header of requests that come in for that key.
//
// The resulting slice is not sorted but will never have duplicates.
func (sb serverBlock) hostsFromKeys(loggerMode bool) []string {
// ensure each entry in our list is unique
hostMap := make(map[string]struct{})
for _, addr := range sb.keys {
if addr.Host == "" {
if !loggerMode {
// server block contains a key like ":443", i.e. the host portion
// is empty / catch-all, which means to match all hosts
return []string{}
}
// never append an empty string
continue
}
if loggerMode &&
addr.Port != "" &&
addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort) &&
addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort) {
hostMap[net.JoinHostPort(addr.Host, addr.Port)] = struct{}{}
} else {
hostMap[addr.Host] = struct{}{}
}
}
// convert map to slice
sblockHosts := make([]string, 0, len(hostMap))
for host := range hostMap {
sblockHosts = append(sblockHosts, host)
}
return sblockHosts
}
// hasHostCatchAllKey returns true if sb has a key that
// omits a host portion, i.e. it "catches all" hosts.
func (sb serverBlock) hasHostCatchAllKey() bool {
for _, addr := range sb.keys {
if addr.Host == "" {
return true
}
}
return false
}
type (
// UnmarshalFunc is a function which can unmarshal Caddyfile
// tokens into zero or more config values using a Helper type.
// These are passed in a call to RegisterDirective.
UnmarshalFunc func(h Helper) ([]ConfigValue, error)
// UnmarshalHandlerFunc is like UnmarshalFunc, except the
// output of the unmarshaling is an HTTP handler. This
// function does not need to deal with HTTP request matching
// which is abstracted away. Since writing HTTP handlers
// with Caddyfile support is very common, this is a more
// convenient way to add a handler to the chain since a lot
// of the details common to HTTP handlers are taken care of
// for you. These are passed to a call to
// RegisterHandlerDirective.
UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)
// UnmarshalGlobalFunc is a function which can unmarshal Caddyfile
// tokens into a global option config value using a Helper type.
// These are passed in a call to RegisterGlobalOption.
UnmarshalGlobalFunc func(d *caddyfile.Dispenser) (interface{}, error)
)
var registeredDirectives = make(map[string]UnmarshalFunc)
var registeredGlobalOptions = make(map[string]UnmarshalGlobalFunc)
| 1 | 14,601 | Restore this spacing - not sure why it has to be removed? | caddyserver-caddy | go |
@@ -16,7 +16,10 @@
// clang-format off
// Dont't remove `format off`, it prevent reordering of win-includes.
-#define _POSIX_C_SOURCE 200112L // For stat from stat/stat.h and fseeko() (POSIX extensions).
+
+# define _XOPEN_SOURCE 700L
+# define _POSIX_C_SOURCE 200809L
+
#ifdef _WIN32
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN | 1 | /*
* Copyright 2016 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// clang-format off
// Don't remove `format off`; it prevents reordering of win-includes.
#define _POSIX_C_SOURCE 200112L // For stat from stat/stat.h and fseeko() (POSIX extensions).
#ifdef _WIN32
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# ifndef NOMINMAX
# define NOMINMAX
# endif
# ifdef _MSC_VER
# include <crtdbg.h>
# endif
# include <windows.h> // Must be included before <direct.h>
# include <direct.h>
# include <winbase.h>
# undef interface // This is also important because of reasons
#else
# define _XOPEN_SOURCE 600 // For PATH_MAX from limits.h (SUSv2 extension)
# include <limits.h>
#endif
// clang-format on
#include "flatbuffers/base.h"
#include "flatbuffers/util.h"
#include <sys/stat.h>
#include <clocale>
#include <fstream>
namespace flatbuffers {
bool FileExistsRaw(const char *name) {
std::ifstream ifs(name);
return ifs.good();
}
bool LoadFileRaw(const char *name, bool binary, std::string *buf) {
if (DirExists(name)) return false;
std::ifstream ifs(name, binary ? std::ifstream::binary : std::ifstream::in);
if (!ifs.is_open()) return false;
if (binary) {
// The fastest way to read a file into a string.
ifs.seekg(0, std::ios::end);
auto size = ifs.tellg();
(*buf).resize(static_cast<size_t>(size));
ifs.seekg(0, std::ios::beg);
ifs.read(&(*buf)[0], (*buf).size());
} else {
// This is slower, but works correctly on all platforms for text files.
std::ostringstream oss;
oss << ifs.rdbuf();
*buf = oss.str();
}
return !ifs.bad();
}
static LoadFileFunction g_load_file_function = LoadFileRaw;
static FileExistsFunction g_file_exists_function = FileExistsRaw;
bool LoadFile(const char *name, bool binary, std::string *buf) {
FLATBUFFERS_ASSERT(g_load_file_function);
return g_load_file_function(name, binary, buf);
}
bool FileExists(const char *name) {
FLATBUFFERS_ASSERT(g_file_exists_function);
return g_file_exists_function(name);
}
bool DirExists(const char *name) {
// clang-format off
#ifdef _WIN32
#define flatbuffers_stat _stat
#define FLATBUFFERS_S_IFDIR _S_IFDIR
#else
#define flatbuffers_stat stat
#define FLATBUFFERS_S_IFDIR S_IFDIR
#endif
// clang-format on
struct flatbuffers_stat file_info;
if (flatbuffers_stat(name, &file_info) != 0) return false;
return (file_info.st_mode & FLATBUFFERS_S_IFDIR) != 0;
}
LoadFileFunction SetLoadFileFunction(LoadFileFunction load_file_function) {
LoadFileFunction previous_function = g_load_file_function;
g_load_file_function = load_file_function ? load_file_function : LoadFileRaw;
return previous_function;
}
FileExistsFunction SetFileExistsFunction(
FileExistsFunction file_exists_function) {
FileExistsFunction previous_function = g_file_exists_function;
g_file_exists_function =
file_exists_function ? file_exists_function : FileExistsRaw;
return previous_function;
}
bool SaveFile(const char *name, const char *buf, size_t len, bool binary) {
std::ofstream ofs(name, binary ? std::ofstream::binary : std::ofstream::out);
if (!ofs.is_open()) return false;
ofs.write(buf, len);
return !ofs.bad();
}
// We internally store paths in posix format ('/'). Paths supplied
// by the user should go through PosixPath to ensure correct behavior
// on Windows when paths are string-compared.
static const char kPathSeparatorWindows = '\\';
static const char *PathSeparatorSet = "\\/"; // Intentionally no ':'
std::string StripExtension(const std::string &filepath) {
size_t i = filepath.find_last_of('.');
return i != std::string::npos ? filepath.substr(0, i) : filepath;
}
std::string GetExtension(const std::string &filepath) {
size_t i = filepath.find_last_of('.');
return i != std::string::npos ? filepath.substr(i + 1) : "";
}
std::string StripPath(const std::string &filepath) {
size_t i = filepath.find_last_of(PathSeparatorSet);
return i != std::string::npos ? filepath.substr(i + 1) : filepath;
}
std::string StripFileName(const std::string &filepath) {
size_t i = filepath.find_last_of(PathSeparatorSet);
return i != std::string::npos ? filepath.substr(0, i) : "";
}
std::string ConCatPathFileName(const std::string &path,
const std::string &filename) {
std::string filepath = path;
if (filepath.length()) {
char &filepath_last_character = string_back(filepath);
if (filepath_last_character == kPathSeparatorWindows) {
filepath_last_character = kPathSeparator;
} else if (filepath_last_character != kPathSeparator) {
filepath += kPathSeparator;
}
}
filepath += filename;
// Ignore './' at the start of filepath.
if (filepath[0] == '.' && filepath[1] == kPathSeparator) {
filepath.erase(0, 2);
}
return filepath;
}
std::string PosixPath(const char *path) {
std::string p = path;
std::replace(p.begin(), p.end(), '\\', '/');
return p;
}
void EnsureDirExists(const std::string &filepath) {
auto parent = StripFileName(filepath);
if (parent.length()) EnsureDirExists(parent);
// clang-format off
#ifdef _WIN32
(void)_mkdir(filepath.c_str());
#else
mkdir(filepath.c_str(), S_IRWXU|S_IRGRP|S_IXGRP);
#endif
// clang-format on
}
std::string AbsolutePath(const std::string &filepath) {
// clang-format off
#ifdef FLATBUFFERS_NO_ABSOLUTE_PATH_RESOLUTION
return filepath;
#else
#ifdef _WIN32
char abs_path[MAX_PATH];
return GetFullPathNameA(filepath.c_str(), MAX_PATH, abs_path, nullptr)
#else
char abs_path[PATH_MAX];
return realpath(filepath.c_str(), abs_path)
#endif
? abs_path
: filepath;
#endif // FLATBUFFERS_NO_ABSOLUTE_PATH_RESOLUTION
// clang-format on
}
// Locale-independent code.
#if defined(FLATBUFFERS_LOCALE_INDEPENDENT) && \
(FLATBUFFERS_LOCALE_INDEPENDENT > 0)
// clang-format off
// Allocate locale instance at startup of application.
ClassicLocale ClassicLocale::instance_;
#ifdef _MSC_VER
ClassicLocale::ClassicLocale()
: locale_(_create_locale(LC_ALL, "C")) {}
ClassicLocale::~ClassicLocale() { _free_locale(locale_); }
#else
ClassicLocale::ClassicLocale()
: locale_(newlocale(LC_ALL, "C", nullptr)) {}
ClassicLocale::~ClassicLocale() { freelocale(locale_); }
#endif
// clang-format on
#endif // !FLATBUFFERS_LOCALE_INDEPENDENT
std::string RemoveStringQuotes(const std::string &s) {
auto ch = *s.c_str();
return ((s.size() >= 2) && (ch == '\"' || ch == '\'') &&
(ch == string_back(s)))
? s.substr(1, s.length() - 2)
: s;
}
bool SetGlobalTestLocale(const char *locale_name, std::string *_value) {
const auto the_locale = setlocale(LC_ALL, locale_name);
if (!the_locale) return false;
if (_value) *_value = std::string(the_locale);
return true;
}
bool ReadEnvironmentVariable(const char *var_name, std::string *_value) {
#ifdef _MSC_VER
__pragma(warning(disable : 4996)); // _CRT_SECURE_NO_WARNINGS
#endif
auto env_str = std::getenv(var_name);
if (!env_str) return false;
if (_value) *_value = std::string(env_str);
return true;
}
void SetupDefaultCRTReportMode() {
// clang-format off
#ifdef _MSC_VER
// By default, send all reports to STDOUT to prevent CI hangs.
// Enable assert report box [Abort|Retry|Ignore] if a debugger is present.
const int dbg_mode = (_CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG) |
(IsDebuggerPresent() ? _CRTDBG_MODE_WNDW : 0);
(void)dbg_mode; // release mode fix
// CrtDebug reports to _CRT_WARN channel.
_CrtSetReportMode(_CRT_WARN, dbg_mode);
_CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDOUT);
// The assert from <assert.h> reports to _CRT_ERROR channel
_CrtSetReportMode(_CRT_ERROR, dbg_mode);
_CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDOUT);
// Internal CRT assert channel?
_CrtSetReportMode(_CRT_ASSERT, dbg_mode);
_CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDOUT);
#endif
// clang-format on
}
} // namespace flatbuffers
| 1 | 17,862 | These defines cause issues in base.h leading to failing android builds. Locale independency is now set in multiple spots, in cmake and in base.h. The base.h uses feature test macros wrong. Feature test macros can be used to indicate that a feature should be made available. They do not guarantee that it is available. On the cmake side this is done by the check symbol macros (similar to what autoconf does). On the android side not. I would suggest removing this from base.h and define locale independent in gradle. It is available since android 21 I believe. Is there a way in gradle to set locale independent only if android api >= 21? | google-flatbuffers | java |
@@ -504,7 +504,7 @@ SubPlan GoPlanner::mToNStepsPlan(SubPlan& startVidPlan) {
}
const auto& projectInput =
- (joinInput || joinDst) ? loopBody->outputVar() : sampleLimit->outputVar();
+ (loopBody != getDst) ? loopBody->outputVar() : sampleLimit->outputVar();
loopBody = Project::make(qctx, loopBody, goCtx_->yieldExpr);
loopBody->setInputVar(projectInput);
loopBody->setColNames(std::move(goCtx_->colNames)); | 1 | /* Copyright (c) 2021 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "graph/planner/ngql/GoPlanner.h"
#include "graph/planner/plan/Logic.h"
#include "graph/util/ExpressionUtils.h"
#include "graph/util/PlannerUtil.h"
namespace nebula {
namespace graph {
std::unique_ptr<GoPlanner::EdgeProps> GoPlanner::buildEdgeProps(bool onlyDst) {
auto eProps = std::make_unique<EdgeProps>();
switch (goCtx_->over.direction) {
case storage::cpp2::EdgeDirection::IN_EDGE: {
doBuildEdgeProps(eProps, onlyDst, true);
break;
}
case storage::cpp2::EdgeDirection::OUT_EDGE: {
doBuildEdgeProps(eProps, onlyDst, false);
break;
}
case storage::cpp2::EdgeDirection::BOTH: {
doBuildEdgeProps(eProps, onlyDst, true);
doBuildEdgeProps(eProps, onlyDst, false);
break;
}
}
return eProps;
}
void GoPlanner::doBuildEdgeProps(std::unique_ptr<EdgeProps>& eProps, bool onlyDst, bool isInEdge) {
const auto& exprProps = goCtx_->exprProps;
for (const auto& e : goCtx_->over.edgeTypes) {
EdgeProp ep;
if (isInEdge) {
ep.set_type(-e);
} else {
ep.set_type(e);
}
if (onlyDst) {
ep.set_props({kDst});
eProps->emplace_back(std::move(ep));
continue;
}
auto found = exprProps.edgeProps().find(e);
if (found == exprProps.edgeProps().end()) {
ep.set_props({kDst});
} else {
std::set<folly::StringPiece> props(found->second.begin(), found->second.end());
props.emplace(kDst);
ep.set_props(std::vector<std::string>(props.begin(), props.end()));
}
eProps->emplace_back(std::move(ep));
}
}
std::unique_ptr<GoPlanner::VertexProps> GoPlanner::buildVertexProps(
const ExpressionProps::TagIDPropsMap& propsMap) {
if (propsMap.empty()) {
return nullptr;
}
auto vertexProps = std::make_unique<VertexProps>(propsMap.size());
auto fun = [](auto& tag) {
VertexProp vp;
vp.set_tag(tag.first);
std::vector<std::string> props(tag.second.begin(), tag.second.end());
vp.set_props(std::move(props));
return vp;
};
std::transform(propsMap.begin(), propsMap.end(), vertexProps->begin(), fun);
return vertexProps;
}
// ++loopSteps{0} <= steps && (var is Empty OR size(var) != 0)
Expression* GoPlanner::loopCondition(uint32_t steps, const std::string& var) {
auto* qctx = goCtx_->qctx;
auto* pool = qctx->objPool();
qctx->ectx()->setValue(loopStepVar_, 0);
auto step = ExpressionUtils::stepCondition(pool, loopStepVar_, steps);
auto empty = ExpressionUtils::equalCondition(pool, var, Value::kEmpty);
auto neZero = ExpressionUtils::neZeroCondition(pool, var);
auto* earlyEnd = LogicalExpression::makeOr(pool, empty, neZero);
return LogicalExpression::makeAnd(pool, step, earlyEnd);
}
/*
* extract vid and edge's prop from GN
* for joinDst & joinInput
* output colNames {srcProps, edgeProps, kVid, "JOIN_DST_VID"}
*/
PlanNode* GoPlanner::extractSrcEdgePropsFromGN(PlanNode* dep, const std::string& input) {
auto& srcEdgePropsExpr = goCtx_->srcEdgePropsExpr;
auto* pool = goCtx_->qctx->objPool();
if (goCtx_->joinInput) {
// extract vid from gn
auto* expr = new YieldColumn(ColumnExpression::make(pool, VID_INDEX), kVid);
srcEdgePropsExpr->addColumn(expr);
}
if (goCtx_->joinDst) {
// extract dst from gn
auto* expr = new YieldColumn(EdgePropertyExpression::make(pool, "*", kDst), "JOIN_DST_VID");
srcEdgePropsExpr->addColumn(expr);
}
auto* project = Project::make(goCtx_->qctx, dep, srcEdgePropsExpr);
project->setInputVar(input);
return project;
}
/*
* extract vid and dst from GN
* for trackStartVid
* output ColNames {srcVidColName, "TRACK_DST_VID"}
*/
PlanNode* GoPlanner::extractSrcDstFromGN(PlanNode* dep, const std::string& input) {
auto qctx = goCtx_->qctx;
auto* pool = qctx->objPool();
auto* columns = pool->add(new YieldColumns());
goCtx_->srcVidColName = qctx->vctx()->anonColGen()->getCol();
auto* vidExpr = new YieldColumn(ColumnExpression::make(pool, VID_INDEX), goCtx_->srcVidColName);
columns->addColumn(vidExpr);
auto* dstExpr = new YieldColumn(EdgePropertyExpression::make(pool, "*", kDst), "TRACK_DST_VID");
columns->addColumn(dstExpr);
auto* project = Project::make(qctx, dep, columns);
project->setInputVar(input);
auto* dedup = Dedup::make(qctx, project);
return dedup;
}
/*
* extract vid from runTime input
* for joinInput
* output ColNames {runtimeVidName, dstVidColName}
*/
PlanNode* GoPlanner::extractVidFromRuntimeInput(PlanNode* dep) {
if (dep == nullptr) {
return dep;
}
auto qctx = goCtx_->qctx;
const auto& from = goCtx_->from;
auto* columns = qctx->objPool()->add(new YieldColumns());
auto* vidExpr = new YieldColumn(from.originalSrc->clone(), from.runtimeVidName);
columns->addColumn(vidExpr);
goCtx_->dstVidColName = qctx->vctx()->anonColGen()->getCol();
auto* dstExpr = new YieldColumn(from.originalSrc->clone(), goCtx_->dstVidColName);
columns->addColumn(dstExpr);
auto* project = Project::make(qctx, dep, columns);
auto input = from.fromType == kPipe ? goCtx_->inputVarName : from.userDefinedVarName;
project->setInputVar(input);
auto* dedup = Dedup::make(qctx, project);
return dedup;
}
/*
* establish a mapping between the original vId and the expanded destination vId
* during each step of the expansion in the n-step and mton-step scenario
* left: n-1 steps
* right: step n
* output ColNames {runtimeVidName, dstVidColName}
*/
PlanNode* GoPlanner::trackStartVid(PlanNode* left, PlanNode* right) {
auto qctx = goCtx_->qctx;
auto* pool = qctx->objPool();
auto* hashKey = VariablePropertyExpression::make(pool, left->outputVar(), goCtx_->dstVidColName);
auto* probeKey =
VariablePropertyExpression::make(pool, right->outputVar(), goCtx_->srcVidColName);
auto* join = InnerJoin::make(qctx,
right,
{left->outputVar(), ExecutionContext::kLatestVersion},
{right->outputVar(), ExecutionContext::kLatestVersion},
{hashKey},
{probeKey});
std::vector<std::string> colNames = left->colNames();
colNames.insert(colNames.end(), right->colNames().begin(), right->colNames().end());
join->setColNames(std::move(colNames));
// extract runtimeVid & dst from join result
auto* columns = pool->add(new YieldColumns());
auto& vidName = goCtx_->from.runtimeVidName;
auto* vidExpr = new YieldColumn(InputPropertyExpression::make(pool, vidName), vidName);
columns->addColumn(vidExpr);
auto* dstExpr =
new YieldColumn(InputPropertyExpression::make(pool, "TRACK_DST_VID"), goCtx_->dstVidColName);
columns->addColumn(dstExpr);
auto* project = Project::make(qctx, join, columns);
auto* dedup = Dedup::make(qctx, project);
dedup->setOutputVar(left->outputVar());
return dedup;
}
/*
* output ColNames {srcProps, edgeProps, kVid, "JOIN_DST_VID", "DST_VID",
* dstProps}
*/
PlanNode* GoPlanner::buildJoinDstPlan(PlanNode* dep) {
auto qctx = goCtx_->qctx;
auto* pool = qctx->objPool();
// dst is the last column, columnName is "JOIN_DST_VID"
auto* dstExpr = ColumnExpression::make(pool, LAST_COL_INDEX);
auto* getVertex = GetVertices::make(qctx,
dep,
goCtx_->space.id,
dstExpr,
buildVertexProps(goCtx_->exprProps.dstTagProps()),
{},
true);
auto& dstPropsExpr = goCtx_->dstPropsExpr;
// extract dst's prop
auto* vidExpr = new YieldColumn(ColumnExpression::make(pool, VID_INDEX), "DST_VID");
dstPropsExpr->addColumn(vidExpr);
// extract dst's prop, vid is the last column
auto* project = Project::make(qctx, getVertex, dstPropsExpr);
// dep's colName "JOIN_DST_VID" join getVertex's colName "DST_VID"
auto* hashKey = dstExpr->clone();
auto* probeKey = ColumnExpression::make(pool, LAST_COL_INDEX);
auto* join = LeftJoin::make(qctx,
project,
{dep->outputVar(), ExecutionContext::kLatestVersion},
{project->outputVar(), ExecutionContext::kLatestVersion},
{hashKey},
{probeKey});
VLOG(1) << join->outputVar() << " hasKey: " << hashKey->toString()
<< " probeKey: " << probeKey->toString();
std::vector<std::string> colNames = dep->colNames();
colNames.insert(colNames.end(), project->colNames().begin(), project->colNames().end());
join->setColNames(std::move(colNames));
return join;
}
PlanNode* GoPlanner::buildJoinInputPlan(PlanNode* dep) {
auto qctx = goCtx_->qctx;
const auto& from = goCtx_->from;
const auto& steps = goCtx_->steps;
auto* pool = qctx->objPool();
const auto& vidName = (!steps.isMToN() && steps.steps() == 1) ? kVid : from.runtimeVidName;
auto* hashKey = VariablePropertyExpression::make(pool, dep->outputVar(), vidName);
auto* probeKey = from.originalSrc;
std::string probeName = from.fromType == kPipe ? goCtx_->inputVarName : from.userDefinedVarName;
auto* join = InnerJoin::make(qctx,
dep,
{dep->outputVar(), ExecutionContext::kLatestVersion},
{probeName, ExecutionContext::kLatestVersion},
{hashKey},
{probeKey});
std::vector<std::string> colNames = dep->colNames();
auto* varPtr = qctx->symTable()->getVar(probeName);
DCHECK(varPtr != nullptr);
colNames.insert(colNames.end(), varPtr->colNames.begin(), varPtr->colNames.end());
join->setColNames(std::move(colNames));
return join;
}
/*
* left's colName dstVidColName join right's colName kVid
* left : n-1 steps
* right : last step
*/
PlanNode* GoPlanner::lastStepJoinInput(PlanNode* left, PlanNode* right) {
auto qctx = goCtx_->qctx;
auto* pool = qctx->objPool();
auto* hashKey = VariablePropertyExpression::make(pool, left->outputVar(), goCtx_->dstVidColName);
auto* probeKey = VariablePropertyExpression::make(pool, right->outputVar(), kVid);
const auto& leftVersion = goCtx_->steps.isMToN() ? ExecutionContext::kPreviousOneVersion
: ExecutionContext::kLatestVersion;
auto* join = InnerJoin::make(qctx,
right,
{left->outputVar(), leftVersion},
{right->outputVar(), ExecutionContext::kLatestVersion},
{hashKey},
{probeKey});
std::vector<std::string> colNames = left->colNames();
colNames.insert(colNames.end(), right->colNames().begin(), right->colNames().end());
join->setColNames(std::move(colNames));
return join;
}
PlanNode* GoPlanner::buildLastStepJoinPlan(PlanNode* gn, PlanNode* join) {
if (!goCtx_->joinInput && !goCtx_->joinDst) {
return gn;
}
auto* dep = extractSrcEdgePropsFromGN(gn, gn->outputVar());
dep = goCtx_->joinDst ? buildJoinDstPlan(dep) : dep;
dep = goCtx_->joinInput ? lastStepJoinInput(join, dep) : dep;
dep = goCtx_->joinInput ? buildJoinInputPlan(dep) : dep;
return dep;
}
PlanNode* GoPlanner::lastStep(PlanNode* dep, PlanNode* join) {
auto qctx = goCtx_->qctx;
auto* gn = GetNeighbors::make(qctx, dep, goCtx_->space.id);
gn->setSrc(goCtx_->from.src);
gn->setVertexProps(buildVertexProps(goCtx_->exprProps.srcTagProps()));
gn->setEdgeProps(buildEdgeProps(false));
gn->setInputVar(goCtx_->vidsVar);
const auto& steps = goCtx_->steps;
auto* sampleLimit = buildSampleLimit(gn, steps.isMToN() ? steps.nSteps() : steps.steps());
auto* root = buildLastStepJoinPlan(sampleLimit, join);
if (goCtx_->filter != nullptr) {
root = Filter::make(qctx, root, goCtx_->filter);
}
root = Project::make(qctx, root, goCtx_->yieldExpr);
root->setColNames(std::move(goCtx_->colNames));
if (goCtx_->distinct) {
root = Dedup::make(qctx, root);
}
return root;
}
PlanNode* GoPlanner::buildOneStepJoinPlan(PlanNode* gn) {
if (!goCtx_->joinInput && !goCtx_->joinDst) {
return gn;
}
auto* dep = extractSrcEdgePropsFromGN(gn, gn->outputVar());
dep = goCtx_->joinDst ? buildJoinDstPlan(dep) : dep;
dep = goCtx_->joinInput ? buildJoinInputPlan(dep) : dep;
return dep;
}
template <typename T>
PlanNode* GoPlanner::buildSampleLimitImpl(PlanNode* input, T sampleLimit) {
DCHECK(!goCtx_->limits.empty());
PlanNode* node = nullptr;
if (goCtx_->random) {
node = Sample::make(goCtx_->qctx, input, sampleLimit);
} else {
node = Limit::make(goCtx_->qctx, input, 0, sampleLimit);
}
node->setInputVar(input->outputVar());
node->setColNames(input->outputVarPtr()->colNames);
return node;
}
// generate
// $limits[$step-1]
Expression* GoPlanner::stepSampleLimit() {
auto qctx = goCtx_->qctx;
// $limits
const auto& limitsVarName = qctx->vctx()->anonVarGen()->getVar();
List limitValues;
limitValues.reserve(goCtx_->limits.size());
for (const auto& limit : goCtx_->limits) {
limitValues.values.emplace_back(limit);
}
qctx->ectx()->setValue(limitsVarName, Value(std::move(limitValues)));
auto* limitsVar = VariableExpression::make(qctx->objPool(), limitsVarName);
// $step
auto* stepVar = VariableExpression::make(qctx->objPool(), loopStepVar_);
// step inc
auto* stepInc = ArithmeticExpression::makeMinus(
qctx->objPool(), stepVar, ConstantExpression::make(qctx->objPool(), 1));
// subscript
auto* subscript = SubscriptExpression::make(qctx->objPool(), limitsVar, stepInc);
return subscript;
}
SubPlan GoPlanner::oneStepPlan(SubPlan& startVidPlan) {
auto qctx = goCtx_->qctx;
auto* gn = GetNeighbors::make(qctx, startVidPlan.root, goCtx_->space.id);
gn->setVertexProps(buildVertexProps(goCtx_->exprProps.srcTagProps()));
gn->setEdgeProps(buildEdgeProps(false));
gn->setSrc(goCtx_->from.src);
gn->setInputVar(goCtx_->vidsVar);
auto* sampleLimit = buildSampleLimit(gn, 1 /* one step */);
SubPlan subPlan;
subPlan.tail = startVidPlan.tail != nullptr ? startVidPlan.tail : gn;
subPlan.root = buildOneStepJoinPlan(sampleLimit);
if (goCtx_->filter != nullptr) {
subPlan.root = Filter::make(qctx, subPlan.root, goCtx_->filter);
}
subPlan.root = Project::make(qctx, subPlan.root, goCtx_->yieldExpr);
subPlan.root->setColNames(std::move(goCtx_->colNames));
if (goCtx_->distinct) {
subPlan.root = Dedup::make(qctx, subPlan.root);
}
return subPlan;
}
SubPlan GoPlanner::nStepsPlan(SubPlan& startVidPlan) {
auto qctx = goCtx_->qctx;
loopStepVar_ = qctx->vctx()->anonVarGen()->getVar();
auto* start = StartNode::make(qctx);
auto* gn = GetNeighbors::make(qctx, start, goCtx_->space.id);
gn->setSrc(goCtx_->from.src);
gn->setEdgeProps(buildEdgeProps(true));
gn->setInputVar(goCtx_->vidsVar);
auto* sampleLimit = buildSampleLimit(gn);
auto* getDst = PlannerUtil::extractDstFromGN(qctx, sampleLimit, goCtx_->vidsVar);
PlanNode* loopBody = getDst;
PlanNode* loopDep = nullptr;
if (goCtx_->joinInput) {
auto* joinLeft = extractVidFromRuntimeInput(startVidPlan.root);
auto* joinRight = extractSrcDstFromGN(getDst, sampleLimit->outputVar());
loopBody = trackStartVid(joinLeft, joinRight);
loopDep = joinLeft;
}
auto* condition = loopCondition(goCtx_->steps.steps() - 1, sampleLimit->outputVar());
auto* loop = Loop::make(qctx, loopDep, loopBody, condition);
auto* root = lastStep(loop, loopBody == getDst ? nullptr : loopBody);
SubPlan subPlan;
subPlan.root = root;
subPlan.tail = startVidPlan.tail == nullptr ? loop : startVidPlan.tail;
return subPlan;
}
SubPlan GoPlanner::mToNStepsPlan(SubPlan& startVidPlan) {
auto qctx = goCtx_->qctx;
auto joinInput = goCtx_->joinInput;
auto joinDst = goCtx_->joinDst;
loopStepVar_ = qctx->vctx()->anonVarGen()->getVar();
auto* start = StartNode::make(qctx);
auto* gn = GetNeighbors::make(qctx, start, goCtx_->space.id);
gn->setSrc(goCtx_->from.src);
gn->setVertexProps(buildVertexProps(goCtx_->exprProps.srcTagProps()));
gn->setEdgeProps(buildEdgeProps(false));
gn->setInputVar(goCtx_->vidsVar);
auto* sampleLimit = buildSampleLimit(gn);
auto* getDst = PlannerUtil::extractDstFromGN(qctx, sampleLimit, goCtx_->vidsVar);
auto* loopBody = getDst;
auto* loopDep = startVidPlan.root;
PlanNode* trackVid = nullptr;
if (joinInput) {
auto* joinLeft = extractVidFromRuntimeInput(startVidPlan.root);
auto* joinRight = extractSrcDstFromGN(getDst, sampleLimit->outputVar());
trackVid = trackStartVid(joinLeft, joinRight);
loopBody = trackVid;
loopDep = joinLeft;
}
if (joinInput || joinDst) {
loopBody = extractSrcEdgePropsFromGN(loopBody, sampleLimit->outputVar());
loopBody = joinDst ? buildJoinDstPlan(loopBody) : loopBody;
loopBody = joinInput ? lastStepJoinInput(trackVid, loopBody) : loopBody;
loopBody = joinInput ? buildJoinInputPlan(loopBody) : loopBody;
}
if (goCtx_->filter) {
const auto& filterInput =
(joinInput || joinDst) ? loopBody->outputVar() : sampleLimit->outputVar();
loopBody = Filter::make(qctx, loopBody, goCtx_->filter);
loopBody->setInputVar(filterInput);
}
const auto& projectInput =
(joinInput || joinDst) ? loopBody->outputVar() : sampleLimit->outputVar();
loopBody = Project::make(qctx, loopBody, goCtx_->yieldExpr);
loopBody->setInputVar(projectInput);
loopBody->setColNames(std::move(goCtx_->colNames));
if (goCtx_->distinct) {
loopBody = Dedup::make(qctx, loopBody);
}
auto* condition = loopCondition(goCtx_->steps.nSteps(), sampleLimit->outputVar());
auto* loop = Loop::make(qctx, loopDep, loopBody, condition);
auto* dc = DataCollect::make(qctx, DataCollect::DCKind::kMToN);
dc->addDep(loop);
dc->setMToN(goCtx_->steps);
dc->setDistinct(goCtx_->distinct);
dc->setInputVars({loopBody->outputVar()});
dc->setColNames(loopBody->colNames());
SubPlan subPlan;
subPlan.root = dc;
subPlan.tail = startVidPlan.tail == nullptr ? loop : startVidPlan.tail;
return subPlan;
}
StatusOr<SubPlan> GoPlanner::transform(AstContext* astCtx) {
goCtx_ = static_cast<GoContext*>(astCtx);
auto qctx = goCtx_->qctx;
goCtx_->joinInput = goCtx_->from.fromType != FromType::kInstantExpr;
goCtx_->joinDst = !goCtx_->exprProps.dstTagProps().empty();
SubPlan startPlan = PlannerUtil::buildStart(qctx, goCtx_->from, goCtx_->vidsVar);
auto& steps = goCtx_->steps;
if (steps.isMToN()) {
return mToNStepsPlan(startPlan);
}
if (steps.steps() == 0) {
auto* pt = PassThroughNode::make(qctx, nullptr);
pt->setColNames(std::move(goCtx_->colNames));
SubPlan subPlan;
subPlan.root = subPlan.tail = pt;
return subPlan;
}
if (steps.steps() == 1) {
return oneStepPlan(startPlan);
}
return nStepsPlan(startPlan);
}
} // namespace graph
} // namespace nebula
| 1 | 31,574 | Why not delete these codes? | vesoft-inc-nebula | cpp |
@@ -112,7 +112,7 @@ bool StorageServer::start() {
metaClient_ = std::make_unique<meta::MetaClient>(ioThreadPool_,
metaAddrs_,
options);
- if (!metaClient_->waitForMetadReady()) {
+ if (!metaClient_->waitForMetadReady(FLAGS_meta_client_retry_times)) {
LOG(ERROR) << "waitForMetadReady error!";
return false;
} | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "storage/StorageServer.h"
#include "network/NetworkUtils.h"
#include "storage/StorageFlags.h"
#include "storage/StorageServiceHandler.h"
#include "storage/http/StorageHttpStatsHandler.h"
#include "storage/http/StorageHttpDownloadHandler.h"
#include "storage/http/StorageHttpIngestHandler.h"
#include "storage/http/StorageHttpAdminHandler.h"
#include "kvstore/PartManager.h"
#include "webservice/Router.h"
#include "webservice/WebService.h"
#include "storage/CompactionFilter.h"
#include "hdfs/HdfsCommandHelper.h"
#include "thread/GenericThreadPool.h"
#include <thrift/lib/cpp/concurrency/ThreadManager.h>
DEFINE_int32(port, 44500, "Storage daemon listening port");
DEFINE_bool(reuse_port, true, "Whether to turn on the SO_REUSEPORT option");
DEFINE_int32(num_io_threads, 16, "Number of IO threads");
DEFINE_int32(num_worker_threads, 32, "Number of workers");
DEFINE_int32(storage_http_thread_num, 3, "Number of storage daemon's http thread");
DEFINE_bool(local_config, false, "meta client will not retrieve latest configuration from meta");
namespace nebula {
namespace storage {
StorageServer::StorageServer(HostAddr localHost,
std::vector<HostAddr> metaAddrs,
std::vector<std::string> dataPaths)
: localHost_(localHost), metaAddrs_(std::move(metaAddrs)), dataPaths_(std::move(dataPaths)) {}
StorageServer::~StorageServer() {
stop();
}
std::unique_ptr<kvstore::KVStore> StorageServer::getStoreInstance() {
kvstore::KVOptions options;
options.dataPaths_ = dataPaths_;
options.partMan_ = std::make_unique<kvstore::MetaServerBasedPartManager>(
localHost_,
metaClient_.get());
options.cffBuilder_ = std::make_unique<StorageCompactionFilterFactoryBuilder>(schemaMan_.get(),
indexMan_.get());
if (FLAGS_store_type == "nebula") {
auto nbStore = std::make_unique<kvstore::NebulaStore>(std::move(options),
ioThreadPool_,
localHost_,
workers_);
if (!(nbStore->init())) {
LOG(ERROR) << "nebula store init failed";
return nullptr;
}
return nbStore;
} else if (FLAGS_store_type == "hbase") {
LOG(FATAL) << "HBase store has not been implemented";
} else {
LOG(FATAL) << "Unknown store type \"" << FLAGS_store_type << "\"";
}
return nullptr;
}
bool StorageServer::initWebService() {
LOG(INFO) << "Starting Storage HTTP Service";
hdfsHelper_ = std::make_unique<hdfs::HdfsCommandHelper>();
webWorkers_ = std::make_unique<nebula::thread::GenericThreadPool>();
webWorkers_->start(FLAGS_storage_http_thread_num, "http thread pool");
LOG(INFO) << "Http Thread Pool started";
webSvc_ = std::make_unique<WebService>();
auto& router = webSvc_->router();
router.get("/download").handler([this](web::PathParams&&) {
auto* handler = new storage::StorageHttpDownloadHandler();
handler->init(hdfsHelper_.get(), webWorkers_.get(), kvstore_.get(), dataPaths_);
return handler;
});
router.get("/ingest").handler([this](web::PathParams&&) {
auto handler = new nebula::storage::StorageHttpIngestHandler();
handler->init(kvstore_.get());
return handler;
});
router.get("/admin").handler([this](web::PathParams&&) {
return new storage::StorageHttpAdminHandler(schemaMan_.get(), kvstore_.get());
});
router.get("/rocksdb_stats").handler([](web::PathParams&&) {
return new storage::StorageHttpStatsHandler();
});
auto status = webSvc_->start();
return status.ok();
}
bool StorageServer::start() {
ioThreadPool_ = std::make_shared<folly::IOThreadPoolExecutor>(FLAGS_num_io_threads);
workers_ = apache::thrift::concurrency::PriorityThreadManager::newPriorityThreadManager(
FLAGS_num_worker_threads, true /*stats*/);
workers_->setNamePrefix("executor");
workers_->start();
// Meta client
meta::MetaClientOptions options;
options.localHost_ = localHost_;
options.inStoraged_ = true;
options.serviceName_ = "";
options.skipConfig_ = FLAGS_local_config;
metaClient_ = std::make_unique<meta::MetaClient>(ioThreadPool_,
metaAddrs_,
options);
if (!metaClient_->waitForMetadReady()) {
LOG(ERROR) << "waitForMetadReady error!";
return false;
}
gFlagsMan_ = std::make_unique<meta::ClientBasedGflagsManager>(metaClient_.get());
LOG(INFO) << "Init schema manager";
schemaMan_ = meta::SchemaManager::create();
schemaMan_->init(metaClient_.get());
LOG(INFO) << "Init index manager";
indexMan_ = meta::IndexManager::create();
indexMan_->init(metaClient_.get());
LOG(INFO) << "Init kvstore";
kvstore_ = getStoreInstance();
if (nullptr == kvstore_) {
LOG(ERROR) << "Init kvstore failed";
return false;
}
if (!initWebService()) {
LOG(ERROR) << "Init webservice failed!";
return false;
}
auto handler = std::make_shared<StorageServiceHandler>(kvstore_.get(),
schemaMan_.get(),
indexMan_.get(),
metaClient_.get());
try {
LOG(INFO) << "The storage deamon start on " << localHost_;
tfServer_ = std::make_unique<apache::thrift::ThriftServer>();
tfServer_->setPort(FLAGS_port);
tfServer_->setReusePort(FLAGS_reuse_port);
tfServer_->setIdleTimeout(std::chrono::seconds(0)); // No idle timeout on client connection
tfServer_->setIOThreadPool(ioThreadPool_);
tfServer_->setThreadManager(workers_);
tfServer_->setInterface(std::move(handler));
tfServer_->setStopWorkersOnStopListening(false);
Status expected{Status::INIT};
if (!status_.compare_exchange_strong(expected, Status::RUNNING)) {
LOG(ERROR) << "Impossible! How could it happens!";
return false;
}
tfServer_->serve(); // Will wait until the server shuts down
} catch (const std::exception& e) {
LOG(ERROR) << "Start thrift server failed, error:" << e.what();
return false;
}
return true;
}
void StorageServer::stop() {
Status expected{Status::RUNNING};
if (!status_.compare_exchange_strong(expected, Status::STOPPED)) {
LOG(INFO) << "The service is not running, status " << statusStr(expected);
return;
}
if (kvstore_) {
kvstore_->stop();
}
webSvc_.reset();
if (metaClient_) {
metaClient_->stop();
}
if (kvstore_) {
kvstore_.reset();
}
if (tfServer_) {
tfServer_->stop();
}
}
} // namespace storage
} // namespace nebula
| 1 | 29,980 | If pull message failed after some reties, we should ensure the storaged exit, otherwise it has risk to loss data. | vesoft-inc-nebula | cpp |
@@ -7,7 +7,7 @@
import pytest
import random
import datatable as dt
-from datatable import f, stype, ltype
+from datatable import f, stype, ltype, first, count
from tests import list_equals
| 1 | #!/usr/bin/env python
# © H2O.ai 2018; -*- encoding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#-------------------------------------------------------------------------------
import pytest
import random
import datatable as dt
from datatable import f, stype, ltype
from tests import list_equals
# Sets of tuples containing test columns of each type
dt_bool = {(False, True, False, False, True),
(True, None, None, True, False)}
dt_int = {(5, -3, 6, 3, 0),
(None, -1, 0, 26, -3),
# TODO: currently ~ operation fails on v = 2**31 - 1. Should we
# promote the resulting column to int64 in such case?
(2**31 - 2, -(2**31 - 1), 0, -1, 1)}
dt_float = {(9.5, 0.2, 5.4857301, -3.14159265338979),
(1.1, 2.3e12, -.5, None, float("inf"), 0.0)}
dt_str = {("foo", "bbar", "baz"),
(None, "", " ", " ", None, "\0"),
tuple("qwertyuiiop[]asdfghjkl;'zxcvbnm,./`1234567890-=")}
#-------------------------------------------------------------------------------
# f
#-------------------------------------------------------------------------------
def test_f():
# Check that unbounded f-expressions can be stringified (see #1024). The
# exact representation may be modified in the future; however f-expressions
# should not raise exceptions when printed.
x = f.a
assert repr(x) == "ColSelectorExpr(f_a)"
assert str(x) == "f_a"
y = f.C1 < f.C2
assert repr(y) == "RelationalOpExpr((f_C1 < f_C2))"
assert str(y) == "(f_C1 < f_C2)"
z = f[0]
assert str(z) == "f_0"
assert repr(z) == "ColSelectorExpr(f_0)"
#-------------------------------------------------------------------------------
# Unary bitwise NOT (__invert__)
#-------------------------------------------------------------------------------
def inv(t):
if t is None: return None
if isinstance(t, bool): return not t
return ~t
@pytest.mark.parametrize("src", dt_bool | dt_int)
def test_dt_invert(src):
dt0 = dt.Frame(src)
df1 = dt0(select=~f[0], engine="llvm")
df2 = dt0(select=~f[0], engine="eager")
df1.internal.check()
df2.internal.check()
assert df1.stypes == dt0.stypes
assert df2.stypes == dt0.stypes
assert df1.topython() == [[inv(x) for x in src]]
assert df2.topython() == [[inv(x) for x in src]]
@pytest.mark.parametrize("src", dt_float)
def test_dt_invert_invalid(src):
dt0 = dt.Frame(src)
for engine in ["llvm", "eager"]:
with pytest.raises(TypeError) as e:
dt0(select=~f[0], engine=engine)
assert str(e.value) == ("Operator `~` cannot be applied to a `%s` "
"column" % dt0.stypes[0].name)
#-------------------------------------------------------------------------------
# Unary minus (__neg__)
#-------------------------------------------------------------------------------
def neg(t):
if t is None: return None
return -t
@pytest.mark.parametrize("src", dt_int | dt_float)
def test_dt_neg(src):
dt0 = dt.Frame(src)
dtr = dt0(select=lambda f: -f[0])
dtr.internal.check()
assert dtr.stypes == dt0.stypes
assert list_equals(dtr.topython()[0], [neg(x) for x in src])
@pytest.mark.parametrize("src", dt_bool)
def test_dt_neg_invalid(src):
dt0 = dt.Frame(src)
with pytest.raises(TypeError) as e:
dt0(select=lambda f: -f[0])
assert str(e.value) == ("Operator `-` cannot be applied to a `%s` column"
% dt0.stypes[0].name)
#-------------------------------------------------------------------------------
# Unary plus (__pos__)
#-------------------------------------------------------------------------------
@pytest.mark.parametrize("src", dt_int | dt_float)
def test_dt_pos(src):
dt0 = dt.Frame(src)
dtr = dt0(select=lambda f: +f[0])
dtr.internal.check()
assert dtr.stypes == dt0.stypes
assert list_equals(dtr.topython()[0], list(src))
@pytest.mark.parametrize("src", dt_bool)
def test_dt_pos_invalid(src):
dt0 = dt.Frame(src)
with pytest.raises(TypeError) as e:
dt0(select=lambda f: +f[0])
assert str(e.value) == ("Operator `+` cannot be applied to a `%s` column"
% dt0.stypes[0].name)
#-------------------------------------------------------------------------------
# isna()
#-------------------------------------------------------------------------------
@pytest.mark.parametrize("src", dt_bool | dt_int | dt_float | dt_str)
def test_dt_isna(src):
dt0 = dt.Frame(src)
dt1 = dt0(select=lambda f: dt.isna(f[0]), engine="eager")
dt2 = dt0(select=lambda f: dt.isna(f[0]), engine="llvm")
dt1.internal.check()
dt2.internal.check()
assert dt1.stypes == dt2.stypes == (stype.bool8,)
pyans = [x is None for x in src]
assert dt1.topython()[0] == pyans
assert dt2.topython()[0] == pyans
#-------------------------------------------------------------------------------
# type-cast
#-------------------------------------------------------------------------------
@pytest.mark.parametrize("src", dt_bool | dt_int | dt_float)
def test_cast_to_float32(src):
dt0 = dt.Frame(src)
dt1 = dt0[:, [dt.float32(f[i]) for i in range(dt0.ncols)]]
dt1.internal.check()
assert dt1.stypes == (dt.float32,) * dt0.ncols
pyans = [float(x) if x is not None else None for x in src]
assert list_equals(dt1.topython()[0], pyans)
@pytest.mark.parametrize("stype0", ltype.int.stypes)
def test_cast_int_to_str(stype0):
dt0 = dt.Frame([None, 0, -3, 189, 77, 14, None, 394831, -52939047130424957],
stype=stype0)
dt1 = dt0[:, [dt.str32(f.C0), dt.str64(f.C0)]]
dt1.internal.check()
assert dt1.stypes == (dt.str32, dt.str64)
assert dt1.shape == (dt0.nrows, 2)
ans = [None if v is None else str(v)
for v in dt0.topython()[0]]
assert dt1.topython()[0] == ans
@pytest.mark.parametrize("src", dt_bool | dt_int | dt_float)
def test_cast_to_str(src):
def to_str(x):
if x is None: return None
if isinstance(x, bool): return str(int(x))
# if isinstance(x, float) and math.isnan(x): return None
return str(x)
dt0 = dt.Frame(src)
dt1 = dt0[:, [dt.str32(f[i]) for i in range(dt0.ncols)]]
dt2 = dt0[:, [dt.str64(f[i]) for i in range(dt0.ncols)]]
dt1.internal.check()
dt2.internal.check()
assert dt1.stypes == (dt.str32,) * dt0.ncols
assert dt2.stypes == (dt.str64,) * dt0.ncols
assert dt1.topython()[0] == [to_str(x) for x in src]
def test_cast_view():
df0 = dt.Frame({"A": [1, 2, 3]})
df1 = df0[::-1, :][:, dt.float32(f.A)]
df1.internal.check()
assert df1.stypes == (dt.float32,)
assert df1.topython() == [[3.0, 2.0, 1.0]]
#-------------------------------------------------------------------------------
# logical ops
#-------------------------------------------------------------------------------
def test_logical_and1():
src1 = [1, 5, 12, 3, 7, 14]
src2 = [1, 2] * 3
ans = [i for i in range(6)
if src1[i] < 10 and src2[i] == 1]
df0 = dt.Frame(A=src1, B=src2)
df1 = df0[(f.A < 10) & (f.B == 1), [f.A, f.B]]
assert df1.topython() == [[src1[i] for i in ans],
[src2[i] for i in ans]]
def test_logical_or1():
src1 = [1, 5, 12, 3, 7, 14]
src2 = [1, 2] * 3
ans = [i for i in range(6)
if src1[i] < 10 or src2[i] == 1]
df0 = dt.Frame(A=src1, B=src2)
df1 = df0[(f.A < 10) | (f.B == 1), [f.A, f.B]]
assert df1.topython() == [[src1[i] for i in ans],
[src2[i] for i in ans]]
@pytest.mark.parametrize("seed", [random.getrandbits(63)])
def test_logical_and2(seed):
random.seed(seed)
n = 1000
src1 = [random.choice([True, False, None]) for _ in range(n)]
src2 = [random.choice([True, False, None]) for _ in range(n)]
df0 = dt.Frame(A=src1, B=src2)
df1 = df0[:, f.A & f.B]
assert df1.topython()[0] == \
[None if (src1[i] is None or src2[i] is None) else
src1[i] and src2[i]
for i in range(n)]
@pytest.mark.parametrize("seed", [random.getrandbits(63)])
def test_logical_or2(seed):
random.seed(seed)
n = 1000
src1 = [random.choice([True, False, None]) for _ in range(n)]
src2 = [random.choice([True, False, None]) for _ in range(n)]
df0 = dt.Frame(A=src1, B=src2)
df1 = df0[:, f.A | f.B]
assert df1.topython()[0] == \
[None if (src1[i] is None or src2[i] is None) else
src1[i] or src2[i]
for i in range(n)]
#-------------------------------------------------------------------------------
# Division
#-------------------------------------------------------------------------------
@pytest.mark.parametrize("seed", [random.getrandbits(63)])
def test_div_mod(seed):
random.seed(seed)
n = 1000
src1 = [random.randint(-100, 100) for _ in range(n)]
src2 = [random.randint(-10, 10) for _ in range(n)]
df0 = dt.Frame(x=src1, y=src2)
df1 = df0[:, [f.x // f.y, f.x % f.y]]
assert df1.topython() == [
[None if src2[i] == 0 else src1[i] // src2[i] for i in range(n)],
[None if src2[i] == 0 else src1[i] % src2[i] for i in range(n)]
]
| 1 | 11,057 | looks like you don't need to import first and count here anymore | h2oai-datatable | py |
@@ -2511,6 +2511,18 @@ func (e *historyEngineImpl) NotifyNewTransferTasks(
}
}
+func (e *historyEngineImpl) NotifyNewVisibilityTasks(
+ tasks []persistence.Task,
+) {
+
+ if len(tasks) > 0 {
+ // TODO (alex): add visibility processor
+ // task := tasks[0]
+ // clusterName := e.clusterMetadata.ClusterNameForFailoverVersion(task.GetVersion())
+ // e.visibilityProcessor.NotifyNewTask(clusterName, tasks)
+ }
+}
+
func (e *historyEngineImpl) NotifyNewReplicationTasks(
tasks []persistence.Task,
) { | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination historyEngine_mock.go
package history
import (
"bytes"
"context"
"errors"
"fmt"
"sync/atomic"
"time"
"github.com/pborman/uuid"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
querypb "go.temporal.io/api/query/v1"
"go.temporal.io/api/serviceerror"
taskqueuepb "go.temporal.io/api/taskqueue/v1"
workflowpb "go.temporal.io/api/workflow/v1"
"go.temporal.io/api/workflowservice/v1"
sdkclient "go.temporal.io/sdk/client"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/api/historyservice/v1"
"go.temporal.io/server/api/matchingservice/v1"
persistencespb "go.temporal.io/server/api/persistence/v1"
replicationspb "go.temporal.io/server/api/replication/v1"
workflowspb "go.temporal.io/server/api/workflow/v1"
"go.temporal.io/server/client/admin"
"go.temporal.io/server/client/history"
"go.temporal.io/server/client/matching"
"go.temporal.io/server/common"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/definition"
"go.temporal.io/server/common/failure"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/messaging"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/persistence/versionhistory"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/common/service/config"
serviceerrors "go.temporal.io/server/common/serviceerror"
"go.temporal.io/server/common/xdc"
"go.temporal.io/server/service/history/configs"
"go.temporal.io/server/service/history/events"
"go.temporal.io/server/service/history/shard"
"go.temporal.io/server/service/worker/archiver"
)
const (
conditionalRetryCount = 5
activityCancellationMsgActivityIDUnknown = "ACTIVITY_ID_UNKNOWN"
activityCancellationMsgActivityNotStarted = "ACTIVITY_ID_NOT_STARTED"
timerCancellationMsgTimerIDUnknown = "TIMER_ID_UNKNOWN"
defaultQueryFirstWorkflowTaskWaitTime = time.Second
queryFirstWorkflowTaskCheckInterval = 200 * time.Millisecond
)
type (
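	// historyEngineImpl is the per-shard implementation of the history engine; it owns the
	// queue processors, history cache and replication components for a single shard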
historyEngineImpl struct {
status int32
currentClusterName string
shard shard.Context
timeSource clock.TimeSource
workflowTaskHandler workflowTaskHandlerCallbacks
clusterMetadata cluster.Metadata
historyV2Mgr persistence.HistoryManager
executionManager persistence.ExecutionManager
visibilityMgr persistence.VisibilityManager
txProcessor transferQueueProcessor
timerProcessor timerQueueProcessor
nDCReplicator nDCHistoryReplicator
nDCActivityReplicator nDCActivityReplicator
replicatorProcessor ReplicatorQueueProcessor
eventNotifier events.Notifier
tokenSerializer common.TaskTokenSerializer
historyCache *historyCache
metricsClient metrics.Client
logger log.Logger
throttledLogger log.Logger
config *configs.Config
archivalClient archiver.Client
workflowResetter workflowResetter
queueTaskProcessor queueTaskProcessor
replicationTaskProcessors []ReplicationTaskProcessor
publicClient sdkclient.Client
eventsReapplier nDCEventsReapplier
matchingClient matching.Client
rawMatchingClient matching.Client
replicationDLQHandler replicationDLQHandler
}
)
var (
	// ErrTaskDiscarded is the error indicating that the timer / transfer task has been pending for too long and is discarded.
ErrTaskDiscarded = errors.New("passive task pending for too long")
// ErrTaskRetry is the error indicating that the timer / transfer task should be retried.
ErrTaskRetry = errors.New("passive task should retry due to condition in mutable state is not met")
// ErrDuplicate is exported temporarily for integration test
ErrDuplicate = errors.New("duplicate task, completing it")
// ErrConflict is exported temporarily for integration test
ErrConflict = errors.New("conditional update failed")
// ErrMaxAttemptsExceeded is exported temporarily for integration test
ErrMaxAttemptsExceeded = errors.New("maximum attempts exceeded to update history")
// ErrStaleState is the error returned during state update indicating that cached mutable state could be stale
ErrStaleState = errors.New("cache mutable state could potentially be stale")
// ErrActivityTaskNotFound is the error to indicate activity task could be duplicate and activity already completed
ErrActivityTaskNotFound = serviceerror.NewNotFound("invalid activityID or activity already timed out or invoking workflow is completed")
// ErrWorkflowCompleted is the error to indicate workflow execution already completed
ErrWorkflowCompleted = serviceerror.NewNotFound("workflow execution already completed")
// ErrWorkflowExecutionNotFound is the error to indicate workflow execution does not exist
ErrWorkflowExecutionNotFound = serviceerror.NewNotFound("workflow execution not found")
	// ErrWorkflowParent is the error returned when the given parent execution does not match the actual parent
ErrWorkflowParent = serviceerror.NewNotFound("workflow parent does not match")
// ErrDeserializingToken is the error to indicate task token is invalid
ErrDeserializingToken = serviceerror.NewInvalidArgument("error deserializing task token")
// ErrCancellationAlreadyRequested is the error indicating cancellation for target workflow is already requested
ErrCancellationAlreadyRequested = serviceerror.NewCancellationAlreadyRequested("cancellation already requested for this workflow execution")
// ErrSignalsLimitExceeded is the error indicating limit reached for maximum number of signal events
ErrSignalsLimitExceeded = serviceerror.NewResourceExhausted("exceeded workflow execution limit for signal events")
	// ErrEventsAterWorkflowFinish is the error indicating a server error when trying to write events after the workflow finish event
ErrEventsAterWorkflowFinish = serviceerror.NewInternal("error validating last event being workflow finish event")
// ErrQueryEnteredInvalidState is error indicating query entered invalid state
ErrQueryEnteredInvalidState = serviceerror.NewInvalidArgument("query entered invalid state, this should be impossible")
// ErrConsistentQueryBufferExceeded is error indicating that too many consistent queries have been buffered and until buffered queries are finished new consistent queries cannot be buffered
ErrConsistentQueryBufferExceeded = serviceerror.NewInternal("consistent query buffer is full, cannot accept new consistent queries")
// ErrEmptyHistoryRawEventBatch indicate that one single batch of history raw events is of size 0
ErrEmptyHistoryRawEventBatch = serviceerror.NewInvalidArgument("encounter empty history batch")
// ErrSizeExceedsLimit is error indicating workflow execution has exceeded system defined limit
ErrSizeExceedsLimit = serviceerror.NewResourceExhausted(common.FailureReasonSizeExceedsLimit)
// ErrUnknownCluster is error indicating unknown cluster
ErrUnknownCluster = serviceerror.NewInvalidArgument("unknown cluster")
// FailedWorkflowStatuses is a set of failed workflow close states, used for start workflow policy
// for start workflow execution API
FailedWorkflowStatuses = map[enumspb.WorkflowExecutionStatus]bool{
enumspb.WORKFLOW_EXECUTION_STATUS_FAILED: true,
enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED: true,
enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED: true,
enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT: true,
}
)
// NewEngineWithShardContext creates an instance of history engine
func NewEngineWithShardContext(
shard shard.Context,
visibilityMgr persistence.VisibilityManager,
matching matching.Client,
historyClient history.Client,
publicClient sdkclient.Client,
eventNotifier events.Notifier,
publisher messaging.Producer,
config *configs.Config,
replicationTaskFetchers ReplicationTaskFetchers,
rawMatchingClient matching.Client,
queueTaskProcessor queueTaskProcessor,
) *historyEngineImpl {
currentClusterName := shard.GetService().GetClusterMetadata().GetCurrentClusterName()
logger := shard.GetLogger()
executionManager := shard.GetExecutionManager()
historyV2Manager := shard.GetHistoryManager()
historyCache := newHistoryCache(shard)
historyEngImpl := &historyEngineImpl{
status: common.DaemonStatusInitialized,
currentClusterName: currentClusterName,
shard: shard,
clusterMetadata: shard.GetClusterMetadata(),
timeSource: shard.GetTimeSource(),
historyV2Mgr: historyV2Manager,
executionManager: executionManager,
visibilityMgr: visibilityMgr,
tokenSerializer: common.NewProtoTaskTokenSerializer(),
historyCache: historyCache,
logger: logger.WithTags(tag.ComponentHistoryEngine),
throttledLogger: shard.GetThrottledLogger().WithTags(tag.ComponentHistoryEngine),
metricsClient: shard.GetMetricsClient(),
eventNotifier: eventNotifier,
config: config,
archivalClient: archiver.NewClient(
shard.GetMetricsClient(),
logger,
publicClient,
shard.GetConfig().NumArchiveSystemWorkflows,
shard.GetConfig().ArchiveRequestRPS,
shard.GetService().GetArchiverProvider(),
),
publicClient: publicClient,
matchingClient: matching,
rawMatchingClient: rawMatchingClient,
queueTaskProcessor: queueTaskProcessor,
}
historyEngImpl.txProcessor = newTransferQueueProcessor(shard, historyEngImpl, visibilityMgr, matching, historyClient, queueTaskProcessor, logger)
historyEngImpl.timerProcessor = newTimerQueueProcessor(shard, historyEngImpl, matching, queueTaskProcessor, logger)
historyEngImpl.eventsReapplier = newNDCEventsReapplier(shard.GetMetricsClient(), logger)
if shard.GetClusterMetadata().IsGlobalNamespaceEnabled() {
historyEngImpl.replicatorProcessor = newReplicatorQueueProcessor(
shard,
historyEngImpl.historyCache,
publisher,
executionManager,
historyV2Manager,
logger,
)
historyEngImpl.nDCReplicator = newNDCHistoryReplicator(
shard,
historyCache,
historyEngImpl.eventsReapplier,
logger,
)
historyEngImpl.nDCActivityReplicator = newNDCActivityReplicator(
shard,
historyCache,
logger,
)
}
historyEngImpl.workflowResetter = newWorkflowResetter(
shard,
historyCache,
logger,
)
historyEngImpl.workflowTaskHandler = newWorkflowTaskHandlerCallback(historyEngImpl)
var replicationTaskProcessors []ReplicationTaskProcessor
replicationTaskExecutors := make(map[string]replicationTaskExecutor)
for _, replicationTaskFetcher := range replicationTaskFetchers.GetFetchers() {
sourceCluster := replicationTaskFetcher.GetSourceCluster()
// Intentionally use the raw client to create its own retry policy
adminClient := shard.GetService().GetClientBean().GetRemoteAdminClient(sourceCluster)
adminRetryableClient := admin.NewRetryableClient(
adminClient,
common.CreateReplicationServiceBusyRetryPolicy(),
common.IsResourceExhausted,
)
// Intentionally use the raw client to create its own retry policy
historyClient := shard.GetService().GetClientBean().GetHistoryClient()
historyRetryableClient := history.NewRetryableClient(
historyClient,
common.CreateReplicationServiceBusyRetryPolicy(),
common.IsResourceExhausted,
)
nDCHistoryResender := xdc.NewNDCHistoryResender(
shard.GetNamespaceCache(),
adminRetryableClient,
func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error {
_, err := historyRetryableClient.ReplicateEventsV2(ctx, request)
return err
},
shard.GetService().GetPayloadSerializer(),
shard.GetConfig().StandbyTaskReReplicationContextTimeout,
shard.GetLogger(),
)
replicationTaskExecutor := newReplicationTaskExecutor(
sourceCluster,
shard,
shard.GetNamespaceCache(),
nDCHistoryResender,
historyEngImpl,
shard.GetMetricsClient(),
shard.GetLogger(),
)
replicationTaskExecutors[sourceCluster] = replicationTaskExecutor
replicationTaskProcessor := NewReplicationTaskProcessor(
shard,
historyEngImpl,
config,
shard.GetMetricsClient(),
replicationTaskFetcher,
replicationTaskExecutor,
)
replicationTaskProcessors = append(replicationTaskProcessors, replicationTaskProcessor)
}
historyEngImpl.replicationTaskProcessors = replicationTaskProcessors
replicationMessageHandler := newReplicationDLQHandler(shard, replicationTaskExecutors)
historyEngImpl.replicationDLQHandler = replicationMessageHandler
shard.SetEngine(historyEngImpl)
return historyEngImpl
}
// Start will spin up all the components needed to start serving this shard.
// Make sure all the components are loaded lazily so start can return immediately. This is important because
// ShardController calls start sequentially for all the shards for a given host during startup.
func (e *historyEngineImpl) Start() {
if !atomic.CompareAndSwapInt32(
&e.status,
common.DaemonStatusInitialized,
common.DaemonStatusStarted,
) {
return
}
e.logger.Info("", tag.LifeCycleStarting)
defer e.logger.Info("", tag.LifeCycleStarted)
e.txProcessor.Start()
e.timerProcessor.Start()
	// The failover callback will try to create a failover queue processor to scan all inflight tasks
	// if a namespace needs to be failed over. However, in the multicursor queue logic, the scan range
	// can't be retrieved before the processor is started. If the failover callback is registered
	// before the queue processor is started, it may result in a deadlock because, to create the
	// failover queue, the queue processor needs to be started.
e.registerNamespaceFailoverCallback()
clusterMetadata := e.shard.GetClusterMetadata()
if e.replicatorProcessor != nil {
if clusterMetadata.GetReplicationConsumerConfig().Type == config.ReplicationConsumerTypeKafka {
e.replicatorProcessor.Start()
}
}
for _, replicationTaskProcessor := range e.replicationTaskProcessors {
replicationTaskProcessor.Start()
}
}
// Stop the service.
func (e *historyEngineImpl) Stop() {
if !atomic.CompareAndSwapInt32(
&e.status,
common.DaemonStatusStarted,
common.DaemonStatusStopped,
) {
return
}
e.logger.Info("", tag.LifeCycleStopping)
defer e.logger.Info("", tag.LifeCycleStopped)
e.txProcessor.Stop()
e.timerProcessor.Stop()
if e.replicatorProcessor != nil {
e.replicatorProcessor.Stop()
}
for _, replicationTaskProcessor := range e.replicationTaskProcessors {
replicationTaskProcessor.Stop()
}
if e.queueTaskProcessor != nil {
e.queueTaskProcessor.StopShardProcessor(e.shard)
}
// unset the failover callback
e.shard.GetNamespaceCache().UnregisterNamespaceChangeCallback(e.shard.GetShardID())
}
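// registerNamespaceFailoverCallback registers a namespace change callback on the shard so that,
// when a global namespace fails over to the current cluster, transfer and timer task processing
// is notified to rescan tasks for the failed-over namespaces.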
func (e *historyEngineImpl) registerNamespaceFailoverCallback() {
// NOTE: READ BEFORE MODIFICATION
//
// Tasks, e.g. transfer tasks and timer tasks, are created when holding the shard lock
// meaning tasks -> release of shard lock
//
// Namespace change notification follows the following steps, order matters
// 1. lock all task processing.
// 2. namespace changes visible to everyone (Note: lock of task processing prevents task processing logic seeing the namespace changes).
	// 3. failover min and max task levels are calculated, then updated to shard.
// 4. failover start & task processing unlock & shard namespace version notification update. (order does not matter for this discussion)
//
// The above guarantees that task created during the failover will be processed.
// If the task is created after namespace change:
// then active processor will handle it. (simple case)
// If the task is created before namespace change:
// task -> release of shard lock
// failover min / max task levels calculated & updated to shard (using shard lock) -> failover start
// above 2 guarantees that failover start is after persistence of the task.
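	// failoverPredicate executes action only for a global namespace that has just failed over
	// to the current cluster, i.e. its failover notification version is at or above the shard's
	// namespace notification version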
failoverPredicate := func(shardNotificationVersion int64, nextNamespace *cache.NamespaceCacheEntry, action func()) {
namespaceFailoverNotificationVersion := nextNamespace.GetFailoverNotificationVersion()
namespaceActiveCluster := nextNamespace.GetReplicationConfig().ActiveClusterName
if nextNamespace.IsGlobalNamespace() &&
namespaceFailoverNotificationVersion >= shardNotificationVersion &&
namespaceActiveCluster == e.currentClusterName {
action()
}
}
// first set the failover callback
e.shard.GetNamespaceCache().RegisterNamespaceChangeCallback(
e.shard.GetShardID(),
e.shard.GetNamespaceNotificationVersion(),
func() {
e.txProcessor.LockTaskProcessing()
e.timerProcessor.LockTaskProcessing()
},
func(prevNamespaces []*cache.NamespaceCacheEntry, nextNamespaces []*cache.NamespaceCacheEntry) {
defer func() {
e.txProcessor.UnlockTaskPrrocessing()
e.timerProcessor.UnlockTaskProcessing()
}()
if len(nextNamespaces) == 0 {
return
}
shardNotificationVersion := e.shard.GetNamespaceNotificationVersion()
failoverNamespaceIDs := map[string]struct{}{}
for _, nextNamespace := range nextNamespaces {
failoverPredicate(shardNotificationVersion, nextNamespace, func() {
failoverNamespaceIDs[nextNamespace.GetInfo().Id] = struct{}{}
})
}
if len(failoverNamespaceIDs) > 0 {
e.logger.Info("Namespace Failover Start.", tag.WorkflowNamespaceIDs(failoverNamespaceIDs))
e.txProcessor.FailoverNamespace(failoverNamespaceIDs)
e.timerProcessor.FailoverNamespace(failoverNamespaceIDs)
now := e.shard.GetTimeSource().Now()
				// the fake tasks will not actually be used, we just need to make sure
				// their length is > 0 and they have the correct timestamp, to trigger a db scan
fakeWorkflowTask := []persistence.Task{&persistence.WorkflowTask{}}
fakeWorkflowTaskTimeoutTask := []persistence.Task{&persistence.WorkflowTaskTimeoutTask{VisibilityTimestamp: now}}
e.txProcessor.NotifyNewTask(e.currentClusterName, fakeWorkflowTask)
e.timerProcessor.NotifyNewTimers(e.currentClusterName, fakeWorkflowTaskTimeoutTask)
}
// nolint:errcheck
e.shard.UpdateNamespaceNotificationVersion(nextNamespaces[len(nextNamespaces)-1].GetNotificationVersion() + 1)
},
)
}
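// createMutableState creates a fresh mutable state builder with version histories for a new
// workflow run and initializes its history tree with the given run ID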
func (e *historyEngineImpl) createMutableState(
clusterMetadata cluster.Metadata,
namespaceEntry *cache.NamespaceCacheEntry,
runID string,
) (mutableState, error) {
var newMutableState mutableState
// version history applies to both local and global namespace
newMutableState = newMutableStateBuilderWithVersionHistories(
e.shard,
e.shard.GetEventsCache(),
e.logger,
namespaceEntry,
e.shard.GetTimeSource().Now(),
)
if err := newMutableState.SetHistoryTree(runID); err != nil {
return nil, err
}
return newMutableState, nil
}
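// generateFirstWorkflowTask schedules the first workflow task for a new execution; when
// parentInfo is present (child workflow) the first workflow task is scheduled separately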
func (e *historyEngineImpl) generateFirstWorkflowTask(
mutableState mutableState,
parentInfo *workflowspb.ParentExecutionInfo,
startEvent *historypb.HistoryEvent,
) error {
if parentInfo == nil {
// WorkflowTask is only created when it is not a Child Workflow and no backoff is needed
if err := mutableState.AddFirstWorkflowTaskScheduled(
startEvent,
); err != nil {
return err
}
}
return nil
}
// StartWorkflowExecution starts a workflow execution
func (e *historyEngineImpl) StartWorkflowExecution(
ctx context.Context,
startRequest *historyservice.StartWorkflowExecutionRequest,
) (resp *historyservice.StartWorkflowExecutionResponse, retError error) {
namespaceEntry, err := e.getActiveNamespaceEntry(startRequest.GetNamespaceId())
if err != nil {
return nil, err
}
namespaceID := namespaceEntry.GetInfo().Id
request := startRequest.StartRequest
err = validateStartWorkflowExecutionRequest(request, e.config.MaxIDLengthLimit())
if err != nil {
return nil, err
}
e.overrideStartWorkflowExecutionRequest(namespaceEntry, request, metrics.HistoryStartWorkflowExecutionScope)
workflowID := request.GetWorkflowId()
// grab the current context as a lock, nothing more
_, currentRelease, err := e.historyCache.getOrCreateCurrentWorkflowExecution(
ctx,
namespaceID,
workflowID,
)
if err != nil {
return nil, err
}
defer func() { currentRelease(retError) }()
execution := commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: uuid.New(),
}
clusterMetadata := e.shard.GetService().GetClusterMetadata()
mutableState, err := e.createMutableState(clusterMetadata, namespaceEntry, execution.GetRunId())
if err != nil {
return nil, err
}
startEvent, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
startRequest,
)
if err != nil {
return nil, serviceerror.NewInternal("Failed to add workflow execution started event.")
}
// Generate first workflow task event if not child WF and no first workflow task backoff
if err := e.generateFirstWorkflowTask(
mutableState,
startRequest.ParentExecutionInfo,
startEvent,
); err != nil {
return nil, err
}
weContext := newWorkflowExecutionContext(namespaceID, execution, e.shard, e.executionManager, e.logger)
now := e.timeSource.Now()
newWorkflow, newWorkflowEventsSeq, err := mutableState.CloseTransactionAsSnapshot(
now,
transactionPolicyActive,
)
if err != nil {
return nil, err
}
historySize, err := weContext.persistFirstWorkflowEvents(newWorkflowEventsSeq[0])
if err != nil {
return nil, err
}
// create as brand new
createMode := persistence.CreateWorkflowModeBrandNew
prevRunID := ""
prevLastWriteVersion := int64(0)
err = weContext.createWorkflowExecution(
newWorkflow, historySize, now,
createMode, prevRunID, prevLastWriteVersion,
)
if err != nil {
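		// workflow already exists: if this is a retry of the same start request, return the
		// existing run; otherwise apply the workflow id reuse policy and retry the creation
		// in workflow-id-reuse mode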
if t, ok := err.(*persistence.WorkflowExecutionAlreadyStartedError); ok {
if t.StartRequestID == request.GetRequestId() {
return &historyservice.StartWorkflowExecutionResponse{
RunId: t.RunID,
}, nil
// delete history is expected here because duplicate start request will create history with different rid
}
if mutableState.GetCurrentVersion() < t.LastWriteVersion {
return nil, serviceerror.NewNamespaceNotActive(
request.GetNamespace(),
clusterMetadata.GetCurrentClusterName(),
clusterMetadata.ClusterNameForFailoverVersion(t.LastWriteVersion),
)
}
// create as ID reuse
createMode = persistence.CreateWorkflowModeWorkflowIDReuse
prevRunID = t.RunID
prevLastWriteVersion = t.LastWriteVersion
if err = e.applyWorkflowIDReusePolicyHelper(
t.StartRequestID,
prevRunID,
t.State,
t.Status,
namespaceID,
execution,
startRequest.StartRequest.GetWorkflowIdReusePolicy(),
); err != nil {
return nil, err
}
err = weContext.createWorkflowExecution(
newWorkflow, historySize, now,
createMode, prevRunID, prevLastWriteVersion,
)
}
}
if err != nil {
return nil, err
}
return &historyservice.StartWorkflowExecutionResponse{
RunId: execution.GetRunId(),
}, nil
}
// GetMutableState retrieves the mutable state of the workflow execution
func (e *historyEngineImpl) GetMutableState(
ctx context.Context,
request *historyservice.GetMutableStateRequest,
) (*historyservice.GetMutableStateResponse, error) {
return e.getMutableStateOrPolling(ctx, request)
}
// PollMutableState retrieves the mutable state of the workflow execution with long polling
func (e *historyEngineImpl) PollMutableState(
ctx context.Context,
request *historyservice.PollMutableStateRequest,
) (*historyservice.PollMutableStateResponse, error) {
response, err := e.getMutableStateOrPolling(ctx, &historyservice.GetMutableStateRequest{
NamespaceId: request.GetNamespaceId(),
Execution: request.Execution,
ExpectedNextEventId: request.ExpectedNextEventId,
CurrentBranchToken: request.CurrentBranchToken})
if err != nil {
return nil, e.updateEntityNotExistsErrorOnPassiveCluster(err, request.GetNamespaceId())
}
return &historyservice.PollMutableStateResponse{
Execution: response.Execution,
WorkflowType: response.WorkflowType,
NextEventId: response.NextEventId,
PreviousStartedEventId: response.PreviousStartedEventId,
LastFirstEventId: response.LastFirstEventId,
TaskQueue: response.TaskQueue,
StickyTaskQueue: response.StickyTaskQueue,
StickyTaskQueueScheduleToStartTimeout: response.StickyTaskQueueScheduleToStartTimeout,
CurrentBranchToken: response.CurrentBranchToken,
VersionHistories: response.VersionHistories,
WorkflowState: response.WorkflowState,
WorkflowStatus: response.WorkflowStatus,
}, nil
}
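// updateEntityNotExistsErrorOnPassiveCluster rewrites a NotFound error to include the namespace's
// active and current cluster names when the namespace is not active in this cluster.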
func (e *historyEngineImpl) updateEntityNotExistsErrorOnPassiveCluster(err error, namespaceID string) error {
switch err.(type) {
case *serviceerror.NotFound:
namespaceCache, namespaceCacheErr := e.shard.GetNamespaceCache().GetNamespaceByID(namespaceID)
if namespaceCacheErr != nil {
return err // if the namespace cache could not be accessed, simply return the original error
}
if namespaceNotActiveErr, ok := namespaceCache.GetNamespaceNotActiveErr().(*serviceerror.NamespaceNotActive); ok && namespaceNotActiveErr != nil {
updatedErr := serviceerror.NewNotFound("Workflow execution not found in non-active cluster")
updatedErr.ActiveCluster = namespaceNotActiveErr.ActiveCluster
updatedErr.CurrentCluster = namespaceNotActiveErr.CurrentCluster
return updatedErr
}
}
return err
}
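// getMutableStateOrPolling returns the workflow's mutable state. When the request carries an expected
// next event ID that has not been reached yet and the workflow is still running, it long polls for history
// updates until the event arrives, the workflow closes, the branch token changes, or the poll expires.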
func (e *historyEngineImpl) getMutableStateOrPolling(
ctx context.Context,
request *historyservice.GetMutableStateRequest,
) (*historyservice.GetMutableStateResponse, error) {
namespaceID, err := validateNamespaceUUID(request.GetNamespaceId())
if err != nil {
return nil, err
}
execution := commonpb.WorkflowExecution{
WorkflowId: request.Execution.WorkflowId,
RunId: request.Execution.RunId,
}
response, err := e.getMutableState(ctx, namespaceID, execution)
if err != nil {
return nil, err
}
if request.CurrentBranchToken == nil {
request.CurrentBranchToken = response.CurrentBranchToken
}
if !bytes.Equal(request.CurrentBranchToken, response.CurrentBranchToken) {
return nil, serviceerrors.NewCurrentBranchChanged(response.CurrentBranchToken, request.CurrentBranchToken)
}
// set the run ID in case the request queried the currently running workflow
execution.RunId = response.Execution.RunId
// request.ExpectedNextEventId is 0 (EmptyEventID) when the caller wants the current next event ID without blocking
expectedNextEventID := common.FirstEventID
if request.ExpectedNextEventId != common.EmptyEventID {
expectedNextEventID = request.GetExpectedNextEventId()
}
// if the caller decided to long poll on the workflow execution
// and the event ID we are looking for has not been generated yet (i.e. it is >= the current next event ID)
if expectedNextEventID >= response.GetNextEventId() && response.GetWorkflowStatus() == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING {
subscriberID, channel, err := e.eventNotifier.WatchHistoryEvent(definition.NewWorkflowIdentifier(namespaceID, execution.GetWorkflowId(), execution.GetRunId()))
if err != nil {
return nil, err
}
defer e.eventNotifier.UnwatchHistoryEvent(definition.NewWorkflowIdentifier(namespaceID, execution.GetWorkflowId(), execution.GetRunId()), subscriberID) // nolint:errcheck
// check again in case the next event ID is updated
response, err = e.getMutableState(ctx, namespaceID, execution)
if err != nil {
return nil, err
}
// check again if the current branch token changed
if !bytes.Equal(request.CurrentBranchToken, response.CurrentBranchToken) {
return nil, serviceerrors.NewCurrentBranchChanged(response.CurrentBranchToken, request.CurrentBranchToken)
}
if expectedNextEventID < response.GetNextEventId() || response.GetWorkflowStatus() != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING {
return response, nil
}
namespaceCache, err := e.shard.GetNamespaceCache().GetNamespaceByID(namespaceID)
if err != nil {
return nil, err
}
timer := time.NewTimer(e.shard.GetConfig().LongPollExpirationInterval(namespaceCache.GetInfo().Name))
defer timer.Stop()
for {
select {
case event := <-channel:
response.LastFirstEventId = event.LastFirstEventID
response.NextEventId = event.NextEventID
response.PreviousStartedEventId = event.PreviousStartedEventID
response.WorkflowState = event.WorkflowState
response.WorkflowStatus = event.WorkflowStatus
if !bytes.Equal(request.CurrentBranchToken, event.CurrentBranchToken) {
return nil, serviceerrors.NewCurrentBranchChanged(event.CurrentBranchToken, request.CurrentBranchToken)
}
if expectedNextEventID < response.GetNextEventId() || response.GetWorkflowStatus() != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING {
return response, nil
}
case <-timer.C:
return response, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
return response, nil
}
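// QueryWorkflow queries a workflow execution. Depending on the request's reject condition, queries against
// closed workflows may be rejected. Queries are dispatched directly through matching when that is safe for
// consistency; otherwise they are buffered on the mutable state and answered as part of a workflow task.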
func (e *historyEngineImpl) QueryWorkflow(
ctx context.Context,
request *historyservice.QueryWorkflowRequest,
) (retResp *historyservice.QueryWorkflowResponse, retErr error) {
scope := e.metricsClient.Scope(metrics.HistoryQueryWorkflowScope)
mutableStateResp, err := e.getMutableState(ctx, request.GetNamespaceId(), *request.GetRequest().GetExecution())
if err != nil {
return nil, err
}
req := request.GetRequest()
if mutableStateResp.GetWorkflowStatus() != enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING && req.QueryRejectCondition != enumspb.QUERY_REJECT_CONDITION_NONE {
notOpenReject := req.GetQueryRejectCondition() == enumspb.QUERY_REJECT_CONDITION_NOT_OPEN
status := mutableStateResp.GetWorkflowStatus()
notCompletedCleanlyReject := req.GetQueryRejectCondition() == enumspb.QUERY_REJECT_CONDITION_NOT_COMPLETED_CLEANLY && status != enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED
if notOpenReject || notCompletedCleanlyReject {
return &historyservice.QueryWorkflowResponse{
Response: &workflowservice.QueryWorkflowResponse{
QueryRejected: &querypb.QueryRejected{
Status: status,
},
},
}, nil
}
}
de, err := e.shard.GetNamespaceCache().GetNamespaceByID(request.GetNamespaceId())
if err != nil {
return nil, err
}
context, release, err := e.historyCache.getOrCreateWorkflowExecution(ctx, request.GetNamespaceId(), *request.GetRequest().GetExecution())
if err != nil {
return nil, err
}
defer func() { release(retErr) }()
mutableState, err := context.loadWorkflowExecution()
if err != nil {
return nil, err
}
// There are two ways in which queries get dispatched to the workflow worker. First, queries can be dispatched on workflow tasks.
// These workflow tasks potentially contain new events and queries. The events are treated as coming before the query in time.
// The second way in which queries are dispatched to the workflow worker is directly through matching; in this approach queries can be
// dispatched to the workflow worker immediately even if there are outstanding events that came before the query. The following logic
// is used to determine if a query can be safely dispatched directly through matching or must be dispatched on a workflow task.
//
// There are three cases in which a query can be dispatched directly through matching safely, without violating the strong consistency level:
// 1. the namespace is not active; in this case history is immutable, so a query dispatched at any time is consistent
// 2. the workflow is not running; whenever a workflow is not running, dispatching the query directly is consistent
// 3. there are no pending or started workflow tasks, which means no events came before the query arrived, so it is safe to dispatch directly
safeToDispatchDirectly := !de.IsNamespaceActive() ||
!mutableState.IsWorkflowExecutionRunning() ||
(!mutableState.HasPendingWorkflowTask() && !mutableState.HasInFlightWorkflowTask())
if safeToDispatchDirectly {
release(nil)
msResp, err := e.getMutableState(ctx, request.GetNamespaceId(), *request.GetRequest().GetExecution())
if err != nil {
return nil, err
}
req.Execution.RunId = msResp.Execution.RunId
return e.queryDirectlyThroughMatching(ctx, msResp, request.GetNamespaceId(), req, scope)
}
// If we get here it means the query could not be dispatched through matching directly, so it must block
// until either a result has been obtained on a workflow task response or until it is safe to dispatch directly through matching.
sw := scope.StartTimer(metrics.WorkflowTaskQueryLatency)
defer sw.Stop()
queryReg := mutableState.GetQueryRegistry()
if len(queryReg.getBufferedIDs()) >= e.config.MaxBufferedQueryCount() {
scope.IncCounter(metrics.QueryBufferExceededCount)
return nil, ErrConsistentQueryBufferExceeded
}
queryID, termCh := queryReg.bufferQuery(req.GetQuery())
defer queryReg.removeQuery(queryID)
release(nil)
select {
case <-termCh:
state, err := queryReg.getTerminationState(queryID)
if err != nil {
scope.IncCounter(metrics.QueryRegistryInvalidStateCount)
return nil, err
}
switch state.queryTerminationType {
case queryTerminationTypeCompleted:
result := state.queryResult
switch result.GetResultType() {
case enumspb.QUERY_RESULT_TYPE_ANSWERED:
return &historyservice.QueryWorkflowResponse{
Response: &workflowservice.QueryWorkflowResponse{
QueryResult: result.GetAnswer(),
},
}, nil
case enumspb.QUERY_RESULT_TYPE_FAILED:
return nil, serviceerror.NewQueryFailed(result.GetErrorMessage())
default:
scope.IncCounter(metrics.QueryRegistryInvalidStateCount)
return nil, ErrQueryEnteredInvalidState
}
case queryTerminationTypeUnblocked:
msResp, err := e.getMutableState(ctx, request.GetNamespaceId(), *request.GetRequest().GetExecution())
if err != nil {
return nil, err
}
req.Execution.RunId = msResp.Execution.RunId
return e.queryDirectlyThroughMatching(ctx, msResp, request.GetNamespaceId(), req, scope)
case queryTerminationTypeFailed:
return nil, state.failure
default:
scope.IncCounter(metrics.QueryRegistryInvalidStateCount)
return nil, ErrQueryEnteredInvalidState
}
case <-ctx.Done():
scope.IncCounter(metrics.ConsistentQueryTimeoutCount)
return nil, ctx.Err()
}
}
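// queryDirectlyThroughMatching dispatches a query to the worker via the matching service. It first tries the
// sticky task queue when stickiness is enabled, clearing stickiness and falling back to the normal task queue
// if the sticky attempt times out.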
func (e *historyEngineImpl) queryDirectlyThroughMatching(
ctx context.Context,
msResp *historyservice.GetMutableStateResponse,
namespaceID string,
queryRequest *workflowservice.QueryWorkflowRequest,
scope metrics.Scope,
) (*historyservice.QueryWorkflowResponse, error) {
sw := scope.StartTimer(metrics.DirectQueryDispatchLatency)
defer sw.Stop()
if msResp.GetIsStickyTaskQueueEnabled() &&
len(msResp.GetStickyTaskQueue().GetName()) != 0 &&
e.config.EnableStickyQuery(queryRequest.GetNamespace()) {
stickyMatchingRequest := &matchingservice.QueryWorkflowRequest{
NamespaceId: namespaceID,
QueryRequest: queryRequest,
TaskQueue: msResp.GetStickyTaskQueue(),
}
// use a clean new context in case the caller provided a context with
// a really short deadline, which would cause us to clear the stickiness
stickyContext, cancel := context.WithTimeout(context.Background(), timestamp.DurationValue(msResp.GetStickyTaskQueueScheduleToStartTimeout()))
stickyStopWatch := scope.StartTimer(metrics.DirectQueryDispatchStickyLatency)
matchingResp, err := e.rawMatchingClient.QueryWorkflow(stickyContext, stickyMatchingRequest)
stickyStopWatch.Stop()
cancel()
if err == nil {
scope.IncCounter(metrics.DirectQueryDispatchStickySuccessCount)
return &historyservice.QueryWorkflowResponse{
Response: &workflowservice.QueryWorkflowResponse{
QueryResult: matchingResp.GetQueryResult(),
QueryRejected: matchingResp.GetQueryRejected(),
}}, nil
}
if !common.IsContextDeadlineExceededErr(err) && !common.IsContextCanceledErr(err) {
e.logger.Error("query directly though matching on sticky failed, will not attempt query on non-sticky",
tag.WorkflowNamespace(queryRequest.GetNamespace()),
tag.WorkflowID(queryRequest.Execution.GetWorkflowId()),
tag.WorkflowRunID(queryRequest.Execution.GetRunId()),
tag.WorkflowQueryType(queryRequest.Query.GetQueryType()),
tag.Error(err))
return nil, err
}
if msResp.GetWorkflowStatus() == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING {
e.logger.Info("query direct through matching failed on sticky, clearing sticky before attempting on non-sticky",
tag.WorkflowNamespace(queryRequest.GetNamespace()),
tag.WorkflowID(queryRequest.Execution.GetWorkflowId()),
tag.WorkflowRunID(queryRequest.Execution.GetRunId()),
tag.WorkflowQueryType(queryRequest.Query.GetQueryType()))
resetContext, cancel := context.WithTimeout(context.Background(), 5*time.Second)
clearStickinessStopWatch := scope.StartTimer(metrics.DirectQueryDispatchClearStickinessLatency)
_, err := e.ResetStickyTaskQueue(resetContext, &historyservice.ResetStickyTaskQueueRequest{
NamespaceId: namespaceID,
Execution: queryRequest.GetExecution(),
})
clearStickinessStopWatch.Stop()
cancel()
if err != nil && err != ErrWorkflowCompleted {
return nil, err
}
scope.IncCounter(metrics.DirectQueryDispatchClearStickinessSuccessCount)
}
}
if err := common.IsValidContext(ctx); err != nil {
e.logger.Info("query context timed out before query on non-sticky task queue could be attempted",
tag.WorkflowNamespace(queryRequest.GetNamespace()),
tag.WorkflowID(queryRequest.Execution.GetWorkflowId()),
tag.WorkflowRunID(queryRequest.Execution.GetRunId()),
tag.WorkflowQueryType(queryRequest.Query.GetQueryType()))
scope.IncCounter(metrics.DirectQueryDispatchTimeoutBeforeNonStickyCount)
return nil, err
}
e.logger.Info("query directly through matching on sticky timed out, attempting to query on non-sticky",
tag.WorkflowNamespace(queryRequest.GetNamespace()),
tag.WorkflowID(queryRequest.Execution.GetWorkflowId()),
tag.WorkflowRunID(queryRequest.Execution.GetRunId()),
tag.WorkflowQueryType(queryRequest.Query.GetQueryType()),
tag.WorkflowTaskQueueName(msResp.GetStickyTaskQueue().GetName()),
tag.WorkflowNextEventID(msResp.GetNextEventId()))
nonStickyMatchingRequest := &matchingservice.QueryWorkflowRequest{
NamespaceId: namespaceID,
QueryRequest: queryRequest,
TaskQueue: msResp.TaskQueue,
}
nonStickyStopWatch := scope.StartTimer(metrics.DirectQueryDispatchNonStickyLatency)
matchingResp, err := e.matchingClient.QueryWorkflow(ctx, nonStickyMatchingRequest)
nonStickyStopWatch.Stop()
if err != nil {
e.logger.Error("query directly though matching on non-sticky failed",
tag.WorkflowNamespace(queryRequest.GetNamespace()),
tag.WorkflowID(queryRequest.Execution.GetWorkflowId()),
tag.WorkflowRunID(queryRequest.Execution.GetRunId()),
tag.WorkflowQueryType(queryRequest.Query.GetQueryType()),
tag.Error(err))
return nil, err
}
scope.IncCounter(metrics.DirectQueryDispatchNonStickySuccessCount)
return &historyservice.QueryWorkflowResponse{
Response: &workflowservice.QueryWorkflowResponse{
QueryResult: matchingResp.GetQueryResult(),
QueryRejected: matchingResp.GetQueryRejected(),
}}, err
}
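// getMutableState loads the workflow execution and builds a GetMutableStateResponse snapshot containing the
// next event ID, task queue information, current branch token, workflow state/status, and version histories.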
func (e *historyEngineImpl) getMutableState(
ctx context.Context,
namespaceID string,
execution commonpb.WorkflowExecution,
) (retResp *historyservice.GetMutableStateResponse, retError error) {
context, release, retError := e.historyCache.getOrCreateWorkflowExecution(ctx, namespaceID, execution)
if retError != nil {
return
}
defer func() { release(retError) }()
mutableState, retError := context.loadWorkflowExecution()
if retError != nil {
return
}
currentBranchToken, err := mutableState.GetCurrentBranchToken()
if err != nil {
return nil, err
}
executionInfo := mutableState.GetExecutionInfo()
execution.RunId = context.getExecution().RunId
workflowState, workflowStatus := mutableState.GetWorkflowStateStatus()
retResp = &historyservice.GetMutableStateResponse{
Execution: &execution,
WorkflowType: &commonpb.WorkflowType{Name: executionInfo.WorkflowTypeName},
LastFirstEventId: mutableState.GetLastFirstEventID(),
NextEventId: mutableState.GetNextEventID(),
PreviousStartedEventId: mutableState.GetPreviousStartedEventID(),
TaskQueue: &taskqueuepb.TaskQueue{
Name: executionInfo.TaskQueue,
Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
},
StickyTaskQueue: &taskqueuepb.TaskQueue{
Name: executionInfo.StickyTaskQueue,
Kind: enumspb.TASK_QUEUE_KIND_STICKY,
},
StickyTaskQueueScheduleToStartTimeout: executionInfo.StickyScheduleToStartTimeout,
CurrentBranchToken: currentBranchToken,
WorkflowState: workflowState,
WorkflowStatus: workflowStatus,
IsStickyTaskQueueEnabled: mutableState.IsStickyTaskQueueEnabled(),
}
versionHistories := mutableState.GetExecutionInfo().GetVersionHistories()
if versionHistories != nil {
retResp.VersionHistories = versionhistory.CopyVersionHistories(versionHistories)
}
return
}
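// DescribeMutableState returns the raw mutable state of a workflow execution: the copy currently held in the
// history cache (if any) and the copy loaded from the database.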
func (e *historyEngineImpl) DescribeMutableState(
ctx context.Context,
request *historyservice.DescribeMutableStateRequest,
) (response *historyservice.DescribeMutableStateResponse, retError error) {
namespaceID, err := validateNamespaceUUID(request.GetNamespaceId())
if err != nil {
return nil, err
}
execution := commonpb.WorkflowExecution{
WorkflowId: request.Execution.WorkflowId,
RunId: request.Execution.RunId,
}
cacheCtx, dbCtx, release, cacheHit, err := e.historyCache.getAndCreateWorkflowExecution(
ctx, namespaceID, execution,
)
if err != nil {
return nil, err
}
defer func() { release(retError) }()
response = &historyservice.DescribeMutableStateResponse{}
if cacheHit && cacheCtx.(*workflowExecutionContextImpl).mutableState != nil {
msb := cacheCtx.(*workflowExecutionContextImpl).mutableState
response.CacheMutableState = msb.ToProto()
}
msb, err := dbCtx.loadWorkflowExecution()
if err != nil {
return nil, err
}
response.DatabaseMutableState = msb.ToProto()
return response, nil
}
// ResetStickyTaskQueue resets the volatile information in the mutable state of a given workflow.
// Volatile information is the client-related information, such as:
// 1. StickyTaskQueue
// 2. StickyScheduleToStartTimeout
func (e *historyEngineImpl) ResetStickyTaskQueue(
ctx context.Context,
resetRequest *historyservice.ResetStickyTaskQueueRequest,
) (*historyservice.ResetStickyTaskQueueResponse, error) {
namespaceID, err := validateNamespaceUUID(resetRequest.GetNamespaceId())
if err != nil {
return nil, err
}
err = e.updateWorkflowExecution(ctx, namespaceID, *resetRequest.Execution, false,
func(context workflowExecutionContext, mutableState mutableState) error {
if !mutableState.IsWorkflowExecutionRunning() {
return ErrWorkflowCompleted
}
mutableState.ClearStickyness()
return nil
},
)
if err != nil {
return nil, err
}
return &historyservice.ResetStickyTaskQueueResponse{}, nil
}
// DescribeWorkflowExecution returns information about the specified workflow execution.
func (e *historyEngineImpl) DescribeWorkflowExecution(
ctx context.Context,
request *historyservice.DescribeWorkflowExecutionRequest,
) (retResp *historyservice.DescribeWorkflowExecutionResponse, retError error) {
namespaceID, err := validateNamespaceUUID(request.GetNamespaceId())
if err != nil {
return nil, err
}
execution := *request.Request.Execution
context, release, err0 := e.historyCache.getOrCreateWorkflowExecution(ctx, namespaceID, execution)
if err0 != nil {
return nil, err0
}
defer func() { release(retError) }()
mutableState, err1 := context.loadWorkflowExecution()
if err1 != nil {
return nil, err1
}
executionInfo := mutableState.GetExecutionInfo()
executionState := mutableState.GetExecutionState()
result := &historyservice.DescribeWorkflowExecutionResponse{
ExecutionConfig: &workflowpb.WorkflowExecutionConfig{
TaskQueue: &taskqueuepb.TaskQueue{
Name: executionInfo.TaskQueue,
Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
},
WorkflowExecutionTimeout: executionInfo.WorkflowExecutionTimeout,
WorkflowRunTimeout: executionInfo.WorkflowRunTimeout,
DefaultWorkflowTaskTimeout: executionInfo.DefaultWorkflowTaskTimeout,
},
WorkflowExecutionInfo: &workflowpb.WorkflowExecutionInfo{
Execution: &commonpb.WorkflowExecution{
WorkflowId: executionInfo.WorkflowId,
RunId: executionState.RunId,
},
Type: &commonpb.WorkflowType{Name: executionInfo.WorkflowTypeName},
StartTime: executionInfo.StartTime,
HistoryLength: mutableState.GetNextEventID() - common.FirstEventID,
AutoResetPoints: executionInfo.AutoResetPoints,
Memo: &commonpb.Memo{Fields: executionInfo.Memo},
SearchAttributes: &commonpb.SearchAttributes{IndexedFields: executionInfo.SearchAttributes},
Status: executionState.Status,
},
}
// TODO: we need to consider adding execution time to mutable state
// For now execution time will be calculated based on start time and cron schedule/retry policy
// each time DescribeWorkflowExecution is called.
startEvent, err := mutableState.GetStartEvent()
if err != nil {
return nil, err
}
backoffDuration := timestamp.DurationValue(startEvent.GetWorkflowExecutionStartedEventAttributes().GetFirstWorkflowTaskBackoff())
result.WorkflowExecutionInfo.ExecutionTime = timestamp.TimePtr(timestamp.TimeValue(result.WorkflowExecutionInfo.GetStartTime()).Add(backoffDuration))
if executionInfo.ParentRunId != "" {
result.WorkflowExecutionInfo.ParentExecution = &commonpb.WorkflowExecution{
WorkflowId: executionInfo.ParentWorkflowId,
RunId: executionInfo.ParentRunId,
}
result.WorkflowExecutionInfo.ParentNamespaceId = executionInfo.ParentNamespaceId
}
if executionState.State == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED {
// for closed workflow
result.WorkflowExecutionInfo.Status = executionState.Status
completionEvent, err := mutableState.GetCompletionEvent()
if err != nil {
return nil, err
}
result.WorkflowExecutionInfo.CloseTime = completionEvent.GetEventTime()
}
if len(mutableState.GetPendingActivityInfos()) > 0 {
for _, ai := range mutableState.GetPendingActivityInfos() {
p := &workflowpb.PendingActivityInfo{
ActivityId: ai.ActivityId,
}
if ai.CancelRequested {
p.State = enumspb.PENDING_ACTIVITY_STATE_CANCEL_REQUESTED
} else if ai.StartedId != common.EmptyEventID {
p.State = enumspb.PENDING_ACTIVITY_STATE_STARTED
} else {
p.State = enumspb.PENDING_ACTIVITY_STATE_SCHEDULED
}
if !timestamp.TimeValue(ai.LastHeartbeatUpdateTime).IsZero() {
p.LastHeartbeatTime = ai.LastHeartbeatUpdateTime
p.HeartbeatDetails = ai.LastHeartbeatDetails
}
// TODO: move to mutable state instead of loading it from event
scheduledEvent, err := mutableState.GetActivityScheduledEvent(ai.ScheduleId)
if err != nil {
return nil, err
}
p.ActivityType = scheduledEvent.GetActivityTaskScheduledEventAttributes().ActivityType
if p.State == enumspb.PENDING_ACTIVITY_STATE_SCHEDULED {
p.ScheduledTime = ai.ScheduledTime
} else {
p.LastStartedTime = ai.StartedTime
}
if ai.HasRetryPolicy {
p.Attempt = int32(ai.Attempt)
p.ExpirationTime = ai.RetryExpirationTime
if ai.RetryMaximumAttempts != 0 {
p.MaximumAttempts = ai.RetryMaximumAttempts
}
if ai.RetryLastFailure != nil {
p.LastFailure = ai.RetryLastFailure
}
if ai.RetryLastWorkerIdentity != "" {
p.LastWorkerIdentity = ai.RetryLastWorkerIdentity
}
} else {
p.Attempt = 1
}
result.PendingActivities = append(result.PendingActivities, p)
}
}
if len(mutableState.GetPendingChildExecutionInfos()) > 0 {
for _, ch := range mutableState.GetPendingChildExecutionInfos() {
p := &workflowpb.PendingChildExecutionInfo{
WorkflowId: ch.StartedWorkflowId,
RunId: ch.StartedRunId,
WorkflowTypeName: ch.WorkflowTypeName,
InitiatedId: ch.InitiatedId,
ParentClosePolicy: ch.ParentClosePolicy,
}
result.PendingChildren = append(result.PendingChildren, p)
}
}
return result, nil
}
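// RecordActivityTaskStarted records that an activity task has been started by a worker and returns the
// information the worker needs to execute it (scheduled event, attempt, heartbeat details, workflow type and
// namespace). A retry with the same request ID returns the previously recorded start information.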
func (e *historyEngineImpl) RecordActivityTaskStarted(
ctx context.Context,
request *historyservice.RecordActivityTaskStartedRequest,
) (*historyservice.RecordActivityTaskStartedResponse, error) {
namespaceEntry, err := e.getActiveNamespaceEntry(request.GetNamespaceId())
if err != nil {
return nil, err
}
namespaceInfo := namespaceEntry.GetInfo()
namespaceID := namespaceInfo.Id
namespace := namespaceInfo.Name
execution := commonpb.WorkflowExecution{
WorkflowId: request.WorkflowExecution.WorkflowId,
RunId: request.WorkflowExecution.RunId,
}
response := &historyservice.RecordActivityTaskStartedResponse{}
err = e.updateWorkflowExecution(ctx, namespaceID, execution, false,
func(context workflowExecutionContext, mutableState mutableState) error {
if !mutableState.IsWorkflowExecutionRunning() {
return ErrWorkflowCompleted
}
scheduleID := request.GetScheduleId()
requestID := request.GetRequestId()
ai, isRunning := mutableState.GetActivityInfo(scheduleID)
// First check to see if the cache needs to be refreshed, as we could potentially have a stale workflow execution in
// some extreme Cassandra failure cases.
if !isRunning && scheduleID >= mutableState.GetNextEventID() {
e.metricsClient.IncCounter(metrics.HistoryRecordActivityTaskStartedScope, metrics.StaleMutableStateCounter)
return ErrStaleState
}
// Check the execution state to make sure the task is in the list of outstanding tasks and is not yet started. If the
// task is not outstanding then it is most probably a duplicate, so complete the task.
if !isRunning {
// Looks like ActivityTask already completed as a result of another call.
// It is OK to drop the task at this point.
e.logger.Debug("Potentially duplicate task.", tag.TaskID(request.GetTaskId()), tag.WorkflowScheduleID(scheduleID), tag.TaskType(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK))
return ErrActivityTaskNotFound
}
scheduledEvent, err := mutableState.GetActivityScheduledEvent(scheduleID)
if err != nil {
return err
}
response.ScheduledEvent = scheduledEvent
response.CurrentAttemptScheduledTime = ai.ScheduledTime
if ai.StartedId != common.EmptyEventID {
// If activity is started as part of the current request scope then return a positive response
if ai.RequestId == requestID {
response.StartedTime = ai.StartedTime
response.Attempt = ai.Attempt
return nil
}
// Looks like ActivityTask already started as a result of another call.
// It is OK to drop the task at this point.
e.logger.Debug("Potentially duplicate task.", tag.TaskID(request.GetTaskId()), tag.WorkflowScheduleID(scheduleID), tag.TaskType(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK))
return serviceerrors.NewTaskAlreadyStarted("Activity")
}
if _, err := mutableState.AddActivityTaskStartedEvent(
ai, scheduleID, requestID, request.PollRequest.GetIdentity(),
); err != nil {
return err
}
response.StartedTime = ai.StartedTime
response.Attempt = ai.Attempt
response.HeartbeatDetails = ai.LastHeartbeatDetails
response.WorkflowType = mutableState.GetWorkflowType()
response.WorkflowNamespace = namespace
return nil
})
if err != nil {
return nil, err
}
return response, err
}
// ScheduleWorkflowTask schedules a workflow task if no outstanding workflow task is found
func (e *historyEngineImpl) ScheduleWorkflowTask(
ctx context.Context,
req *historyservice.ScheduleWorkflowTaskRequest,
) error {
return e.workflowTaskHandler.handleWorkflowTaskScheduled(ctx, req)
}
// RecordWorkflowTaskStarted starts a workflow task
func (e *historyEngineImpl) RecordWorkflowTaskStarted(
ctx context.Context,
request *historyservice.RecordWorkflowTaskStartedRequest,
) (*historyservice.RecordWorkflowTaskStartedResponse, error) {
return e.workflowTaskHandler.handleWorkflowTaskStarted(ctx, request)
}
// RespondWorkflowTaskCompleted completes a workflow task
func (e *historyEngineImpl) RespondWorkflowTaskCompleted(
ctx context.Context,
req *historyservice.RespondWorkflowTaskCompletedRequest,
) (*historyservice.RespondWorkflowTaskCompletedResponse, error) {
return e.workflowTaskHandler.handleWorkflowTaskCompleted(ctx, req)
}
// RespondWorkflowTaskFailed fails a workflow task
func (e *historyEngineImpl) RespondWorkflowTaskFailed(
ctx context.Context,
req *historyservice.RespondWorkflowTaskFailedRequest,
) error {
return e.workflowTaskHandler.handleWorkflowTaskFailed(ctx, req)
}
// RespondActivityTaskCompleted completes an activity task.
func (e *historyEngineImpl) RespondActivityTaskCompleted(
ctx context.Context,
req *historyservice.RespondActivityTaskCompletedRequest,
) error {
namespaceEntry, err := e.getActiveNamespaceEntry(req.GetNamespaceId())
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
namespace := namespaceEntry.GetInfo().Name
request := req.CompleteRequest
token, err0 := e.tokenSerializer.Deserialize(request.TaskToken)
if err0 != nil {
return ErrDeserializingToken
}
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: token.GetWorkflowId(),
RunId: token.GetRunId(),
}
var activityStartedTime time.Time
var taskQueue string
err = e.updateWorkflowExecution(ctx, namespaceID, workflowExecution, true,
func(context workflowExecutionContext, mutableState mutableState) error {
if !mutableState.IsWorkflowExecutionRunning() {
return ErrWorkflowCompleted
}
scheduleID := token.GetScheduleId()
if scheduleID == common.EmptyEventID { // client called CompleteActivityById, so get scheduleID by activityID
scheduleID, err0 = getScheduleID(token.GetActivityId(), mutableState)
if err0 != nil {
return err0
}
}
ai, isRunning := mutableState.GetActivityInfo(scheduleID)
// First check to see if the cache needs to be refreshed, as we could potentially have a stale workflow execution in
// some extreme Cassandra failure cases.
if !isRunning && scheduleID >= mutableState.GetNextEventID() {
e.metricsClient.IncCounter(metrics.HistoryRespondActivityTaskCompletedScope, metrics.StaleMutableStateCounter)
return ErrStaleState
}
if !isRunning || ai.StartedId == common.EmptyEventID ||
(token.GetScheduleId() != common.EmptyEventID && token.ScheduleAttempt != ai.Attempt) {
return ErrActivityTaskNotFound
}
if _, err := mutableState.AddActivityTaskCompletedEvent(scheduleID, ai.StartedId, request); err != nil {
// Unable to add ActivityTaskCompleted event to history
return serviceerror.NewInternal("Unable to add ActivityTaskCompleted event to history.")
}
activityStartedTime = *ai.StartedTime
taskQueue = ai.TaskQueue
return nil
})
if err == nil && !activityStartedTime.IsZero() {
scope := e.metricsClient.Scope(metrics.HistoryRespondActivityTaskCompletedScope).
Tagged(
metrics.NamespaceTag(namespace),
metrics.WorkflowTypeTag(token.WorkflowType),
metrics.ActivityTypeTag(token.ActivityType),
metrics.TaskQueueTag(taskQueue),
)
scope.RecordTimer(metrics.ActivityE2ELatency, time.Since(activityStartedTime))
}
return err
}
// RespondActivityTaskFailed records an activity task failure.
func (e *historyEngineImpl) RespondActivityTaskFailed(
ctx context.Context,
req *historyservice.RespondActivityTaskFailedRequest,
) error {
namespaceEntry, err := e.getActiveNamespaceEntry(req.GetNamespaceId())
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
namespace := namespaceEntry.GetInfo().Name
request := req.FailedRequest
token, err0 := e.tokenSerializer.Deserialize(request.TaskToken)
if err0 != nil {
return ErrDeserializingToken
}
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: token.GetWorkflowId(),
RunId: token.GetRunId(),
}
var activityStartedTime time.Time
var taskQueue string
err = e.updateWorkflowExecutionWithAction(ctx, namespaceID, workflowExecution,
func(context workflowExecutionContext, mutableState mutableState) (*updateWorkflowAction, error) {
if !mutableState.IsWorkflowExecutionRunning() {
return nil, ErrWorkflowCompleted
}
scheduleID := token.GetScheduleId()
if scheduleID == common.EmptyEventID { // client called CompleteActivityById, so get scheduleID by activityID
scheduleID, err0 = getScheduleID(token.GetActivityId(), mutableState)
if err0 != nil {
return nil, err0
}
}
ai, isRunning := mutableState.GetActivityInfo(scheduleID)
// First check to see if the cache needs to be refreshed, as we could potentially have a stale workflow execution in
// some extreme Cassandra failure cases.
if !isRunning && scheduleID >= mutableState.GetNextEventID() {
e.metricsClient.IncCounter(metrics.HistoryRespondActivityTaskFailedScope, metrics.StaleMutableStateCounter)
return nil, ErrStaleState
}
if !isRunning || ai.StartedId == common.EmptyEventID ||
(token.GetScheduleId() != common.EmptyEventID && token.ScheduleAttempt != ai.Attempt) {
return nil, ErrActivityTaskNotFound
}
postActions := &updateWorkflowAction{}
failure := request.GetFailure()
retryState, err := mutableState.RetryActivity(ai, failure)
if err != nil {
return nil, err
}
if retryState != enumspb.RETRY_STATE_IN_PROGRESS {
// no more retries, so record the failure event
if _, err := mutableState.AddActivityTaskFailedEvent(scheduleID, ai.StartedId, failure, retryState, request.GetIdentity()); err != nil {
// Unable to add ActivityTaskFailed event to history
return nil, serviceerror.NewInternal("Unable to add ActivityTaskFailed event to history.")
}
postActions.createWorkflowTask = true
}
activityStartedTime = *ai.StartedTime
taskQueue = ai.TaskQueue
return postActions, nil
})
if err == nil && !activityStartedTime.IsZero() {
scope := e.metricsClient.Scope(metrics.HistoryRespondActivityTaskFailedScope).
Tagged(
metrics.NamespaceTag(namespace),
metrics.WorkflowTypeTag(token.WorkflowType),
metrics.ActivityTypeTag(token.ActivityType),
metrics.TaskQueueTag(taskQueue),
)
scope.RecordTimer(metrics.ActivityE2ELatency, time.Since(activityStartedTime))
}
return err
}
// RespondActivityTaskCanceled records an activity task cancellation.
func (e *historyEngineImpl) RespondActivityTaskCanceled(
ctx context.Context,
req *historyservice.RespondActivityTaskCanceledRequest,
) error {
namespaceEntry, err := e.getActiveNamespaceEntry(req.GetNamespaceId())
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
namespace := namespaceEntry.GetInfo().Name
request := req.CancelRequest
token, err0 := e.tokenSerializer.Deserialize(request.TaskToken)
if err0 != nil {
return ErrDeserializingToken
}
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: token.GetWorkflowId(),
RunId: token.GetRunId(),
}
var activityStartedTime time.Time
var taskQueue string
err = e.updateWorkflowExecution(ctx, namespaceID, workflowExecution, true,
func(context workflowExecutionContext, mutableState mutableState) error {
if !mutableState.IsWorkflowExecutionRunning() {
return ErrWorkflowCompleted
}
scheduleID := token.GetScheduleId()
if scheduleID == common.EmptyEventID { // client called CompleteActivityById, so get scheduleID by activityID
scheduleID, err0 = getScheduleID(token.GetActivityId(), mutableState)
if err0 != nil {
return err0
}
}
ai, isRunning := mutableState.GetActivityInfo(scheduleID)
// First check to see if the cache needs to be refreshed, as we could potentially have a stale workflow execution in
// some extreme Cassandra failure cases.
if !isRunning && scheduleID >= mutableState.GetNextEventID() {
e.metricsClient.IncCounter(metrics.HistoryRespondActivityTaskCanceledScope, metrics.StaleMutableStateCounter)
return ErrStaleState
}
if !isRunning || ai.StartedId == common.EmptyEventID ||
(token.GetScheduleId() != common.EmptyEventID && token.ScheduleAttempt != ai.Attempt) {
return ErrActivityTaskNotFound
}
if _, err := mutableState.AddActivityTaskCanceledEvent(
scheduleID,
ai.StartedId,
ai.CancelRequestId,
request.Details,
request.Identity); err != nil {
// Unable to add ActivityTaskCanceled event to history
return serviceerror.NewInternal("Unable to add ActivityTaskCanceled event to history.")
}
activityStartedTime = *ai.StartedTime
taskQueue = ai.TaskQueue
return nil
})
if err == nil && !activityStartedTime.IsZero() {
scope := e.metricsClient.Scope(metrics.HistoryClientRespondActivityTaskCanceledScope).
Tagged(
metrics.NamespaceTag(namespace),
metrics.WorkflowTypeTag(token.WorkflowType),
metrics.ActivityTypeTag(token.ActivityType),
metrics.TaskQueueTag(taskQueue),
)
scope.RecordTimer(metrics.ActivityE2ELatency, time.Since(activityStartedTime))
}
return err
}
// RecordActivityTaskHeartbeat records a heartbeat for an activity task.
// This method can be used for two purposes:
// - For reporting liveness of the activity.
// - For reporting progress of the activity; this can be done even if liveness is not configured.
func (e *historyEngineImpl) RecordActivityTaskHeartbeat(
ctx context.Context,
req *historyservice.RecordActivityTaskHeartbeatRequest,
) (*historyservice.RecordActivityTaskHeartbeatResponse, error) {
namespaceEntry, err := e.getActiveNamespaceEntry(req.GetNamespaceId())
if err != nil {
return nil, err
}
namespaceID := namespaceEntry.GetInfo().Id
request := req.HeartbeatRequest
token, err0 := e.tokenSerializer.Deserialize(request.TaskToken)
if err0 != nil {
return nil, ErrDeserializingToken
}
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: token.GetWorkflowId(),
RunId: token.GetRunId(),
}
var cancelRequested bool
err = e.updateWorkflowExecution(ctx, namespaceID, workflowExecution, false,
func(context workflowExecutionContext, mutableState mutableState) error {
if !mutableState.IsWorkflowExecutionRunning() {
e.logger.Debug("Heartbeat failed")
return ErrWorkflowCompleted
}
scheduleID := token.GetScheduleId()
if scheduleID == common.EmptyEventID { // client called RecordActivityHeartbeatByID, so get scheduleID by activityID
scheduleID, err0 = getScheduleID(token.GetActivityId(), mutableState)
if err0 != nil {
return err0
}
}
ai, isRunning := mutableState.GetActivityInfo(scheduleID)
// First check to see if the cache needs to be refreshed, as we could potentially have a stale workflow execution in
// some extreme Cassandra failure cases.
if !isRunning && scheduleID >= mutableState.GetNextEventID() {
e.metricsClient.IncCounter(metrics.HistoryRecordActivityTaskHeartbeatScope, metrics.StaleMutableStateCounter)
return ErrStaleState
}
if !isRunning || ai.StartedId == common.EmptyEventID ||
(token.GetScheduleId() != common.EmptyEventID && token.ScheduleAttempt != ai.Attempt) {
return ErrActivityTaskNotFound
}
cancelRequested = ai.CancelRequested
e.logger.Debug("Activity heartbeat", tag.WorkflowScheduleID(scheduleID), tag.ActivityInfo(ai), tag.Bool(cancelRequested))
// Save progress and last HB reported time.
mutableState.UpdateActivityProgress(ai, request)
return nil
})
if err != nil {
return &historyservice.RecordActivityTaskHeartbeatResponse{}, err
}
return &historyservice.RecordActivityTaskHeartbeatResponse{CancelRequested: cancelRequested}, nil
}
// RequestCancelWorkflowExecution records a cancellation request event for the workflow execution
func (e *historyEngineImpl) RequestCancelWorkflowExecution(
ctx context.Context,
req *historyservice.RequestCancelWorkflowExecutionRequest,
) error {
namespaceEntry, err := e.getActiveNamespaceEntry(req.GetNamespaceId())
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
request := req.CancelRequest
parentExecution := req.ExternalWorkflowExecution
childWorkflowOnly := req.GetChildWorkflowOnly()
execution := commonpb.WorkflowExecution{
WorkflowId: request.WorkflowExecution.WorkflowId,
}
firstExecutionRunID := request.GetFirstExecutionRunId()
// If firstExecutionRunID is set on the request, always try to cancel the currently running execution
if len(firstExecutionRunID) == 0 {
execution.RunId = request.WorkflowExecution.RunId
}
return e.updateWorkflow(ctx, namespaceID, execution,
func(context workflowExecutionContext, mutableState mutableState) (*updateWorkflowAction, error) {
if !mutableState.IsWorkflowExecutionRunning() {
// the request to cancel this workflow is a success even
// if the target workflow has already finished
return &updateWorkflowAction{noop: true}, nil
}
// There is a workflow execution currently running with the WorkflowID.
// If the user passed in a FirstExecutionRunID with the request to allow cancel to work across runs, then
// compare the FirstExecutionRunID on the request to make sure we cancel the correct workflow
// execution.
executionInfo := mutableState.GetExecutionInfo()
if len(firstExecutionRunID) > 0 && executionInfo.FirstExecutionRunId != firstExecutionRunID {
return nil, ErrWorkflowExecutionNotFound
}
if childWorkflowOnly {
parentWorkflowID := executionInfo.ParentWorkflowId
parentRunID := executionInfo.ParentRunId
if parentExecution.GetWorkflowId() != parentWorkflowID ||
parentExecution.GetRunId() != parentRunID {
return nil, ErrWorkflowParent
}
}
isCancelRequested := mutableState.IsCancelRequested()
if isCancelRequested {
// since cancellation is idempotent
return &updateWorkflowAction{noop: true}, nil
}
if _, err := mutableState.AddWorkflowExecutionCancelRequestedEvent(req); err != nil {
return nil, serviceerror.NewInternal("Unable to cancel workflow execution.")
}
return updateWorkflowWithNewWorkflowTask, nil
})
}
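// SignalWorkflowExecution records a WorkflowExecutionSignaled event in the target workflow's history and
// schedules a workflow task to process it (except for a cron workflow whose first run has not started yet).
// Signals are deduplicated by request ID and rejected once the per-execution signal limit is reached.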
func (e *historyEngineImpl) SignalWorkflowExecution(
ctx context.Context,
signalRequest *historyservice.SignalWorkflowExecutionRequest,
) error {
namespaceEntry, err := e.getActiveNamespaceEntry(signalRequest.GetNamespaceId())
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
request := signalRequest.SignalRequest
parentExecution := signalRequest.ExternalWorkflowExecution
childWorkflowOnly := signalRequest.GetChildWorkflowOnly()
execution := commonpb.WorkflowExecution{
WorkflowId: request.WorkflowExecution.WorkflowId,
RunId: request.WorkflowExecution.RunId,
}
return e.updateWorkflow(
ctx,
namespaceID,
execution,
func(context workflowExecutionContext, mutableState mutableState) (*updateWorkflowAction, error) {
executionInfo := mutableState.GetExecutionInfo()
createWorkflowTask := true
// Do not create a workflow task when the workflow is a cron workflow and the cron has not been started yet
if mutableState.GetExecutionInfo().CronSchedule != "" && !mutableState.HasProcessedOrPendingWorkflowTask() {
createWorkflowTask = false
}
postActions := &updateWorkflowAction{
createWorkflowTask: createWorkflowTask,
}
if !mutableState.IsWorkflowExecutionRunning() {
return nil, ErrWorkflowCompleted
}
maxAllowedSignals := e.config.MaximumSignalsPerExecution(namespaceEntry.GetInfo().Name)
if maxAllowedSignals > 0 && int(executionInfo.SignalCount) >= maxAllowedSignals {
e.logger.Info("Execution limit reached for maximum signals", tag.WorkflowSignalCount(executionInfo.SignalCount),
tag.WorkflowID(execution.GetWorkflowId()),
tag.WorkflowRunID(execution.GetRunId()),
tag.WorkflowNamespaceID(namespaceID))
return nil, ErrSignalsLimitExceeded
}
if childWorkflowOnly {
parentWorkflowID := executionInfo.ParentWorkflowId
parentRunID := executionInfo.ParentRunId
if parentExecution.GetWorkflowId() != parentWorkflowID ||
parentExecution.GetRunId() != parentRunID {
return nil, ErrWorkflowParent
}
}
// deduplicate by request id for signal workflow task
if requestID := request.GetRequestId(); requestID != "" {
if mutableState.IsSignalRequested(requestID) {
return postActions, nil
}
mutableState.AddSignalRequested(requestID)
}
if _, err := mutableState.AddWorkflowExecutionSignaled(
request.GetSignalName(),
request.GetInput(),
request.GetIdentity()); err != nil {
return nil, serviceerror.NewInternal("Unable to signal workflow execution.")
}
return postActions, nil
})
}
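// SignalWithStartWorkflowExecution signals the current run of a workflow if it is running; otherwise (or when
// no run exists) it starts a new run and records the signal as part of the new run's history.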
func (e *historyEngineImpl) SignalWithStartWorkflowExecution(
ctx context.Context,
signalWithStartRequest *historyservice.SignalWithStartWorkflowExecutionRequest,
) (retResp *historyservice.SignalWithStartWorkflowExecutionResponse, retError error) {
namespaceEntry, err := e.getActiveNamespaceEntry(signalWithStartRequest.GetNamespaceId())
if err != nil {
return nil, err
}
namespaceID := namespaceEntry.GetInfo().Id
sRequest := signalWithStartRequest.SignalWithStartRequest
execution := commonpb.WorkflowExecution{
WorkflowId: sRequest.WorkflowId,
}
var prevMutableState mutableState
attempt := 1
context, release, err0 := e.historyCache.getOrCreateWorkflowExecution(ctx, namespaceID, execution)
if err0 == nil {
defer func() { release(retError) }()
Just_Signal_Loop:
for ; attempt <= conditionalRetryCount; attempt++ {
// workflow does not exist, will create workflow then signal
mutableState, err1 := context.loadWorkflowExecution()
if err1 != nil {
if _, ok := err1.(*serviceerror.NotFound); ok {
break
}
return nil, err1
}
// workflow exists but is not running, will restart workflow then signal
if !mutableState.IsWorkflowExecutionRunning() {
prevMutableState = mutableState
break
}
executionInfo := mutableState.GetExecutionInfo()
maxAllowedSignals := e.config.MaximumSignalsPerExecution(namespaceEntry.GetInfo().Name)
if maxAllowedSignals > 0 && int(executionInfo.SignalCount) >= maxAllowedSignals {
e.logger.Info("Execution limit reached for maximum signals", tag.WorkflowSignalCount(executionInfo.SignalCount),
tag.WorkflowID(execution.GetWorkflowId()),
tag.WorkflowRunID(execution.GetRunId()),
tag.WorkflowNamespaceID(namespaceID))
return nil, ErrSignalsLimitExceeded
}
if _, err := mutableState.AddWorkflowExecutionSignaled(
sRequest.GetSignalName(),
sRequest.GetSignalInput(),
sRequest.GetIdentity()); err != nil {
return nil, serviceerror.NewInternal("Unable to signal workflow execution.")
}
// Create a transfer task to schedule a workflow task
if !mutableState.HasPendingWorkflowTask() {
_, err := mutableState.AddWorkflowTaskScheduledEvent(false)
if err != nil {
return nil, serviceerror.NewInternal("Failed to add workflow task scheduled event.")
}
}
// We apply the update to execution using optimistic concurrency. If it fails due to a conflict then reload
// the history and try the operation again.
if err := context.updateWorkflowExecutionAsActive(e.shard.GetTimeSource().Now()); err != nil {
if err == ErrConflict {
continue Just_Signal_Loop
}
return nil, err
}
return &historyservice.SignalWithStartWorkflowExecutionResponse{RunId: context.getExecution().RunId}, nil
} // end for Just_Signal_Loop
if attempt == conditionalRetryCount+1 {
return nil, ErrMaxAttemptsExceeded
}
} else {
if _, ok := err0.(*serviceerror.NotFound); !ok {
return nil, err0
}
// workflow does not exist, will create workflow then signal
}
// Start workflow and signal
startRequest := e.getStartRequest(namespaceID, sRequest)
request := startRequest.StartRequest
err = validateStartWorkflowExecutionRequest(request, e.config.MaxIDLengthLimit())
if err != nil {
return nil, err
}
e.overrideStartWorkflowExecutionRequest(namespaceEntry, request, metrics.HistorySignalWorkflowExecutionScope)
workflowID := request.GetWorkflowId()
// grab the current context as a lock, nothing more
_, currentRelease, err := e.historyCache.getOrCreateCurrentWorkflowExecution(
ctx,
namespaceID,
workflowID,
)
if err != nil {
return nil, err
}
defer func() { currentRelease(retError) }()
execution = commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: uuid.New(),
}
clusterMetadata := e.shard.GetService().GetClusterMetadata()
mutableState, err := e.createMutableState(clusterMetadata, namespaceEntry, execution.GetRunId())
if err != nil {
return nil, err
}
if prevMutableState != nil {
prevLastWriteVersion, err := prevMutableState.GetLastWriteVersion()
if err != nil {
return nil, err
}
if prevLastWriteVersion > mutableState.GetCurrentVersion() {
return nil, serviceerror.NewNamespaceNotActive(
namespaceEntry.GetInfo().Name,
clusterMetadata.GetCurrentClusterName(),
clusterMetadata.ClusterNameForFailoverVersion(prevLastWriteVersion),
)
}
err = e.applyWorkflowIDReusePolicyForSigWithStart(prevMutableState.GetExecutionState(), namespaceID, execution, request.WorkflowIdReusePolicy)
if err != nil {
return nil, err
}
}
// Add WF start event
startEvent, err := mutableState.AddWorkflowExecutionStartedEvent(
execution,
startRequest,
)
if err != nil {
return nil, serviceerror.NewInternal("Failed to add workflow execution started event.")
}
// Add signal event
if _, err := mutableState.AddWorkflowExecutionSignaled(
sRequest.GetSignalName(),
sRequest.GetSignalInput(),
sRequest.GetIdentity()); err != nil {
return nil, serviceerror.NewInternal("Failed to add workflow execution signaled event.")
}
if err = e.generateFirstWorkflowTask(
mutableState,
startRequest.ParentExecutionInfo,
startEvent,
); err != nil {
return nil, err
}
context = newWorkflowExecutionContext(namespaceID, execution, e.shard, e.executionManager, e.logger)
now := e.timeSource.Now()
newWorkflow, newWorkflowEventsSeq, err := mutableState.CloseTransactionAsSnapshot(
now,
transactionPolicyActive,
)
if err != nil {
return nil, err
}
historySize, err := context.persistFirstWorkflowEvents(newWorkflowEventsSeq[0])
if err != nil {
return nil, err
}
createMode := persistence.CreateWorkflowModeBrandNew
prevRunID := ""
prevLastWriteVersion := int64(0)
if prevMutableState != nil {
createMode = persistence.CreateWorkflowModeWorkflowIDReuse
prevRunID = prevMutableState.GetExecutionState().GetRunId()
prevLastWriteVersion, err = prevMutableState.GetLastWriteVersion()
if err != nil {
return nil, err
}
}
err = context.createWorkflowExecution(
newWorkflow, historySize, now,
createMode, prevRunID, prevLastWriteVersion,
)
if t, ok := err.(*persistence.WorkflowExecutionAlreadyStartedError); ok {
if t.StartRequestID == request.GetRequestId() {
return &historyservice.SignalWithStartWorkflowExecutionResponse{
RunId: t.RunID,
}, nil
// deleting the just-created history is expected here because a duplicate start request will have created history with a different run ID
}
return nil, err
}
if err != nil {
return nil, err
}
return &historyservice.SignalWithStartWorkflowExecutionResponse{
RunId: execution.RunId,
}, nil
}
// RemoveSignalMutableState removes the signal request ID from signal_requested, which is used for deduplication
func (e *historyEngineImpl) RemoveSignalMutableState(
ctx context.Context,
request *historyservice.RemoveSignalMutableStateRequest,
) error {
namespaceEntry, err := e.getActiveNamespaceEntry(request.GetNamespaceId())
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
execution := commonpb.WorkflowExecution{
WorkflowId: request.WorkflowExecution.WorkflowId,
RunId: request.WorkflowExecution.RunId,
}
return e.updateWorkflowExecution(ctx, namespaceID, execution, false,
func(context workflowExecutionContext, mutableState mutableState) error {
if !mutableState.IsWorkflowExecutionRunning() {
return ErrWorkflowCompleted
}
mutableState.DeleteSignalRequested(request.GetRequestId())
return nil
})
}
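// TerminateWorkflowExecution terminates a running workflow execution, recording a termination event with the
// supplied reason, details, and identity. When FirstExecutionRunId is set, the currently running execution is
// only terminated if it belongs to the same execution chain.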
func (e *historyEngineImpl) TerminateWorkflowExecution(
ctx context.Context,
terminateRequest *historyservice.TerminateWorkflowExecutionRequest,
) error {
namespaceEntry, err := e.getActiveNamespaceEntry(terminateRequest.GetNamespaceId())
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
request := terminateRequest.TerminateRequest
execution := commonpb.WorkflowExecution{
WorkflowId: request.WorkflowExecution.WorkflowId,
}
firstExecutionRunID := request.GetFirstExecutionRunId()
// If firstExecutionRunID is set on the request, always try to terminate the currently running execution
if len(firstExecutionRunID) == 0 {
execution.RunId = request.WorkflowExecution.RunId
}
return e.updateWorkflow(
ctx,
namespaceID,
execution,
func(context workflowExecutionContext, mutableState mutableState) (*updateWorkflowAction, error) {
if !mutableState.IsWorkflowExecutionRunning() {
return nil, ErrWorkflowCompleted
}
// There is a workflow execution currently running with the WorkflowID.
// If the user passed in a FirstExecutionRunID with the request to allow terminate to work across runs, then
// compare the FirstExecutionRunID on the request to make sure we terminate the correct workflow
// execution.
executionInfo := mutableState.GetExecutionInfo()
if len(firstExecutionRunID) > 0 && executionInfo.FirstExecutionRunId != firstExecutionRunID {
return nil, ErrWorkflowExecutionNotFound
}
eventBatchFirstEventID := mutableState.GetNextEventID()
return updateWorkflowWithoutWorkflowTask, terminateWorkflow(
mutableState,
eventBatchFirstEventID,
request.GetReason(),
request.GetDetails(),
request.GetIdentity(),
)
})
}
// RecordChildExecutionCompleted records the completion of a child execution in the parent execution's history
func (e *historyEngineImpl) RecordChildExecutionCompleted(
ctx context.Context,
completionRequest *historyservice.RecordChildExecutionCompletedRequest,
) error {
namespaceEntry, err := e.getActiveNamespaceEntry(completionRequest.GetNamespaceId())
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
execution := commonpb.WorkflowExecution{
WorkflowId: completionRequest.WorkflowExecution.WorkflowId,
RunId: completionRequest.WorkflowExecution.RunId,
}
return e.updateWorkflowExecution(ctx, namespaceID, execution, true,
func(context workflowExecutionContext, mutableState mutableState) error {
if !mutableState.IsWorkflowExecutionRunning() {
return ErrWorkflowCompleted
}
initiatedID := completionRequest.InitiatedId
completedExecution := completionRequest.CompletedExecution
completionEvent := completionRequest.CompletionEvent
// Check the mutable state to make sure the child execution is in the pending child executions
ci, isRunning := mutableState.GetChildExecutionInfo(initiatedID)
if !isRunning || ci.StartedId == common.EmptyEventID {
return serviceerror.NewNotFound("Pending child execution not found.")
}
switch completionEvent.GetEventType() {
case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED:
attributes := completionEvent.GetWorkflowExecutionCompletedEventAttributes()
_, err = mutableState.AddChildWorkflowExecutionCompletedEvent(initiatedID, completedExecution, attributes)
case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED:
attributes := completionEvent.GetWorkflowExecutionFailedEventAttributes()
_, err = mutableState.AddChildWorkflowExecutionFailedEvent(initiatedID, completedExecution, attributes)
case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED:
attributes := completionEvent.GetWorkflowExecutionCanceledEventAttributes()
_, err = mutableState.AddChildWorkflowExecutionCanceledEvent(initiatedID, completedExecution, attributes)
case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED:
attributes := completionEvent.GetWorkflowExecutionTerminatedEventAttributes()
_, err = mutableState.AddChildWorkflowExecutionTerminatedEvent(initiatedID, completedExecution, attributes)
case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT:
attributes := completionEvent.GetWorkflowExecutionTimedOutEventAttributes()
_, err = mutableState.AddChildWorkflowExecutionTimedOutEvent(initiatedID, completedExecution, attributes)
}
return err
})
}
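// ReplicateEventsV2 applies replicated history events from a remote cluster via the nDC replicator.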
func (e *historyEngineImpl) ReplicateEventsV2(
ctx context.Context,
replicateRequest *historyservice.ReplicateEventsV2Request,
) error {
return e.nDCReplicator.ApplyEvents(ctx, replicateRequest)
}
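// SyncShardStatus records the remote cluster's shard time and notifies the standby transfer and timer queue
// processors.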
func (e *historyEngineImpl) SyncShardStatus(
ctx context.Context,
request *historyservice.SyncShardStatusRequest,
) error {
clusterName := request.GetSourceCluster()
now := timestamp.TimeValue(request.GetStatusTime())
// here there are 3 main things:
// 1. update the view of the remote cluster's shard time
// 2. notify the timer gate in the timer queue standby processor
// 3. notify the transfer queue processor (essentially a no-op, just put it here so it looks symmetric)
e.shard.SetCurrentTime(clusterName, now)
e.txProcessor.NotifyNewTask(clusterName, []persistence.Task{})
e.timerProcessor.NotifyNewTimers(clusterName, []persistence.Task{})
return nil
}
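// SyncActivity applies replicated activity state from a remote cluster via the nDC activity replicator.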
func (e *historyEngineImpl) SyncActivity(
ctx context.Context,
request *historyservice.SyncActivityRequest,
) (retError error) {
return e.nDCActivityReplicator.SyncActivity(ctx, request)
}
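// ResetWorkflowExecution resets a workflow execution to the point just before the given workflow task finish
// event by creating a new run. Requests are deduplicated by request ID against the current run.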
func (e *historyEngineImpl) ResetWorkflowExecution(
ctx context.Context,
resetRequest *historyservice.ResetWorkflowExecutionRequest,
) (response *historyservice.ResetWorkflowExecutionResponse, retError error) {
request := resetRequest.ResetRequest
namespaceID := resetRequest.GetNamespaceId()
workflowID := request.WorkflowExecution.GetWorkflowId()
baseRunID := request.WorkflowExecution.GetRunId()
baseContext, baseReleaseFn, err := e.historyCache.getOrCreateWorkflowExecution(
ctx,
namespaceID,
commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: baseRunID,
},
)
if err != nil {
return nil, err
}
defer func() { baseReleaseFn(retError) }()
baseMutableState, err := baseContext.loadWorkflowExecution()
if err != nil {
return nil, err
}
if request.GetWorkflowTaskFinishEventId() <= common.FirstEventID ||
request.GetWorkflowTaskFinishEventId() >= baseMutableState.GetNextEventID() {
return nil, serviceerror.NewInvalidArgument("Workflow task finish ID must be > 1 && < workflow next event ID.")
}
// also load the current run of the workflow; it can be different from the base runID
resp, err := e.executionManager.GetCurrentExecution(&persistence.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: request.WorkflowExecution.GetWorkflowId(),
})
if err != nil {
return nil, err
}
currentRunID := resp.RunID
if baseRunID == "" {
baseRunID = currentRunID
}
var currentContext workflowExecutionContext
var currentMutableState mutableState
var currentReleaseFn releaseWorkflowExecutionFunc
if currentRunID == baseRunID {
currentContext = baseContext
currentMutableState = baseMutableState
} else {
currentContext, currentReleaseFn, err = e.historyCache.getOrCreateWorkflowExecution(
ctx,
namespaceID,
commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: currentRunID,
},
)
if err != nil {
return nil, err
}
defer func() { currentReleaseFn(retError) }()
currentMutableState, err = currentContext.loadWorkflowExecution()
if err != nil {
return nil, err
}
}
// dedup by requestID
if currentMutableState.GetExecutionState().CreateRequestId == request.GetRequestId() {
e.logger.Info("Duplicated reset request",
tag.WorkflowID(workflowID),
tag.WorkflowRunID(currentRunID),
tag.WorkflowNamespaceID(namespaceID))
return &historyservice.ResetWorkflowExecutionResponse{
RunId: currentRunID,
}, nil
}
resetRunID := uuid.New()
baseRebuildLastEventID := request.GetWorkflowTaskFinishEventId() - 1
baseVersionHistories := baseMutableState.GetExecutionInfo().GetVersionHistories()
baseCurrentVersionHistory, err := versionhistory.GetCurrentVersionHistory(baseVersionHistories)
if err != nil {
return nil, err
}
baseRebuildLastEventVersion, err := versionhistory.GetVersionHistoryEventVersion(baseCurrentVersionHistory, baseRebuildLastEventID)
if err != nil {
return nil, err
}
baseCurrentBranchToken := baseCurrentVersionHistory.GetBranchToken()
baseNextEventID := baseMutableState.GetNextEventID()
if err := e.workflowResetter.resetWorkflow(
ctx,
namespaceID,
workflowID,
baseRunID,
baseCurrentBranchToken,
baseRebuildLastEventID,
baseRebuildLastEventVersion,
baseNextEventID,
resetRunID,
request.GetRequestId(),
newNDCWorkflow(
ctx,
e.shard.GetNamespaceCache(),
e.shard.GetClusterMetadata(),
currentContext,
currentMutableState,
currentReleaseFn,
),
request.GetReason(),
nil,
); err != nil {
return nil, err
}
return &historyservice.ResetWorkflowExecutionResponse{
RunId: resetRunID,
}, nil
}
func (e *historyEngineImpl) updateWorkflow(
ctx context.Context,
namespaceID string,
execution commonpb.WorkflowExecution,
action updateWorkflowActionFunc,
) (retError error) {
workflowContext, err := e.loadWorkflow(ctx, namespaceID, execution.GetWorkflowId(), execution.GetRunId())
if err != nil {
return err
}
defer func() { workflowContext.getReleaseFn()(retError) }()
return e.updateWorkflowHelper(workflowContext, action)
}
func (e *historyEngineImpl) updateWorkflowExecutionWithAction(
ctx context.Context,
namespaceID string,
execution commonpb.WorkflowExecution,
action updateWorkflowActionFunc,
) (retError error) {
workflowContext, err := e.loadWorkflowOnce(ctx, namespaceID, execution.GetWorkflowId(), execution.GetRunId())
if err != nil {
return err
}
defer func() { workflowContext.getReleaseFn()(retError) }()
return e.updateWorkflowHelper(workflowContext, action)
}
func (e *historyEngineImpl) updateWorkflowHelper(
workflowContext workflowContext,
action updateWorkflowActionFunc,
) (retError error) {
UpdateHistoryLoop:
for attempt := 1; attempt <= conditionalRetryCount; attempt++ {
weContext := workflowContext.getContext()
mutableState := workflowContext.getMutableState()
// conduct caller action
postActions, err := action(weContext, mutableState)
if err != nil {
if err == ErrStaleState {
				// Handler detected that the cached workflow mutable state could potentially be stale
// Reload workflow execution history
workflowContext.getContext().clear()
if attempt != conditionalRetryCount {
_, err = workflowContext.reloadMutableState()
if err != nil {
return err
}
}
continue UpdateHistoryLoop
}
// Returned error back to the caller
return err
}
if postActions.noop {
return nil
}
if postActions.createWorkflowTask {
// Create a transfer task to schedule a workflow task
if !mutableState.HasPendingWorkflowTask() {
_, err := mutableState.AddWorkflowTaskScheduledEvent(false)
if err != nil {
return serviceerror.NewInternal("Failed to add workflow task scheduled event.")
}
}
}
err = workflowContext.getContext().updateWorkflowExecutionAsActive(e.shard.GetTimeSource().Now())
if err == ErrConflict {
if attempt != conditionalRetryCount {
_, err = workflowContext.reloadMutableState()
if err != nil {
return err
}
}
continue UpdateHistoryLoop
}
return err
}
return ErrMaxAttemptsExceeded
}
// TODO: remove and use updateWorkflowExecutionWithAction
func (e *historyEngineImpl) updateWorkflowExecution(
ctx context.Context,
namespaceID string,
execution commonpb.WorkflowExecution,
createWorkflowTask bool,
action func(context workflowExecutionContext, mutableState mutableState) error,
) error {
return e.updateWorkflowExecutionWithAction(
ctx,
namespaceID,
execution,
getUpdateWorkflowActionFunc(createWorkflowTask, action),
)
}
func getUpdateWorkflowActionFunc(
createWorkflowTask bool,
action func(context workflowExecutionContext, mutableState mutableState) error,
) updateWorkflowActionFunc {
return func(context workflowExecutionContext, mutableState mutableState) (*updateWorkflowAction, error) {
err := action(context, mutableState)
if err != nil {
return nil, err
}
postActions := &updateWorkflowAction{
createWorkflowTask: createWorkflowTask,
}
return postActions, nil
}
}
func (e *historyEngineImpl) failWorkflowTask(
context workflowExecutionContext,
scheduleID int64,
startedID int64,
workflowTaskFailedErr *workflowTaskFailedError,
request *workflowservice.RespondWorkflowTaskCompletedRequest,
) (mutableState, error) {
// Clear any updates we have accumulated so far
context.clear()
// Reload workflow execution so we can apply the workflow task failure event
mutableState, err := context.loadWorkflowExecution()
if err != nil {
return nil, err
}
if _, err = mutableState.AddWorkflowTaskFailedEvent(
scheduleID,
startedID,
workflowTaskFailedErr.failedCause,
failure.NewServerFailure(workflowTaskFailedErr.Error(), true),
request.GetIdentity(),
request.GetBinaryChecksum(),
"",
"",
0); err != nil {
return nil, err
}
// Return new builder back to the caller for further updates
return mutableState, nil
}
func (e *historyEngineImpl) NotifyNewHistoryEvent(
notification *events.Notification,
) {
e.eventNotifier.NotifyNewHistoryEvent(notification)
}
func (e *historyEngineImpl) NotifyNewTransferTasks(
tasks []persistence.Task,
) {
if len(tasks) > 0 {
task := tasks[0]
clusterName := e.clusterMetadata.ClusterNameForFailoverVersion(task.GetVersion())
e.txProcessor.NotifyNewTask(clusterName, tasks)
}
}
func (e *historyEngineImpl) NotifyNewReplicationTasks(
tasks []persistence.Task,
) {
if len(tasks) > 0 {
e.replicatorProcessor.notifyNewTask()
}
}
func (e *historyEngineImpl) NotifyNewTimerTasks(
tasks []persistence.Task,
) {
if len(tasks) > 0 {
task := tasks[0]
clusterName := e.clusterMetadata.ClusterNameForFailoverVersion(task.GetVersion())
e.timerProcessor.NotifyNewTimers(clusterName, tasks)
}
}
func validateStartWorkflowExecutionRequest(
request *workflowservice.StartWorkflowExecutionRequest,
maxIDLengthLimit int,
) error {
if len(request.GetRequestId()) == 0 {
return serviceerror.NewInvalidArgument("Missing request ID.")
}
if timestamp.DurationValue(request.GetWorkflowExecutionTimeout()) < 0 {
return serviceerror.NewInvalidArgument("Invalid WorkflowExecutionTimeoutSeconds.")
}
if timestamp.DurationValue(request.GetWorkflowRunTimeout()) < 0 {
return serviceerror.NewInvalidArgument("Invalid WorkflowRunTimeoutSeconds.")
}
if timestamp.DurationValue(request.GetWorkflowTaskTimeout()) < 0 {
return serviceerror.NewInvalidArgument("Invalid WorkflowTaskTimeoutSeconds.")
}
if request.TaskQueue == nil || request.TaskQueue.GetName() == "" {
return serviceerror.NewInvalidArgument("Missing Taskqueue.")
}
if request.WorkflowType == nil || request.WorkflowType.GetName() == "" {
return serviceerror.NewInvalidArgument("Missing WorkflowType.")
}
if len(request.GetNamespace()) > maxIDLengthLimit {
return serviceerror.NewInvalidArgument("Namespace exceeds length limit.")
}
if len(request.GetWorkflowId()) > maxIDLengthLimit {
return serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.")
}
if len(request.TaskQueue.GetName()) > maxIDLengthLimit {
return serviceerror.NewInvalidArgument("TaskQueue exceeds length limit.")
}
if len(request.WorkflowType.GetName()) > maxIDLengthLimit {
return serviceerror.NewInvalidArgument("WorkflowType exceeds length limit.")
}
return common.ValidateRetryPolicy(request.RetryPolicy)
}
func (e *historyEngineImpl) overrideStartWorkflowExecutionRequest(
namespaceEntry *cache.NamespaceCacheEntry,
request *workflowservice.StartWorkflowExecutionRequest,
metricsScope int,
) {
namespace := namespaceEntry.GetInfo().Name
// workflow execution timeout is left as is
// if workflow execution timeout == 0 -> infinity
workflowRunTimeout := common.OverrideWorkflowRunTimeout(
timestamp.DurationValue(request.GetWorkflowRunTimeout()),
timestamp.DurationValue(request.GetWorkflowExecutionTimeout()),
)
if workflowRunTimeout != timestamp.DurationValue(request.GetWorkflowRunTimeout()) {
request.WorkflowRunTimeout = timestamp.DurationPtr(workflowRunTimeout)
e.metricsClient.Scope(
metricsScope,
metrics.NamespaceTag(namespace),
).IncCounter(metrics.WorkflowRunTimeoutOverrideCount)
}
workflowTaskStartToCloseTimeout := common.OverrideWorkflowTaskTimeout(
request.GetNamespace(),
timestamp.DurationValue(request.GetWorkflowTaskTimeout()),
timestamp.DurationValue(request.GetWorkflowRunTimeout()),
e.config.DefaultWorkflowTaskTimeout,
)
if workflowTaskStartToCloseTimeout != timestamp.DurationValue(request.GetWorkflowTaskTimeout()) {
request.WorkflowTaskTimeout = timestamp.DurationPtr(workflowTaskStartToCloseTimeout)
e.metricsClient.Scope(
metricsScope,
metrics.NamespaceTag(namespace),
).IncCounter(metrics.WorkflowTaskTimeoutOverrideCount)
}
}
func validateNamespaceUUID(
namespaceUUID string,
) (string, error) {
if namespaceUUID == "" {
return "", serviceerror.NewInvalidArgument("Missing namespace UUID.")
} else if uuid.Parse(namespaceUUID) == nil {
return "", serviceerror.NewInvalidArgument("Invalid namespace UUID.")
}
return namespaceUUID, nil
}
func (e *historyEngineImpl) getActiveNamespaceEntry(
namespaceUUID string,
) (*cache.NamespaceCacheEntry, error) {
return getActiveNamespaceEntryFromShard(e.shard, namespaceUUID)
}
func getActiveNamespaceEntryFromShard(
shard shard.Context,
namespaceUUID string,
) (*cache.NamespaceCacheEntry, error) {
namespaceID, err := validateNamespaceUUID(namespaceUUID)
if err != nil {
return nil, err
}
namespaceEntry, err := shard.GetNamespaceCache().GetNamespaceByID(namespaceID)
if err != nil {
return nil, err
}
if err = namespaceEntry.GetNamespaceNotActiveErr(); err != nil {
return nil, err
}
return namespaceEntry, nil
}
func getScheduleID(
activityID string,
mutableState mutableState,
) (int64, error) {
if activityID == "" {
return 0, serviceerror.NewInvalidArgument("Neither ActivityID nor ScheduleID is provided")
}
activityInfo, ok := mutableState.GetActivityByActivityID(activityID)
if !ok {
return 0, serviceerror.NewInvalidArgument("Cannot locate Activity ScheduleID")
}
return activityInfo.ScheduleId, nil
}
func (e *historyEngineImpl) getStartRequest(
namespaceID string,
request *workflowservice.SignalWithStartWorkflowExecutionRequest,
) *historyservice.StartWorkflowExecutionRequest {
req := &workflowservice.StartWorkflowExecutionRequest{
Namespace: request.GetNamespace(),
WorkflowId: request.GetWorkflowId(),
WorkflowType: request.GetWorkflowType(),
TaskQueue: request.GetTaskQueue(),
Input: request.GetInput(),
WorkflowExecutionTimeout: request.GetWorkflowExecutionTimeout(),
WorkflowRunTimeout: request.GetWorkflowRunTimeout(),
WorkflowTaskTimeout: request.GetWorkflowTaskTimeout(),
Identity: request.GetIdentity(),
RequestId: request.GetRequestId(),
WorkflowIdReusePolicy: request.GetWorkflowIdReusePolicy(),
RetryPolicy: request.GetRetryPolicy(),
CronSchedule: request.GetCronSchedule(),
Memo: request.GetMemo(),
SearchAttributes: request.GetSearchAttributes(),
Header: request.GetHeader(),
}
return common.CreateHistoryStartWorkflowRequest(namespaceID, req, nil, e.shard.GetTimeSource().Now())
}
func setTaskInfo(
version int64,
timestamp time.Time,
transferTasks []persistence.Task,
timerTasks []persistence.Task,
) {
// set both the task version, as well as the timestamp on the transfer tasks
for _, task := range transferTasks {
task.SetVersion(version)
task.SetVisibilityTimestamp(timestamp)
}
for _, task := range timerTasks {
task.SetVersion(version)
}
}
// for startWorkflowExecution & signalWithStart to handle workflow reuse policy
func (e *historyEngineImpl) applyWorkflowIDReusePolicyForSigWithStart(
prevExecutionState *persistencespb.WorkflowExecutionState,
namespaceID string,
execution commonpb.WorkflowExecution,
wfIDReusePolicy enumspb.WorkflowIdReusePolicy,
) error {
prevStartRequestID := prevExecutionState.CreateRequestId
prevRunID := prevExecutionState.RunId
prevState := prevExecutionState.State
prevStatus := prevExecutionState.Status
return e.applyWorkflowIDReusePolicyHelper(
prevStartRequestID,
prevRunID,
prevState,
prevStatus,
namespaceID,
execution,
wfIDReusePolicy,
)
}
func (e *historyEngineImpl) applyWorkflowIDReusePolicyHelper(
prevStartRequestID,
prevRunID string,
prevState enumsspb.WorkflowExecutionState,
prevStatus enumspb.WorkflowExecutionStatus,
namespaceID string,
execution commonpb.WorkflowExecution,
wfIDReusePolicy enumspb.WorkflowIdReusePolicy,
) error {
// here we know there is some information about the prev workflow, i.e. either running right now
	// or has history. Check if this workflow is finished
switch prevState {
case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED,
enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:
msg := "Workflow execution is already running. WorkflowId: %v, RunId: %v."
return getWorkflowAlreadyStartedError(msg, prevStartRequestID, execution.GetWorkflowId(), prevRunID)
case enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED:
// previous workflow completed, proceed
default:
// persistence.WorkflowStateZombie or unknown type
return serviceerror.NewInternal(fmt.Sprintf("Failed to process workflow, workflow has invalid state: %v.", prevState))
}
switch wfIDReusePolicy {
case enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY:
if _, ok := FailedWorkflowStatuses[prevStatus]; !ok {
msg := "Workflow execution already finished successfully. WorkflowId: %v, RunId: %v. Workflow Id reuse policy: allow duplicate workflow Id if last run failed."
return getWorkflowAlreadyStartedError(msg, prevStartRequestID, execution.GetWorkflowId(), prevRunID)
}
case enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE:
		// as long as the workflow is not running, this case needs no check
case enumspb.WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE:
msg := "Workflow execution already finished. WorkflowId: %v, RunId: %v. Workflow Id reuse policy: reject duplicate workflow Id."
return getWorkflowAlreadyStartedError(msg, prevStartRequestID, execution.GetWorkflowId(), prevRunID)
default:
return serviceerror.NewInternal(fmt.Sprintf("Failed to process start workflow reuse policy: %v.", wfIDReusePolicy))
}
return nil
}
func getWorkflowAlreadyStartedError(errMsg string, createRequestID string, workflowID string, runID string) error {
return serviceerror.NewWorkflowExecutionAlreadyStarted(
fmt.Sprintf(errMsg, workflowID, runID),
createRequestID,
runID,
)
}
func (e *historyEngineImpl) GetReplicationMessages(
ctx context.Context,
pollingCluster string,
lastReadMessageID int64,
) (*replicationspb.ReplicationMessages, error) {
scope := metrics.HistoryGetReplicationMessagesScope
sw := e.metricsClient.StartTimer(scope, metrics.GetReplicationMessagesForShardLatency)
defer sw.Stop()
replicationMessages, err := e.replicatorProcessor.getTasks(
ctx,
pollingCluster,
lastReadMessageID,
)
if err != nil {
e.logger.Error("Failed to retrieve replication messages.", tag.Error(err))
return nil, err
}
// Set cluster status for sync shard info
replicationMessages.SyncShardStatus = &replicationspb.SyncShardStatus{
StatusTime: timestamp.TimePtr(e.timeSource.Now()),
}
e.logger.Debug("Successfully fetched replication messages.", tag.Counter(len(replicationMessages.ReplicationTasks)))
return replicationMessages, nil
}
func (e *historyEngineImpl) GetDLQReplicationMessages(
ctx context.Context,
taskInfos []*replicationspb.ReplicationTaskInfo,
) ([]*replicationspb.ReplicationTask, error) {
scope := metrics.HistoryGetDLQReplicationMessagesScope
sw := e.metricsClient.StartTimer(scope, metrics.GetDLQReplicationMessagesLatency)
defer sw.Stop()
tasks := make([]*replicationspb.ReplicationTask, 0, len(taskInfos))
for _, taskInfo := range taskInfos {
task, err := e.replicatorProcessor.getTask(ctx, taskInfo)
if err != nil {
e.logger.Error("Failed to fetch DLQ replication messages.", tag.Error(err))
return nil, err
}
tasks = append(tasks, task)
}
return tasks, nil
}
func (e *historyEngineImpl) ReapplyEvents(
ctx context.Context,
namespaceUUID string,
workflowID string,
runID string,
reapplyEvents []*historypb.HistoryEvent,
) error {
if e.config.SkipReapplicationByNamespaceId(namespaceUUID) {
return nil
}
namespaceEntry, err := e.getActiveNamespaceEntry(namespaceUUID)
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
	// remove run id from the execution so that we reapply events to the current run
currentExecution := commonpb.WorkflowExecution{
WorkflowId: workflowID,
}
return e.updateWorkflowExecutionWithAction(
ctx,
namespaceID,
currentExecution,
func(context workflowExecutionContext, mutableState mutableState) (*updateWorkflowAction, error) {
// Filter out reapply event from the same cluster
toReapplyEvents := make([]*historypb.HistoryEvent, 0, len(reapplyEvents))
lastWriteVersion, err := mutableState.GetLastWriteVersion()
if err != nil {
return nil, err
}
for _, event := range reapplyEvents {
if event.GetVersion() == lastWriteVersion {
// The reapply is from the same cluster. Ignoring.
continue
}
dedupResource := definition.NewEventReappliedID(runID, event.GetEventId(), event.GetVersion())
if mutableState.IsResourceDuplicated(dedupResource) {
					// already applied the signal
continue
}
toReapplyEvents = append(toReapplyEvents, event)
}
if len(toReapplyEvents) == 0 {
return &updateWorkflowAction{
noop: true,
}, nil
}
if !mutableState.IsWorkflowExecutionRunning() {
// need to reset target workflow (which is also the current workflow)
// to accept events to be reapplied
baseRunID := mutableState.GetExecutionState().GetRunId()
resetRunID := uuid.New()
baseRebuildLastEventID := mutableState.GetPreviousStartedEventID()
// TODO when https://github.com/uber/cadence/issues/2420 is finished, remove this block,
// since cannot reapply event to a finished workflow which had no workflow tasks started
if baseRebuildLastEventID == common.EmptyEventID {
e.logger.Warn("cannot reapply event to a finished workflow",
tag.WorkflowNamespaceID(namespaceID),
tag.WorkflowID(currentExecution.GetWorkflowId()),
)
e.metricsClient.IncCounter(metrics.HistoryReapplyEventsScope, metrics.EventReapplySkippedCount)
return &updateWorkflowAction{noop: true}, nil
}
baseVersionHistories := mutableState.GetExecutionInfo().GetVersionHistories()
baseCurrentVersionHistory, err := versionhistory.GetCurrentVersionHistory(baseVersionHistories)
if err != nil {
return nil, err
}
baseRebuildLastEventVersion, err := versionhistory.GetVersionHistoryEventVersion(baseCurrentVersionHistory, baseRebuildLastEventID)
if err != nil {
return nil, err
}
baseCurrentBranchToken := baseCurrentVersionHistory.GetBranchToken()
baseNextEventID := mutableState.GetNextEventID()
if err = e.workflowResetter.resetWorkflow(
ctx,
namespaceID,
workflowID,
baseRunID,
baseCurrentBranchToken,
baseRebuildLastEventID,
baseRebuildLastEventVersion,
baseNextEventID,
resetRunID,
uuid.New(),
newNDCWorkflow(
ctx,
e.shard.GetNamespaceCache(),
e.shard.GetClusterMetadata(),
context,
mutableState,
noopReleaseFn,
),
eventsReapplicationResetWorkflowReason,
toReapplyEvents,
); err != nil {
return nil, err
}
return &updateWorkflowAction{
noop: true,
}, nil
}
postActions := &updateWorkflowAction{
createWorkflowTask: true,
}
// Do not create workflow task when the workflow is cron and the cron has not been started yet
if mutableState.GetExecutionInfo().CronSchedule != "" && !mutableState.HasProcessedOrPendingWorkflowTask() {
postActions.createWorkflowTask = false
}
reappliedEvents, err := e.eventsReapplier.reapplyEvents(
ctx,
mutableState,
toReapplyEvents,
runID,
)
if err != nil {
e.logger.Error("failed to re-apply stale events", tag.Error(err))
return nil, serviceerror.NewInternal("unable to re-apply stale events")
}
if len(reappliedEvents) == 0 {
return &updateWorkflowAction{
noop: true,
}, nil
}
return postActions, nil
})
}
func (e *historyEngineImpl) GetDLQMessages(
ctx context.Context,
request *historyservice.GetDLQMessagesRequest,
) (*historyservice.GetDLQMessagesResponse, error) {
_, ok := e.clusterMetadata.GetAllClusterInfo()[request.GetSourceCluster()]
if !ok {
return nil, ErrUnknownCluster
}
tasks, token, err := e.replicationDLQHandler.getMessages(
ctx,
request.GetSourceCluster(),
request.GetInclusiveEndMessageId(),
int(request.GetMaximumPageSize()),
request.GetNextPageToken(),
)
if err != nil {
return nil, err
}
return &historyservice.GetDLQMessagesResponse{
Type: request.GetType(),
ReplicationTasks: tasks,
NextPageToken: token,
}, nil
}
func (e *historyEngineImpl) PurgeDLQMessages(
ctx context.Context,
request *historyservice.PurgeDLQMessagesRequest,
) error {
_, ok := e.clusterMetadata.GetAllClusterInfo()[request.GetSourceCluster()]
if !ok {
return ErrUnknownCluster
}
return e.replicationDLQHandler.purgeMessages(
request.GetSourceCluster(),
request.GetInclusiveEndMessageId(),
)
}
func (e *historyEngineImpl) MergeDLQMessages(
ctx context.Context,
request *historyservice.MergeDLQMessagesRequest,
) (*historyservice.MergeDLQMessagesResponse, error) {
_, ok := e.clusterMetadata.GetAllClusterInfo()[request.GetSourceCluster()]
if !ok {
return nil, ErrUnknownCluster
}
token, err := e.replicationDLQHandler.mergeMessages(
ctx,
request.GetSourceCluster(),
request.GetInclusiveEndMessageId(),
int(request.GetMaximumPageSize()),
request.GetNextPageToken(),
)
if err != nil {
return nil, err
}
return &historyservice.MergeDLQMessagesResponse{
NextPageToken: token,
}, nil
}
func (e *historyEngineImpl) RefreshWorkflowTasks(
ctx context.Context,
namespaceUUID string,
execution commonpb.WorkflowExecution,
) (retError error) {
namespaceEntry, err := e.getActiveNamespaceEntry(namespaceUUID)
if err != nil {
return err
}
namespaceID := namespaceEntry.GetInfo().Id
context, release, err := e.historyCache.getOrCreateWorkflowExecution(ctx, namespaceID, execution)
if err != nil {
return err
}
defer func() { release(retError) }()
mutableState, err := context.loadWorkflowExecution()
if err != nil {
return err
}
if !mutableState.IsWorkflowExecutionRunning() {
return nil
}
mutableStateTaskRefresher := newMutableStateTaskRefresher(
e.shard.GetConfig(),
e.shard.GetNamespaceCache(),
e.shard.GetEventsCache(),
e.shard.GetLogger(),
)
now := e.shard.GetTimeSource().Now()
err = mutableStateTaskRefresher.refreshTasks(now, mutableState)
if err != nil {
return err
}
err = context.updateWorkflowExecutionAsActive(now)
if err != nil {
return err
}
return nil
}
func (e *historyEngineImpl) loadWorkflowOnce(
ctx context.Context,
namespaceID string,
workflowID string,
runID string,
) (workflowContext, error) {
context, release, err := e.historyCache.getOrCreateWorkflowExecution(
ctx,
namespaceID,
commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: runID,
},
)
if err != nil {
return nil, err
}
mutableState, err := context.loadWorkflowExecution()
if err != nil {
release(err)
return nil, err
}
return newWorkflowContext(context, release, mutableState), nil
}
func (e *historyEngineImpl) loadWorkflow(
ctx context.Context,
namespaceID string,
workflowID string,
runID string,
) (workflowContext, error) {
if runID != "" {
return e.loadWorkflowOnce(ctx, namespaceID, workflowID, runID)
}
for attempt := 1; attempt <= conditionalRetryCount; attempt++ {
workflowContext, err := e.loadWorkflowOnce(ctx, namespaceID, workflowID, "")
if err != nil {
return nil, err
}
if workflowContext.getMutableState().IsWorkflowExecutionRunning() {
return workflowContext, nil
}
// workflow not running, need to check current record
resp, err := e.shard.GetExecutionManager().GetCurrentExecution(
&persistence.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: workflowID,
},
)
if err != nil {
workflowContext.getReleaseFn()(err)
return nil, err
}
if resp.RunID == workflowContext.getRunID() {
return workflowContext, nil
}
workflowContext.getReleaseFn()(nil)
}
return nil, serviceerror.NewInternal("unable to locate current workflow execution")
}
| 1 | 10,845 | FYI, visibility task processing is local only, meaning pushing something to local ES cluster / local archival endpoint | temporalio-temporal | go |
@@ -36,6 +36,8 @@ import org.springframework.http.HttpMethod;
import org.springframework.http.ResponseEntity;
import org.springframework.web.client.RestTemplate;
+import com.netflix.config.DynamicPropertyFactory;
+
public class SpringmvcClient {
private static RestTemplate templateUrlWithServiceName = new CseRestTemplate();
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.demo.springmvc.client;
import java.util.HashMap;
import java.util.Map;
import org.apache.servicecomb.core.CseContext;
import org.apache.servicecomb.demo.DemoConst;
import org.apache.servicecomb.demo.TestMgr;
import org.apache.servicecomb.demo.controller.Controller;
import org.apache.servicecomb.demo.controller.Person;
import org.apache.servicecomb.foundation.common.utils.BeanUtils;
import org.apache.servicecomb.foundation.common.utils.Log4jUtils;
import org.apache.servicecomb.provider.springmvc.reference.CseRestTemplate;
import org.apache.servicecomb.provider.springmvc.reference.RestTemplateBuilder;
import org.apache.servicecomb.provider.springmvc.reference.UrlWithServiceNameClientHttpRequestFactory;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.ResponseEntity;
import org.springframework.web.client.RestTemplate;
public class SpringmvcClient {
private static RestTemplate templateUrlWithServiceName = new CseRestTemplate();
private static RestTemplate restTemplate;
private static Controller controller;
public static void main(String[] args) throws Exception {
Log4jUtils.init();
BeanUtils.init();
run();
TestMgr.summary();
}
public static void run() {
templateUrlWithServiceName.setRequestFactory(new UrlWithServiceNameClientHttpRequestFactory());
restTemplate = RestTemplateBuilder.create();
controller = BeanUtils.getBean("controller");
String prefix = "cse://springmvc";
try {
// this test class is intended for retry hanging issue JAV-127
templateUrlWithServiceName.getForObject(prefix + "/controller/sayhi?name=throwexception", String.class);
TestMgr.check("true", "false");
} catch (Exception e) {
TestMgr.check("true", "true");
}
CodeFirstRestTemplateSpringmvc codeFirstClient =
BeanUtils.getContext().getBean(CodeFirstRestTemplateSpringmvc.class);
codeFirstClient.testCodeFirst(restTemplate, "springmvc", "/codeFirstSpringmvc/");
String microserviceName = "springmvc";
for (String transport : DemoConst.transports) {
CseContext.getInstance().getConsumerProviderManager().setTransport(microserviceName, transport);
TestMgr.setMsg(microserviceName, transport);
testController(templateUrlWithServiceName, microserviceName);
testController();
}
HttpHeaders headers = new HttpHeaders();
headers.set("Accept-Encoding", "gzip");
HttpEntity<String> entity = new HttpEntity<>(headers);
ResponseEntity<String> entityCompress =
restTemplate.exchange(prefix
+ "/codeFirstSpringmvc/sayhi/compressed/{name}/v2", HttpMethod.GET, entity, String.class, "Test");
TestMgr.check(
"Test sayhi compressed:This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text,This is a big text!",
entityCompress.getBody());
    // if the server response is compressed, the content-length header will be removed, so we can't check it.
    // the transfer-encoding header will be missing when the server is set to not compress
if (entityCompress.getHeaders().get("transfer-encoding") != null) {
TestMgr.check("chunked", entityCompress.getHeaders().get("transfer-encoding").get(0));
}
    // metrics integration test for version 0.5.0 and later
@SuppressWarnings("unchecked")
Map<String, Double> metrics = restTemplate.getForObject(prefix + "/metrics", Map.class);
// TestMgr.check(true, metrics.get("jvm(name=heapUsed,statistic=gauge)") != 0);
TestMgr.check(true, metrics.size() > 0);
TestMgr.check(true,
metrics.get(
"servicecomb.invocation(operation=springmvc.codeFirst.saySomething,role=PRODUCER,stage=total,statistic=count,status=200,transport=highway)")
>= 0);
//prometheus integration test
try {
String content = restTemplate.getForObject("cse://springmvc/codeFirstSpringmvc/prometheusForTest", String.class);
TestMgr.check(true, content.contains("servicecomb_invocation{operation=\"springmvc.codeFirst.addDate"));
TestMgr.check(true, content.contains("servicecomb_invocation{operation=\"springmvc.codeFirst.sayHello"));
TestMgr.check(true, content.contains("servicecomb_invocation{operation=\"springmvc.codeFirst.fallbackFromCache"));
TestMgr.check(true, content.contains("servicecomb_invocation{operation=\"springmvc.codeFirst.isTrue"));
TestMgr.check(true, content.contains("servicecomb_invocation{operation=\"springmvc.codeFirst.add"));
TestMgr.check(true, content.contains("servicecomb_invocation{operation=\"springmvc.codeFirst.sayHi2"));
TestMgr.check(true, content.contains("servicecomb_invocation{operation=\"springmvc.codeFirst.saySomething"));
String[] metricLines = content.split("\n");
if (metricLines.length > 0) {
for (String metricLine : metricLines) {
if (!metricLine.startsWith("#")) {
String[] metricKeyAndValue = metricLine.split(" ");
if (!metricKeyAndValue[0].startsWith("jvm")) {
if (Double.parseDouble(metricKeyAndValue[1]) < 0) {
TestMgr.check("true", "false");
break;
}
}
}
}
} else {
TestMgr.check("true", "false");
}
} catch (Exception e) {
TestMgr.check("true", "false");
}
}
private static void testController(RestTemplate template, String microserviceName) {
String prefix = "cse://" + microserviceName;
TestMgr.check("hi world [world]",
template.getForObject(prefix + "/controller/sayhi?name=world",
String.class));
TestMgr.check("hi world1 [world1]",
template.getForObject(prefix + "/controller/sayhi?name={name}",
String.class,
"world1"));
TestMgr.check("hi hi 中国 [hi 中国]",
template.getForObject(prefix + "/controller/sayhi?name={name}",
String.class,
"hi 中国"));
Map<String, String> params = new HashMap<>();
params.put("name", "world2");
TestMgr.check("hi world2 [world2]",
template.getForObject(prefix + "/controller/sayhi?name={name}",
String.class,
params));
TestMgr.check("hello world",
template.postForObject(prefix + "/controller/sayhello/{name}",
null,
String.class,
"world"));
TestMgr.check("hello hello 中国",
template.postForObject(prefix + "/controller/sayhello/{name}",
null,
String.class,
"hello 中国"));
HttpHeaders headers = new HttpHeaders();
headers.add("name", "world");
@SuppressWarnings("rawtypes")
HttpEntity entity = new HttpEntity<>(null, headers);
ResponseEntity<String> response = template.exchange(prefix + "/controller/sayhei",
HttpMethod.GET,
entity,
String.class);
TestMgr.check("hei world", response.getBody());
Person user = new Person();
user.setName("world");
TestMgr.check("ha world",
template.postForObject(prefix + "/controller/saysomething?prefix={prefix}",
user,
String.class,
"ha"));
}
private static void testController() {
TestMgr.check("hi world [world]", controller.sayHi("world"));
Person user = new Person();
user.setName("world");
TestMgr.check("ha world", controller.saySomething("ha", user));
}
}
| 1 | 9,838 | This import can be removed. | apache-servicecomb-java-chassis | java |
@@ -48,6 +48,10 @@ func ParseResource(s string, resParser ResourceParser) (resARN Resource, err err
return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
}
+ if strings.HasPrefix(a.Region, "fips-") || strings.HasSuffix(a.Region, "-fips") {
+ return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"}
+ }
+
if len(a.Resource) == 0 {
return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
} | 1 | package arn
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws/arn"
)
var supportedServiceARN = []string{
"s3",
"s3-outposts",
"s3-object-lambda",
}
func isSupportedServiceARN(service string) bool {
for _, name := range supportedServiceARN {
if name == service {
return true
}
}
return false
}
// Resource provides the interfaces abstracting ARNs of specific resource
// types.
type Resource interface {
GetARN() arn.ARN
String() string
}
// ResourceParser provides the function for parsing an ARN's resource
// component into a typed resource.
type ResourceParser func(arn.ARN) (Resource, error)
// ParseResource parses an AWS ARN into a typed resource for the S3 API.
func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) {
a, err := arn.Parse(s)
if err != nil {
return nil, err
}
if len(a.Partition) == 0 {
return nil, InvalidARNError{ARN: a, Reason: "partition not set"}
}
if !isSupportedServiceARN(a.Service) {
return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
}
if len(a.Resource) == 0 {
return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
}
return resParser(a)
}
// SplitResource splits the resource components by the ARN resource delimiters.
func SplitResource(v string) []string {
var parts []string
var offset int
for offset <= len(v) {
idx := strings.IndexAny(v[offset:], "/:")
if idx < 0 {
parts = append(parts, v[offset:])
break
}
parts = append(parts, v[offset:idx+offset])
offset += idx + 1
}
return parts
}
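// For example, SplitResource("accesspoint/myendpoint") returns
// []string{"accesspoint", "myendpoint"}, and a mixed input such as "a:b/c"
// returns []string{"a", "b", "c"}.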
// IsARN returns whether the given string is an ARN
func IsARN(s string) bool {
return arn.IsARN(s)
}
// InvalidARNError provides the error for an invalid ARN error.
type InvalidARNError struct {
ARN arn.ARN
Reason string
}
// Error returns a string denoting the occurred InvalidARNError
func (e InvalidARNError) Error() string {
return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String())
}
 | 1 | 10,340 | 1. Nit: We can have a general helper to identify if a region is a pseudo region. We might already have one somewhere. 2. Does any type of ARN support FIPS? If not, we may just move this check into the arn package? | aws-aws-sdk-go | go |
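A minimal sketch of the general helper suggested in the comment above, assuming a hypothetical name isPseudoRegion (the SDK may already ship an equivalent check):

import "strings"

// isPseudoRegion reports whether a region string looks like a FIPS pseudo
// region, e.g. "fips-us-east-1" or "us-east-1-fips".
func isPseudoRegion(region string) bool {
	return strings.HasPrefix(region, "fips-") || strings.HasSuffix(region, "-fips")
}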
@@ -8,10 +8,14 @@ name of the service that is used by Boto 3.
This factory is used by the make_stubber fixture found in the set of common fixtures.
"""
-from test_tools.s3_stubber import S3Stubber
from test_tools.dynamodb_stubber import DynamoStubber
+from test_tools.iam_stubber import IamStubber
+from test_tools.lambda_stubber import LambdaStubber
from test_tools.pinpoint_stubber import PinpointStubber
+from test_tools.s3_stubber import S3Stubber
+from test_tools.s3control_stubber import S3ControlStubber
from test_tools.sqs_stubber import SqsStubber
+from test_tools.sts_stubber import StsStubber
class StubberFactoryNotImplemented(Exception): | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
A factory function that returns the stubber for an AWS service, based on the
name of the service that is used by Boto 3.
This factory is used by the make_stubber fixture found in the set of common fixtures.
"""
from test_tools.s3_stubber import S3Stubber
from test_tools.dynamodb_stubber import DynamoStubber
from test_tools.pinpoint_stubber import PinpointStubber
from test_tools.sqs_stubber import SqsStubber
class StubberFactoryNotImplemented(Exception):
pass
def stubber_factory(service_name):
if service_name == 's3':
return S3Stubber
elif service_name == 'dynamodb':
return DynamoStubber
elif service_name == 'pinpoint':
return PinpointStubber
elif service_name == 'sqs':
return SqsStubber
else:
raise StubberFactoryNotImplemented(
"If you see this exception, it probably means that you forgot to add "
"a new stubber to stubber_factory.py.")
 | 1 | 17,033 | Is this phrase completed on the next line? Or forgot to add... what? | awsdocs-aws-doc-sdk-examples | rb |
@@ -0,0 +1,11 @@
+_base_ = [
+ '../_base_/models/mask_rcnn_r50_fpn.py',
+ '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py',
+ '../_base_/default_runtime.py'
+]
+model = dict(
+ roi_head=dict(
+ bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15)))
+# runtime settings
+total_epochs = 12
+workflow = [('train', 5)] | 1 | 1 | 19,981 | `('train', 5)` and `('train', 1)` are actually the same. | open-mmlab-mmdetection | py |
|
@@ -205,8 +205,8 @@ func (bt *backpressureTracker) beforeDiskBlockCachePut(blockResources int64) (
return availableResources
}
-func (bt *backpressureTracker) getLimitInfo() (used int64, limit int64) {
- return bt.used, bt.limit
+func (bt *backpressureTracker) getLimitInfo() (used int64, limit float64) {
+ return bt.used, bt.currLimit()
}
type backpressureTrackerStatus struct { | 1 | // Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"math"
"sync"
"time"
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/kbfssync"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// backpressureTracker keeps track of the variables used to calculate
// backpressure. It keeps track of a generic resource (which can be
// either bytes or files).
//
// Let U be the (approximate) resource usage of the journal and F be
// the free resources. Then we want to enforce
//
// U <= min(k(U+F), L),
//
// where 0 < k <= 1 is some fraction, and L > 0 is the absolute
// resource usage limit. But in addition to that, we want to set
// thresholds 0 <= m <= M <= 1 such that we apply proportional
// backpressure (with a given maximum delay) when
//
// m <= max(U/(k(U+F)), U/L) <= M,
//
// which is equivalent to
//
// m <= U/min(k(U+F), L) <= M.
//
// Note that this type doesn't do any locking, so it's the caller's
// responsibility to do so.
type backpressureTracker struct {
// minThreshold is m in the above.
minThreshold float64
// maxThreshold is M in the above.
maxThreshold float64
// limitFrac is k in the above.
limitFrac float64
// limit is L in the above.
limit int64
// used is U in the above.
used int64
// free is F in the above.
free int64
// semaphoreMax is the last calculated value of currLimit(),
// which is min(k(U+F), L).
semaphoreMax int64
// The count of the semaphore is semaphoreMax - U - I, where I
// is the resource count that is currently "in-flight",
// i.e. between beforeBlockPut() and afterBlockPut() calls.
semaphore *kbfssync.Semaphore
}
func newBackpressureTracker(minThreshold, maxThreshold, limitFrac float64,
limit, initialFree int64) (*backpressureTracker, error) {
if minThreshold < 0.0 {
return nil, errors.Errorf("minThreshold=%f < 0.0",
minThreshold)
}
if maxThreshold < minThreshold {
return nil, errors.Errorf(
"maxThreshold=%f < minThreshold=%f",
maxThreshold, minThreshold)
}
if 1.0 < maxThreshold {
return nil, errors.Errorf("1.0 < maxThreshold=%f",
maxThreshold)
}
if limitFrac < 0.01 {
return nil, errors.Errorf("limitFrac=%f < 0.01", limitFrac)
}
if limitFrac > 1.0 {
return nil, errors.Errorf("limitFrac=%f > 1.0", limitFrac)
}
if limit < 0 {
return nil, errors.Errorf("limit=%d < 0", limit)
}
if initialFree < 0 {
return nil, errors.Errorf("initialFree=%d < 0", initialFree)
}
bt := &backpressureTracker{
minThreshold, maxThreshold, limitFrac, limit,
0, initialFree, 0, kbfssync.NewSemaphore(),
}
bt.updateSemaphoreMax()
return bt, nil
}
// currLimit returns the resource limit, taking into account the
// amount of free resources left. This is min(k(U+F), L).
func (bt backpressureTracker) currLimit() float64 {
// Calculate k(U+F), converting to float64 first to avoid
// overflow, although losing some precision in the process.
usedFloat := float64(bt.used)
freeFloat := float64(bt.free)
limit := bt.limitFrac * (usedFloat + freeFloat)
return math.Min(limit, float64(bt.limit))
}
func (bt backpressureTracker) usedFrac() float64 {
return float64(bt.used) / bt.currLimit()
}
// delayScale returns a number between 0 and 1, which should be
// multiplied with the maximum delay to get the backpressure delay to
// apply.
func (bt backpressureTracker) delayScale() float64 {
usedFrac := bt.usedFrac()
// We want the delay to be 0 if usedFrac <= m and the max
// delay if usedFrac >= M, so linearly interpolate the delay
// scale.
m := bt.minThreshold
M := bt.maxThreshold
return math.Min(1.0, math.Max(0.0, (usedFrac-m)/(M-m)))
}
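// A worked example (a sketch using the defaults from
// makeDefaultBackpressureDiskLimiterParams below): with m=0.5, M=0.95, k=0.85
// and L=200 GiB, if U=100 GiB are used and F=50 GiB are free, then
// currLimit() = min(0.85*(100+50), 200) = 127.5 GiB, usedFrac() = 100/127.5 ≈
// 0.784, and delayScale() = (0.784-0.5)/(0.95-0.5) ≈ 0.63, i.e. roughly 63%
// of the maximum delay.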
// updateSemaphoreMax must be called whenever bt.used or bt.free
// changes.
func (bt *backpressureTracker) updateSemaphoreMax() {
newMax := int64(bt.currLimit())
delta := newMax - bt.semaphoreMax
// These operations are adjusting the *maximum* value of
// bt.semaphore.
if delta > 0 {
bt.semaphore.Release(delta)
} else if delta < 0 {
bt.semaphore.ForceAcquire(-delta)
}
bt.semaphoreMax = newMax
}
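// For example, if currLimit() grows from 100 to 120, Release(20) raises the
// semaphore's effective maximum; if it later shrinks to 90, ForceAcquire(30)
// lowers it again, possibly driving the semaphore count negative until enough
// used resources are freed.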
func (bt *backpressureTracker) onEnable(usedResources int64) (
availableResources int64) {
bt.used += usedResources
bt.updateSemaphoreMax()
if usedResources == 0 {
return bt.semaphore.Count()
}
return bt.semaphore.ForceAcquire(usedResources)
}
func (bt *backpressureTracker) onDisable(usedResources int64) {
bt.used -= usedResources
bt.updateSemaphoreMax()
if usedResources > 0 {
bt.semaphore.Release(usedResources)
}
}
func (bt *backpressureTracker) updateFree(freeResources int64) {
bt.free = freeResources
bt.updateSemaphoreMax()
}
func (bt *backpressureTracker) beforeBlockPut(
ctx context.Context, blockResources int64) (
availableResources int64, err error) {
return bt.semaphore.Acquire(ctx, blockResources)
}
func (bt *backpressureTracker) afterBlockPut(
blockResources int64, putData bool) {
if putData {
bt.used += blockResources
bt.updateSemaphoreMax()
} else {
bt.semaphore.Release(blockResources)
}
}
func (bt *backpressureTracker) onBlocksDelete(blockResources int64) {
if blockResources == 0 {
return
}
bt.semaphore.Release(blockResources)
bt.used -= blockResources
bt.updateSemaphoreMax()
}
func (bt *backpressureTracker) beforeDiskBlockCachePut(blockResources int64) (
availableResources int64) {
// TODO: Implement TryAcquire that automatically rolls back if it would go
// negative.
availableResources = bt.semaphore.ForceAcquire(blockResources)
if availableResources < 0 {
// We must roll back the acquisition of resources. We should still
// return the negative number, however, so the disk block cache
// knows how much to evict.
bt.afterBlockPut(blockResources, false)
}
return availableResources
}
func (bt *backpressureTracker) getLimitInfo() (used int64, limit int64) {
return bt.used, bt.limit
}
type backpressureTrackerStatus struct {
// Derived numbers.
UsedFrac float64
DelayScale float64
// Constants.
MinThreshold float64
MaxThreshold float64
LimitFrac float64
Limit int64
// Raw numbers.
Used int64
Free int64
Max int64
Count int64
}
func (bt *backpressureTracker) getStatus() backpressureTrackerStatus {
return backpressureTrackerStatus{
UsedFrac: bt.usedFrac(),
DelayScale: bt.delayScale(),
MinThreshold: bt.minThreshold,
MaxThreshold: bt.maxThreshold,
LimitFrac: bt.limitFrac,
Limit: bt.limit,
Used: bt.used,
Free: bt.free,
Max: bt.semaphoreMax,
Count: bt.semaphore.Count(),
}
}
// quotaBackpressureTracker keeps track of the variables used to
// calculate quota-related backpressure.
//
// Let U be the (approximate) unflushed bytes in the journal, R be the
// remote quota usage, and Q be the quota. Then we want to set
// thresholds 0 <= m <= M such that we apply proportional backpressure
// (with a given maximum delay) when
//
// m <= (U+R)/Q <= M.
//
// Note that this type doesn't do any locking, so it's the caller's
// responsibility to do so.
type quotaBackpressureTracker struct {
// minThreshold is m in the above.
minThreshold float64
// maxThreshold is M in the above.
maxThreshold float64
// unflushedBytes is U in the above.
unflushedBytes int64
// remoteUsedBytes is R in the above.
remoteUsedBytes int64
// quotaBytes is Q in the above.
quotaBytes int64
}
func newQuotaBackpressureTracker(minThreshold, maxThreshold float64) (
*quotaBackpressureTracker, error) {
if minThreshold < 0.0 {
return nil, errors.Errorf("minThreshold=%f < 0.0",
minThreshold)
}
if maxThreshold < minThreshold {
return nil, errors.Errorf(
"maxThreshold=%f < minThreshold=%f",
maxThreshold, minThreshold)
}
	qbt := &quotaBackpressureTracker{
minThreshold, maxThreshold, 0, 0, math.MaxInt64,
}
return qbt, nil
}
func (qbt quotaBackpressureTracker) usedFrac() float64 {
return (float64(qbt.unflushedBytes) + float64(qbt.remoteUsedBytes)) /
float64(qbt.quotaBytes)
}
// delayScale returns a number between 0 and 1, which should be
// multiplied with the maximum delay to get the backpressure delay to
// apply.
func (qbt quotaBackpressureTracker) delayScale() float64 {
usedFrac := qbt.usedFrac()
// We want the delay to be 0 if usedFrac <= m and the max
// delay if usedFrac >= M, so linearly interpolate the delay
// scale.
m := qbt.minThreshold
M := qbt.maxThreshold
return math.Min(1.0, math.Max(0.0, (usedFrac-m)/(M-m)))
}
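// A worked example (a sketch using the default quota thresholds m=1.0 and
// M=1.2): with unflushedBytes=10 GiB, remoteUsedBytes=95 GiB and
// quotaBytes=100 GiB, usedFrac() = (10+95)/100 = 1.05, so delayScale() =
// (1.05-1.0)/(1.2-1.0) = 0.25, i.e. a quarter of the maximum delay.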
func (qbt quotaBackpressureTracker) getQuotaInfo() (
usedQuotaBytes, quotaBytes int64) {
usedQuotaBytes = qbt.unflushedBytes + qbt.remoteUsedBytes
quotaBytes = qbt.quotaBytes
return usedQuotaBytes, quotaBytes
}
func (qbt *quotaBackpressureTracker) onJournalEnable(unflushedBytes int64) {
qbt.unflushedBytes += unflushedBytes
}
func (qbt *quotaBackpressureTracker) onJournalDisable(unflushedBytes int64) {
qbt.unflushedBytes -= unflushedBytes
}
func (qbt *quotaBackpressureTracker) updateRemote(
remoteUsedBytes, quotaBytes int64) {
qbt.remoteUsedBytes = remoteUsedBytes
qbt.quotaBytes = quotaBytes
}
func (qbt *quotaBackpressureTracker) afterBlockPut(
blockBytes int64, putData bool) {
if putData {
qbt.unflushedBytes += blockBytes
}
}
func (qbt *quotaBackpressureTracker) onBlocksFlush(blockBytes int64) {
qbt.unflushedBytes -= blockBytes
}
type quotaBackpressureTrackerStatus struct {
// Derived numbers.
UsedFrac float64
DelayScale float64
// Constants.
MinThreshold float64
MaxThreshold float64
// Raw numbers.
UnflushedBytes int64
RemoteUsedBytes int64
QuotaBytes int64
}
func (qbt *quotaBackpressureTracker) getStatus() quotaBackpressureTrackerStatus {
return quotaBackpressureTrackerStatus{
UsedFrac: qbt.usedFrac(),
DelayScale: qbt.delayScale(),
MinThreshold: qbt.minThreshold,
MaxThreshold: qbt.maxThreshold,
UnflushedBytes: qbt.unflushedBytes,
RemoteUsedBytes: qbt.remoteUsedBytes,
QuotaBytes: qbt.quotaBytes,
}
}
// journalTracker aggregates all the journal trackers. This type also
// doesn't do any locking, so it's the caller's responsibility to do
// so.
type journalTracker struct {
byte, file *backpressureTracker
quota *quotaBackpressureTracker
}
func newJournalTracker(
minThreshold, maxThreshold, quotaMinThreshold, quotaMaxThreshold, journalFrac float64,
byteLimit, fileLimit, freeBytes, freeFiles int64) (
journalTracker, error) {
// byteLimit and fileLimit must be scaled by the proportion of
// the limit that the journal should consume. Add 0.5 to round
// up.
journalByteLimit := int64((float64(byteLimit) * journalFrac) + 0.5)
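	// For example, with the default byteLimit of 200 GiB and journalFrac of
	// 0.85, journalByteLimit comes out to 170 GiB.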
byteTracker, err := newBackpressureTracker(
minThreshold, maxThreshold, journalFrac, journalByteLimit,
freeBytes)
if err != nil {
return journalTracker{}, err
}
// the fileLimit is only used by the journal, so in theory we
// don't have to scale it by journalFrac, but in the interest
// of consistency with how we treat the byteLimit, we do so
// anyway. Add 0.5 to round up.
journalFileLimit := int64((float64(fileLimit) * journalFrac) + 0.5)
fileTracker, err := newBackpressureTracker(
minThreshold, maxThreshold, journalFrac, journalFileLimit,
freeFiles)
if err != nil {
return journalTracker{}, err
}
journalQuotaTracker, err := newQuotaBackpressureTracker(
quotaMinThreshold, quotaMaxThreshold)
if err != nil {
return journalTracker{}, err
}
return journalTracker{byteTracker, fileTracker, journalQuotaTracker}, nil
}
type jtSnapshot struct {
used int64
free int64
max int64
count int64
}
func (jt journalTracker) getSnapshotsForTest() (
byteSnapshot, fileSnapshot, quotaSnapshot jtSnapshot) {
byteSnapshot = jtSnapshot{jt.byte.used, jt.byte.free,
jt.byte.semaphoreMax, jt.byte.semaphore.Count()}
fileSnapshot = jtSnapshot{jt.file.used, jt.file.free,
jt.file.semaphoreMax, jt.file.semaphore.Count()}
usedQuotaBytes, quotaBytes := jt.quota.getQuotaInfo()
free := quotaBytes - usedQuotaBytes
quotaSnapshot = jtSnapshot{usedQuotaBytes, free, 0, 0}
return byteSnapshot, fileSnapshot, quotaSnapshot
}
func (jt journalTracker) onEnable(storedBytes, unflushedBytes, files int64) (
availableBytes, availableFiles int64) {
// storedBytes should be >= unflushedBytes. But it's not too
// bad to let it go through.
availableBytes = jt.byte.onEnable(storedBytes)
availableFiles = jt.file.onEnable(files)
jt.quota.onJournalEnable(unflushedBytes)
return availableBytes, availableFiles
}
func (jt journalTracker) onDisable(storedBytes, unflushedBytes, files int64) {
// As above, storedBytes should be >= unflushedBytes. Let it
// go through here, too.
jt.byte.onDisable(storedBytes)
jt.file.onDisable(files)
jt.quota.onJournalDisable(unflushedBytes)
}
func (jt journalTracker) getDelayScale() float64 {
byteDelayScale := jt.byte.delayScale()
fileDelayScale := jt.file.delayScale()
quotaDelayScale := jt.quota.delayScale()
delayScale := math.Max(
math.Max(byteDelayScale, fileDelayScale), quotaDelayScale)
return delayScale
}
func (jt journalTracker) updateFree(
freeBytes, otherUsedBytes, freeFiles int64) {
// We calculate the total free bytes by adding up the reported
// free bytes, the journal used bytes, *and* any other used
// bytes (e.g., the disk cache). For now, we hack this by
// lumping the other used bytes with the reported free bytes.
//
// TODO: Keep track of other used bytes separately.
jt.byte.updateFree(freeBytes + otherUsedBytes)
jt.file.updateFree(freeFiles)
}
func (jt journalTracker) updateRemote(remoteUsedBytes, quotaBytes int64) {
jt.quota.updateRemote(remoteUsedBytes, quotaBytes)
}
func (jt journalTracker) getSemaphoreCounts() (byteCount, fileCount int64) {
return jt.byte.semaphore.Count(), jt.file.semaphore.Count()
}
func (jt journalTracker) beforeBlockPut(
ctx context.Context, blockBytes, blockFiles int64) (
availableBytes, availableFiles int64, err error) {
availableBytes, err = jt.byte.beforeBlockPut(ctx, blockBytes)
if err != nil {
return availableBytes, jt.file.semaphore.Count(), err
}
defer func() {
if err != nil {
jt.byte.afterBlockPut(blockBytes, false)
availableBytes = jt.byte.semaphore.Count()
}
}()
availableFiles, err = jt.file.beforeBlockPut(ctx, blockFiles)
if err != nil {
return availableBytes, availableFiles, err
}
return availableBytes, availableFiles, nil
}
func (jt journalTracker) afterBlockPut(
blockBytes, blockFiles int64, putData bool) {
jt.byte.afterBlockPut(blockBytes, putData)
jt.file.afterBlockPut(blockFiles, putData)
jt.quota.afterBlockPut(blockBytes, putData)
}
func (jt journalTracker) onBlocksFlush(blockBytes int64) {
jt.quota.onBlocksFlush(blockBytes)
}
func (jt journalTracker) onBlocksDelete(blockBytes, blockFiles int64) {
jt.byte.onBlocksDelete(blockBytes)
jt.file.onBlocksDelete(blockFiles)
}
func (jt journalTracker) getUsedBytes() int64 {
return jt.byte.used
}
func (jt journalTracker) getStatusLine() string {
return fmt.Sprintf("journalBytes=%d, freeBytes=%d, "+
"journalFiles=%d, freeFiles=%d, "+
"quotaUnflushedBytes=%d, quotaRemoteUsedBytes=%d, "+
"quotaBytes=%d",
jt.byte.used, jt.byte.free,
jt.file.used, jt.file.free,
jt.quota.unflushedBytes, jt.quota.remoteUsedBytes,
jt.quota.quotaBytes)
}
func (jt journalTracker) getQuotaInfo() (usedQuotaBytes, quotaBytes int64) {
return jt.quota.getQuotaInfo()
}
func (jt journalTracker) getDiskLimitInfo() (
usedBytes, limitBytes, usedFiles, limitFiles int64) {
usedBytes, limitBytes = jt.byte.getLimitInfo()
usedFiles, limitFiles = jt.file.getLimitInfo()
return usedBytes, limitBytes, usedFiles, limitFiles
}
type journalTrackerStatus struct {
ByteStatus backpressureTrackerStatus
FileStatus backpressureTrackerStatus
QuotaStatus quotaBackpressureTrackerStatus
}
func (jt journalTracker) getStatus() journalTrackerStatus {
return journalTrackerStatus{
ByteStatus: jt.byte.getStatus(),
FileStatus: jt.file.getStatus(),
QuotaStatus: jt.quota.getStatus(),
}
}
// backpressureDiskLimiter is an implementation of diskLimiter that
// uses backpressure to slow down block puts before they hit the disk
// limits.
type backpressureDiskLimiter struct {
log logger.Logger
maxDelay time.Duration
delayFn func(context.Context, time.Duration) error
freeBytesAndFilesFn func() (int64, int64, error)
quotaFn func(ctx context.Context) (int64, int64)
// lock protects everything in journalTracker and
// diskCacheByteTracker, including the (implicit) maximum
// values of the semaphores, but not the actual semaphores
// themselves.
lock sync.RWMutex
journalTracker journalTracker
diskCacheByteTracker *backpressureTracker
}
var _ DiskLimiter = (*backpressureDiskLimiter)(nil)
type backpressureDiskLimiterParams struct {
// minThreshold is the fraction of the free bytes/files at
// which we start to apply backpressure.
minThreshold float64
// maxThreshold is the fraction of the free bytes/files at
// which we max out on backpressure.
maxThreshold float64
// quotaMinThreshold is the fraction of used quota at which we
// start to apply backpressure.
quotaMinThreshold float64
// quotaMaxThreshold is the fraction of used quota at which we
// max out on backpressure.
quotaMaxThreshold float64
// journalFrac is fraction of the free bytes/files that the
// journal is allowed to use.
journalFrac float64
// diskCacheFrac is the fraction of the free bytes that the
// disk cache is allowed to use. The disk cache doesn't store
// individual files.
diskCacheFrac float64
// byteLimit is the total cap for free bytes. The journal will
// be allowed to use at most journalFrac*byteLimit, and the
// disk cache will be allowed to use at most
// diskCacheFrac*byteLimit.
byteLimit int64
	// fileLimit is the cap for free files. The journal will be
// allowed to use at most journalFrac*fileLimit. This limit
// doesn't apply to the disk cache, since it doesn't store
// individual files.
fileLimit int64
// maxDelay is the maximum delay used for backpressure.
maxDelay time.Duration
// delayFn is a function that takes a context and a duration
// and returns after sleeping for that duration, or if the
// context is cancelled. Overridable for testing.
delayFn func(context.Context, time.Duration) error
// freeBytesAndFilesFn is a function that returns the current
// free bytes and files on the disk containing the
// journal/disk cache directory. Overridable for testing.
freeBytesAndFilesFn func() (int64, int64, error)
// quotaFn is a function that returns the current used and
// total quota bytes. Overridable for testing.
quotaFn func(context.Context) (int64, int64)
}
// defaultDiskLimitMaxDelay is the maximum amount to delay a block
// put. Exposed as a constant as it is used by
// tlfJournalConfigAdapter.
const defaultDiskLimitMaxDelay = 10 * time.Second
func makeDefaultBackpressureDiskLimiterParams(
storageRoot string,
quotaUsage *EventuallyConsistentQuotaUsage) backpressureDiskLimiterParams {
return backpressureDiskLimiterParams{
// Start backpressure when 50% of free bytes or files
// are used...
minThreshold: 0.5,
// ...and max it out at 95% (slightly less than 100%
// to allow for inaccuracies in estimates).
maxThreshold: 0.95,
// Start backpressure when we've used 100% of our quota...
quotaMinThreshold: 1.0,
// ...and max it out at 120% of quota.
quotaMaxThreshold: 1.2,
// Cap journal usage to 85% of free bytes and files...
journalFrac: 0.85,
// ...and cap disk cache usage to 10% of free
// bytes. The disk cache doesn't store individual
// files.
diskCacheFrac: 0.10,
// Set the byte limit to 200 GiB, which translates to
// having the journal take up at most 30 GiB, and the
// disk cache to take up at most 20 GiB.
byteLimit: 200 * 1024 * 1024 * 1024,
// Set the file limit to 6 million files, which
// translates to having the journal take up at most
// 900k files.
fileLimit: 6000000,
maxDelay: defaultDiskLimitMaxDelay,
delayFn: defaultDoDelay,
freeBytesAndFilesFn: func() (int64, int64, error) {
return defaultGetFreeBytesAndFiles(storageRoot)
},
quotaFn: func(ctx context.Context) (int64, int64) {
timestamp, usageBytes, limitBytes, err :=
quotaUsage.Get(ctx, 1*time.Minute, math.MaxInt64)
if err != nil {
return 0, math.MaxInt64
}
if timestamp.IsZero() {
return 0, math.MaxInt64
}
return usageBytes, limitBytes
},
}
}
// newBackpressureDiskLimiter constructs a new backpressureDiskLimiter
// with the given params.
func newBackpressureDiskLimiter(
log logger.Logger, params backpressureDiskLimiterParams) (
*backpressureDiskLimiter, error) {
freeBytes, freeFiles, err := params.freeBytesAndFilesFn()
if err != nil {
return nil, err
}
journalTracker, err := newJournalTracker(
params.minThreshold, params.maxThreshold,
params.quotaMinThreshold, params.quotaMaxThreshold,
params.journalFrac, params.byteLimit, params.fileLimit,
freeBytes, freeFiles)
if err != nil {
return nil, err
}
// byteLimit must be scaled by the proportion of the limit
// that the disk journal should consume. Add 0.5 to round up.
diskCacheByteLimit := int64((float64(params.byteLimit) * params.diskCacheFrac) + 0.5)
diskCacheByteTracker, err := newBackpressureTracker(
1.0, 1.0, params.diskCacheFrac, diskCacheByteLimit, freeBytes)
bdl := &backpressureDiskLimiter{
log, params.maxDelay, params.delayFn,
params.freeBytesAndFilesFn, params.quotaFn, sync.RWMutex{},
journalTracker, diskCacheByteTracker,
}
return bdl, nil
}
// defaultDoDelay uses a timer to delay by the given duration.
func defaultDoDelay(ctx context.Context, delay time.Duration) error {
if delay == 0 {
return nil
}
timer := time.NewTimer(delay)
select {
case <-timer.C:
return nil
case <-ctx.Done():
timer.Stop()
return errors.WithStack(ctx.Err())
}
}
func defaultGetFreeBytesAndFiles(path string) (int64, int64, error) {
// getDiskLimits returns availableBytes and availableFiles,
// but we want to avoid confusing that with availBytes and
// availFiles in the sense of the semaphore value.
freeBytes, freeFiles, err := getDiskLimits(path)
if err != nil {
return 0, 0, err
}
if freeBytes > uint64(math.MaxInt64) {
freeBytes = math.MaxInt64
}
if freeFiles > uint64(math.MaxInt64) {
freeFiles = math.MaxInt64
}
return int64(freeBytes), int64(freeFiles), nil
}
func (bdl *backpressureDiskLimiter) getJournalSnapshotsForTest() (
byteSnapshot, fileSnapshot, quotaSnapshot jtSnapshot) {
bdl.lock.RLock()
defer bdl.lock.RUnlock()
return bdl.journalTracker.getSnapshotsForTest()
}
func (bdl *backpressureDiskLimiter) onJournalEnable(
ctx context.Context,
journalStoredBytes, journalUnflushedBytes, journalFiles int64) (
availableBytes, availableFiles int64) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
return bdl.journalTracker.onEnable(
journalStoredBytes, journalUnflushedBytes, journalFiles)
}
func (bdl *backpressureDiskLimiter) onJournalDisable(
ctx context.Context,
journalStoredBytes, journalUnflushedBytes, journalFiles int64) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.journalTracker.onDisable(
journalStoredBytes, journalUnflushedBytes, journalFiles)
}
func (bdl *backpressureDiskLimiter) onDiskBlockCacheEnable(ctx context.Context,
diskCacheBytes int64) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.diskCacheByteTracker.onEnable(diskCacheBytes)
}
func (bdl *backpressureDiskLimiter) onDiskBlockCacheDisable(ctx context.Context,
diskCacheBytes int64) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.diskCacheByteTracker.onDisable(diskCacheBytes)
}
func (bdl *backpressureDiskLimiter) getDelayLocked(
ctx context.Context, now time.Time) time.Duration {
delayScale := bdl.journalTracker.getDelayScale()
// Set maxDelay to min(bdl.maxDelay, time until deadline - 1s).
maxDelay := bdl.maxDelay
if deadline, ok := ctx.Deadline(); ok {
// Subtract a second to allow for some slack.
remainingTime := deadline.Sub(now) - time.Second
if remainingTime < maxDelay {
maxDelay = remainingTime
}
}
return time.Duration(delayScale * float64(maxDelay))
}
func (bdl *backpressureDiskLimiter) onBeforeBlockPutError(err error) (
availableBytes, availableFiles int64, _ error) {
availableBytes, availableFiles =
bdl.journalTracker.getSemaphoreCounts()
return availableBytes, availableFiles, err
}
func (bdl *backpressureDiskLimiter) beforeBlockPut(
ctx context.Context, blockBytes, blockFiles int64) (
availableBytes, availableFiles int64, err error) {
if blockBytes == 0 {
// Better to return an error than to panic in Acquire.
return bdl.onBeforeBlockPutError(errors.New(
"backpressureDiskLimiter.beforeBlockPut called with 0 blockBytes"))
}
if blockFiles == 0 {
// Better to return an error than to panic in Acquire.
return bdl.onBeforeBlockPutError(errors.New(
"backpressureDiskLimiter.beforeBlockPut called with 0 blockFiles"))
}
delay, err := func() (time.Duration, error) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
// Call this under lock to avoid problems with its
// return values going stale while blocking on
// bdl.lock.
freeBytes, freeFiles, err := bdl.freeBytesAndFilesFn()
if err != nil {
return 0, err
}
bdl.journalTracker.updateFree(
freeBytes, bdl.diskCacheByteTracker.used, freeFiles)
remoteUsedBytes, quotaBytes := bdl.quotaFn(ctx)
bdl.journalTracker.updateRemote(remoteUsedBytes, quotaBytes)
delay := bdl.getDelayLocked(ctx, time.Now())
if delay > 0 {
bdl.log.CDebugf(ctx, "Delaying block put of %d bytes and %d files by %f s (%s)",
blockBytes, blockFiles, delay.Seconds(),
bdl.journalTracker.getStatusLine())
}
return delay, nil
}()
if err != nil {
return bdl.onBeforeBlockPutError(err)
}
// TODO: Update delay if any variables change (i.e., we
// suddenly free up a lot of space).
err = bdl.delayFn(ctx, delay)
if err != nil {
return bdl.onBeforeBlockPutError(err)
}
return bdl.journalTracker.beforeBlockPut(ctx, blockBytes, blockFiles)
}
func (bdl *backpressureDiskLimiter) afterBlockPut(
ctx context.Context, blockBytes, blockFiles int64, putData bool) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.journalTracker.afterBlockPut(blockBytes, blockFiles, putData)
}
func (bdl *backpressureDiskLimiter) onBlocksFlush(
ctx context.Context, blockBytes int64) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.journalTracker.onBlocksFlush(blockBytes)
}
func (bdl *backpressureDiskLimiter) onBlocksDelete(
ctx context.Context, blockBytes, blockFiles int64) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.journalTracker.onBlocksDelete(blockBytes, blockFiles)
}
func (bdl *backpressureDiskLimiter) onDiskBlockCacheDelete(
ctx context.Context, blockBytes int64) {
if blockBytes == 0 {
return
}
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.diskCacheByteTracker.onBlocksDelete(blockBytes)
}
func (bdl *backpressureDiskLimiter) beforeDiskBlockCachePut(
ctx context.Context, blockBytes int64) (
availableBytes int64, err error) {
if blockBytes == 0 {
// Better to return an error than to panic in ForceAcquire.
return 0, errors.New("backpressureDiskLimiter.beforeDiskBlockCachePut" +
" called with 0 blockBytes")
}
bdl.lock.Lock()
defer bdl.lock.Unlock()
// Call this under lock to avoid problems with its return
// values going stale while blocking on bdl.lock.
freeBytes, _, err := bdl.freeBytesAndFilesFn()
if err != nil {
return 0, err
}
// We calculate the total free bytes by adding up the reported
// free bytes, the disk cache used bytes, *and* any other used
// bytes (e.g., the journal cache). For now, we hack this by
// lumping the other used bytes with the reported free bytes.
//
// TODO: Keep track of other used bytes separately.
bdl.diskCacheByteTracker.updateFree(
freeBytes + bdl.journalTracker.getUsedBytes())
return bdl.diskCacheByteTracker.beforeDiskBlockCachePut(blockBytes), nil
}
func (bdl *backpressureDiskLimiter) afterDiskBlockCachePut(
ctx context.Context, blockBytes int64, putData bool) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.diskCacheByteTracker.afterBlockPut(blockBytes, putData)
}
func (bdl *backpressureDiskLimiter) getQuotaInfo() (
usedQuotaBytes, quotaBytes int64) {
bdl.lock.RLock()
defer bdl.lock.RUnlock()
return bdl.journalTracker.getQuotaInfo()
}
func (bdl *backpressureDiskLimiter) getDiskLimitInfo() (
usedBytes, limitBytes, usedFiles, limitFiles int64) {
bdl.lock.RLock()
defer bdl.lock.RUnlock()
return bdl.journalTracker.getDiskLimitInfo()
}
type backpressureDiskLimiterStatus struct {
Type string
// Derived stats.
CurrentDelaySec float64
JournalTrackerStatus journalTrackerStatus
DiskCacheByteStatus backpressureTrackerStatus
}
func (bdl *backpressureDiskLimiter) getStatus() interface{} {
bdl.lock.RLock()
defer bdl.lock.RUnlock()
currentDelay := bdl.getDelayLocked(context.Background(), time.Now())
return backpressureDiskLimiterStatus{
Type: "BackpressureDiskLimiter",
CurrentDelaySec: currentDelay.Seconds(),
JournalTrackerStatus: bdl.journalTracker.getStatus(),
DiskCacheByteStatus: bdl.diskCacheByteTracker.getStatus(),
}
}
| 1 | 16,812 | Would it be too complicated to refactor `currLimit` to be an `int64`? | keybase-kbfs | go |
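// A minimal sketch of the shape implied by the backpressure threshold
// comments above: no delay below the lower threshold, maximum delay above the
// upper one, and, assuming a linear ramp (an assumption, not something the
// file states), proportional scaling in between. The function and parameter
// names are illustrative only and are not part of the package's API.
func exampleDelayScale(frac, lowerThreshold, upperThreshold float64) float64 {
	if frac <= lowerThreshold {
		return 0
	}
	if frac >= upperThreshold {
		return 1
	}
	// Linear ramp between the two thresholds yields a scale in (0, 1),
	// which getDelayLocked-style code would then multiply by the max delay.
	return (frac - lowerThreshold) / (upperThreshold - lowerThreshold)
}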
@@ -23,6 +23,13 @@ module RSpec
def assertions
@assertions ||= 0
end
+
+ RSPEC_SKIP_IMPLEMENTATION = ::RSpec::Core::Pending.instance_method(:skip)
+      # Minitest::Assertions has its own `skip`, we need to make sure
+ # RSpec::Core::Pending#skip is used instead.
+ def skip(*args)
+ RSPEC_SKIP_IMPLEMENTATION.bind(self).call(*args)
+ end
end
end
end | 1 | begin
# Only the minitest 5.x gem includes the minitest.rb and assertions.rb files.
require 'minitest'
require 'minitest/assertions'
rescue LoadError
# We must be using Ruby Core's MiniTest or the Minitest gem 4.x.
require 'minitest/unit'
Minitest = MiniTest
end
module RSpec
module Core
# @private
module MinitestAssertionsAdapter
include ::Minitest::Assertions
# Minitest 5.x requires this accessor to be available. See
# https://github.com/seattlerb/minitest/blob/38f0a5fcbd9c37c3f80a3eaad4ba84d3fc9947a0/lib/minitest/assertions.rb#L8
#
# It is not required for other extension libraries, and RSpec does not
# report or make this information available to formatters.
attr_writer :assertions
def assertions
@assertions ||= 0
end
end
end
end
| 1 | 14,439 | Would it be more performant to just `include ::RSpec::Core::Pending` here? /cc @myronmarston | rspec-rspec-core | rb |
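# A minimal sketch of the two dispatch strategies discussed around the `skip`
# override patch above; the module and method names here are illustrative, not
# RSpec's. Including a module last makes its #skip win the ordinary ancestor
# lookup, whereas binding an UnboundMethod pins a specific implementation
# regardless of include order, which is what the patch does.
module FirstSkip
  def skip; "first"; end
end

module SecondSkip
  def skip; "second"; end
end

class IncludesBoth
  include FirstSkip
  include SecondSkip # included last, so its #skip wins ordinary lookup
end

class BindsExplicitly
  include FirstSkip
  include SecondSkip

  FIRST_SKIP = FirstSkip.instance_method(:skip)

  # Explicit binding dispatches to FirstSkip#skip no matter the include order.
  def skip(*args)
    FIRST_SKIP.bind(self).call(*args)
  end
end

IncludesBoth.new.skip    #=> "second"
BindsExplicitly.new.skip #=> "first"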
@@ -92,6 +92,10 @@ type IoChaosSpec struct {
// IoChaosStatus defines the observed state of IoChaos
type IoChaosStatus struct {
ChaosStatus `json:",inline"`
+
+ // Instances always specifies podnetworkchaos generation or empty
+ // +optional
+ Instances map[string]int64 `json:"instances,omitempty"`
}
func (obj *IoChaos) GetSelectorSpecs() map[string]interface{} { | 1 | // Copyright 2019 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// +kubebuilder:object:root=true
// +chaos-mesh:base
// IoChaos is the Schema for the iochaos API
type IoChaos struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IoChaosSpec `json:"spec,omitempty"`
Status IoChaosStatus `json:"status,omitempty"`
}
// IoChaosSpec defines the desired state of IoChaos
type IoChaosSpec struct {
ContainerSelector `json:",inline"`
// Action defines the specific pod chaos action.
// Supported action: latency / fault / attrOverride / mistake
// +kubebuilder:validation:Enum=latency;fault;attrOverride;mistake
Action IoChaosType `json:"action"`
// Delay defines the value of I/O chaos action delay.
// A delay string is a possibly signed sequence of
// decimal numbers, each with optional fraction and a unit suffix,
// such as "300ms".
// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
// +optional
Delay string `json:"delay,omitempty"`
	// Errno defines the error code that is returned by the I/O action.
// refer to: https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html
// +optional
Errno uint32 `json:"errno,omitempty"`
	// Attr defines the overridden attributes
// +optional
Attr *AttrOverrideSpec `json:"attr,omitempty"`
// Mistake defines what types of incorrectness are injected to IO operations
// +optional
Mistake *MistakeSpec `json:"mistake,omitempty"`
// Path defines the path of files for injecting I/O chaos action.
// +optional
Path string `json:"path,omitempty"`
// Methods defines the I/O methods for injecting I/O chaos action.
// default: all I/O methods.
// +optional
Methods []IoMethod `json:"methods,omitempty" faker:"ioMethods"`
// Percent defines the percentage of injection errors and provides a number from 0-100.
// default: 100.
// +optional
Percent int `json:"percent,omitempty"`
// VolumePath represents the mount path of injected volume
VolumePath string `json:"volumePath"`
// Duration represents the duration of the chaos action.
// It is required when the action is `PodFailureAction`.
// A duration string is a possibly signed sequence of
// decimal numbers, each with optional fraction and a unit suffix,
// such as "300ms", "-1.5h" or "2h45m".
// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
// +optional
Duration *string `json:"duration,omitempty"`
}
// IoChaosStatus defines the observed state of IoChaos
type IoChaosStatus struct {
ChaosStatus `json:",inline"`
}
func (obj *IoChaos) GetSelectorSpecs() map[string]interface{} {
return map[string]interface{}{
".": &obj.Spec.ContainerSelector,
}
}
| 1 | 21,820 | Should be podiochaos? | chaos-mesh-chaos-mesh | go |
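// A minimal sketch of an IoChaos object using the field formats documented
// above. The concrete values are examples only, ContainerSelector is left at
// its zero value because that type is defined elsewhere, and this assumes
// IoChaosType is a string-backed type.
func exampleIoChaos() IoChaos {
	duration := "5m"
	return IoChaos{
		ObjectMeta: metav1.ObjectMeta{Name: "io-delay-example", Namespace: "default"},
		Spec: IoChaosSpec{
			Action:     "latency",       // one of: latency / fault / attrOverride / mistake
			Delay:      "300ms",         // decimal number plus unit suffix, as the Delay doc describes
			Percent:    50,              // inject on roughly half of the matched operations
			VolumePath: "/var/run/data", // mount path of the injected volume
			Path:       "/var/run/data/**/*",
			Duration:   &duration,
		},
	}
}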
@@ -35,6 +35,10 @@ import api
import guiHelper
import winVersion
+# Temporary: #8599: add cp65001 codec
+# #7105: upgrading to python 3 should fix this issue. See https://bugs.python.org/issue13216
+codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
+
try:
import updateCheck
except RuntimeError: | 1 | # -*- coding: UTF-8 -*-
#gui/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Mesar Hameed, Joseph Lee, Thomas Stivers, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import time
import os
import sys
import threading
import codecs
import ctypes
import weakref
import wx
import wx.adv
import globalVars
import tones
import ui
from logHandler import log
import config
import versionInfo
import speech
import queueHandler
import core
import guiHelper
from settingsDialogs import *
import speechDictHandler
import languageHandler
import keyboardHandler
import logViewer
import speechViewer
import winUser
import api
import guiHelper
import winVersion
try:
import updateCheck
except RuntimeError:
updateCheck = None
### Constants
NVDA_PATH = os.getcwdu()
ICON_PATH=os.path.join(NVDA_PATH, "images", "nvda.ico")
DONATE_URL = "http://www.nvaccess.org/donate/"
### Globals
mainFrame = None
isInMessageBox = False
def getDocFilePath(fileName, localized=True):
if not getDocFilePath.rootPath:
if hasattr(sys, "frozen"):
getDocFilePath.rootPath = os.path.join(NVDA_PATH, "documentation")
else:
getDocFilePath.rootPath = os.path.abspath(os.path.join("..", "user_docs"))
if localized:
lang = languageHandler.getLanguage()
tryLangs = [lang]
if "_" in lang:
# This locale has a sub-locale, but documentation might not exist for the sub-locale, so try stripping it.
tryLangs.append(lang.split("_")[0])
# If all else fails, use English.
tryLangs.append("en")
fileName, fileExt = os.path.splitext(fileName)
for tryLang in tryLangs:
tryDir = os.path.join(getDocFilePath.rootPath, tryLang)
if not os.path.isdir(tryDir):
continue
# Some out of date translations might include .txt files which are now .html files in newer translations.
# Therefore, ignore the extension and try both .html and .txt.
for tryExt in ("html", "txt"):
tryPath = os.path.join(tryDir, "%s.%s" % (fileName, tryExt))
if os.path.isfile(tryPath):
return tryPath
else:
# Not localized.
if not hasattr(sys, "frozen") and fileName in ("copying.txt", "contributors.txt"):
# If running from source, these two files are in the root dir.
return os.path.join(NVDA_PATH, "..", fileName)
else:
return os.path.join(getDocFilePath.rootPath, fileName)
getDocFilePath.rootPath = None
class MainFrame(wx.Frame):
def __init__(self):
style = wx.DEFAULT_FRAME_STYLE ^ wx.MAXIMIZE_BOX ^ wx.MINIMIZE_BOX | wx.FRAME_NO_TASKBAR
super(MainFrame, self).__init__(None, wx.ID_ANY, versionInfo.name, size=(1,1), style=style)
self.Bind(wx.EVT_CLOSE, self.onExitCommand)
self.sysTrayIcon = SysTrayIcon(self)
#: The focus before the last popup or C{None} if unknown.
#: This is only valid before L{prePopup} is called,
#: so it should be used as early as possible in any popup that needs it.
#: @type: L{NVDAObject}
self.prevFocus = None
#: The focus ancestors before the last popup or C{None} if unknown.
#: @type: list of L{NVDAObject}
self.prevFocusAncestors = None
# If NVDA has the uiAccess privilege, it can always set the foreground window.
if not config.hasUiAccess():
# This makes Windows return to the previous foreground window and also seems to allow NVDA to be brought to the foreground.
self.Show()
self.Hide()
if winUser.isWindowVisible(self.Handle):
# HACK: Work around a wx bug where Hide() doesn't actually hide the window,
# but IsShown() returns False and Hide() again doesn't fix it.
# This seems to happen if the call takes too long.
self.Show()
self.Hide()
def Destroy(self):
self.sysTrayIcon.Destroy()
super(MainFrame, self).Destroy()
def prePopup(self):
"""Prepare for a popup.
This should be called before any dialog or menu which should pop up for the user.
L{postPopup} should be called after the dialog or menu has been shown.
@postcondition: A dialog or menu may be shown.
"""
nvdaPid = os.getpid()
focus = api.getFocusObject()
if focus.processID != nvdaPid:
self.prevFocus = focus
self.prevFocusAncestors = api.getFocusAncestors()
if winUser.getWindowThreadProcessID(winUser.getForegroundWindow())[0] != nvdaPid:
# This process is not the foreground process, so bring it to the foreground.
self.Raise()
def postPopup(self):
"""Clean up after a popup dialog or menu.
This should be called after a dialog or menu was popped up for the user.
"""
self.prevFocus = None
self.prevFocusAncestors = None
if not winUser.isWindowVisible(winUser.getForegroundWindow()):
# The current foreground window is invisible, so we want to return to the previous foreground window.
# Showing and hiding our main window seems to achieve this.
self.Show()
self.Hide()
def showGui(self):
# The menu pops up at the location of the mouse, which means it pops up at an unpredictable location.
# Therefore, move the mouse to the centre of the screen so that the menu will always pop up there.
left, top, width, height = api.getDesktopObject().location
x = width / 2
y = height / 2
winUser.setCursorPos(x, y)
self.evaluateUpdatePendingUpdateMenuItemCommand()
self.sysTrayIcon.onActivate(None)
def onRevertToSavedConfigurationCommand(self,evt):
queueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration)
# Translators: Reported when last saved configuration has been applied by using revert to saved configuration option in NVDA menu.
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration applied"))
def onRevertToDefaultConfigurationCommand(self,evt):
queueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration,factoryDefaults=True)
# Translators: Reported when configuration has been restored to defaults by using restore configuration to factory defaults item in NVDA menu.
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration restored to factory defaults"))
def onSaveConfigurationCommand(self,evt):
if globalVars.appArgs.secure:
# Translators: Reported when current configuration cannot be saved while NVDA is running in secure mode such as in Windows login screen.
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Cannot save configuration - NVDA in secure mode"))
return
try:
config.conf.save()
# Translators: Reported when current configuration has been saved.
queueHandler.queueFunction(queueHandler.eventQueue,ui.message,_("Configuration saved"))
except:
# Translators: Message shown when current configuration cannot be saved such as when running NVDA from a CD.
messageBox(_("Could not save configuration - probably read only file system"),_("Error"),wx.OK | wx.ICON_ERROR)
def _popupSettingsDialog(self, dialog, *args, **kwargs):
if isInMessageBox:
return
self.prePopup()
try:
dialog(self, *args, **kwargs).Show()
except SettingsDialog.MultiInstanceError:
# Translators: Message shown when attempting to open another NVDA settings dialog when one is already open
# (example: when trying to open keyboard settings when general settings dialog is open).
messageBox(_("An NVDA settings dialog is already open. Please close it first."),_("Error"),style=wx.OK | wx.ICON_ERROR)
except MultiCategorySettingsDialog.CategoryUnavailableError:
# Translators: Message shown when trying to open an unavailable category of a multi category settings dialog
# (example: when trying to open touch interaction settings on an unsupported system).
messageBox(_("The settings panel you tried to open is unavailable on this system."),_("Error"),style=wx.OK | wx.ICON_ERROR)
self.postPopup()
def onDefaultDictionaryCommand(self,evt):
# Translators: Title for default speech dictionary dialog.
self._popupSettingsDialog(DictionaryDialog,_("Default dictionary"),speechDictHandler.dictionaries["default"])
def onVoiceDictionaryCommand(self,evt):
# Translators: Title for voice dictionary for the current voice such as current eSpeak variant.
self._popupSettingsDialog(DictionaryDialog,_("Voice dictionary (%s)")%speechDictHandler.dictionaries["voice"].fileName,speechDictHandler.dictionaries["voice"])
def onTemporaryDictionaryCommand(self,evt):
# Translators: Title for temporary speech dictionary dialog (the voice dictionary that is active as long as NvDA is running).
self._popupSettingsDialog(DictionaryDialog,_("Temporary dictionary"),speechDictHandler.dictionaries["temp"])
def onExecuteUpdateCommand(self, evt):
if updateCheck and updateCheck.isPendingUpdate():
updateCheck.executeUpdate()
def evaluateUpdatePendingUpdateMenuItemCommand(self):
try:
self.sysTrayIcon.menu.Remove(self.sysTrayIcon.installPendingUpdateMenuItem)
except:
log.debug("Error while removing pending update menu item", exc_info=True)
pass
if not globalVars.appArgs.secure and updateCheck and updateCheck.isPendingUpdate():
self.sysTrayIcon.menu.Insert(self.sysTrayIcon.installPendingUpdateMenuItemPos,self.sysTrayIcon.installPendingUpdateMenuItem)
def onExitCommand(self, evt):
if config.conf["general"]["askToExit"]:
self.prePopup()
d = ExitDialog(self)
d.Raise()
d.Show()
self.postPopup()
else:
wx.GetApp().ExitMainLoop()
def onNVDASettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog)
def onGeneralSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, GeneralSettingsPanel)
def onSelectSynthesizerCommand(self,evt):
self._popupSettingsDialog(SynthesizerSelectionDialog)
def onSpeechSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, SpeechSettingsPanel)
def onSelectBrailleDisplayCommand(self,evt):
self._popupSettingsDialog(BrailleDisplaySelectionDialog)
def onBrailleSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, BrailleSettingsPanel)
def onKeyboardSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, KeyboardSettingsPanel)
def onMouseSettingsCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, MouseSettingsPanel)
def onTouchInteractionCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, TouchInteractionPanel)
def onReviewCursorCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, ReviewCursorPanel)
def onInputCompositionCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, InputCompositionPanel)
def onObjectPresentationCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, ObjectPresentationPanel)
def onBrowseModeCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, BrowseModePanel)
def onDocumentFormattingCommand(self,evt):
self._popupSettingsDialog(NVDASettingsDialog, DocumentFormattingPanel)
def onUwpOcrCommand(self, evt):
self._popupSettingsDialog(NVDASettingsDialog, UwpOcrPanel)
def onSpeechSymbolsCommand(self, evt):
self._popupSettingsDialog(SpeechSymbolsDialog)
def onInputGesturesCommand(self, evt):
self._popupSettingsDialog(InputGesturesDialog)
def onAboutCommand(self,evt):
# Translators: The title of the dialog to show about info for NVDA.
messageBox(versionInfo.aboutMessage, _("About NVDA"), wx.OK)
def onCheckForUpdateCommand(self, evt):
updateCheck.UpdateChecker().check()
def onViewLogCommand(self, evt):
logViewer.activate()
def onSpeechViewerEnabled(self, isEnabled):
		# it's possible for this to be called after the sysTrayIcon is destroyed if we are exiting NVDA
if self.sysTrayIcon and self.sysTrayIcon.menu_tools_toggleSpeechViewer:
self.sysTrayIcon.menu_tools_toggleSpeechViewer.Check(isEnabled)
def onToggleSpeechViewerCommand(self, evt):
if not speechViewer.isActive:
speechViewer.activate()
else:
speechViewer.deactivate()
def onPythonConsoleCommand(self, evt):
import pythonConsole
if not pythonConsole.consoleUI:
pythonConsole.initialize()
pythonConsole.activate()
def onAddonsManagerCommand(self,evt):
if isInMessageBox:
return
self.prePopup()
from addonGui import AddonsDialog
d=AddonsDialog(gui.mainFrame)
d.Show()
self.postPopup()
def onReloadPluginsCommand(self, evt):
import appModuleHandler, globalPluginHandler
from NVDAObjects import NVDAObject
appModuleHandler.reloadAppModules()
globalPluginHandler.reloadGlobalPlugins()
NVDAObject.clearDynamicClassCache()
def onCreatePortableCopyCommand(self,evt):
if isInMessageBox:
return
self.prePopup()
import gui.installerGui
d=gui.installerGui.PortableCreaterDialog(gui.mainFrame)
d.Show()
self.postPopup()
def onInstallCommand(self, evt):
if isInMessageBox:
return
from gui import installerGui
installerGui.showInstallGui()
def onRunCOMRegistrationFixesCommand(self, evt):
if isInMessageBox:
return
if gui.messageBox(
# Translators: A message to warn the user when starting the COM Registration Fixing tool
_("You are about to run the COM Registration Fixing tool. This tool will try to fix common system problems that stop NVDA from being able to access content in many programs including Firefox and Internet Explorer. This tool must make changes to the System registry and therefore requires administrative access. Are you sure you wish to proceed?"),
# Translators: The title of the warning dialog displayed when launching the COM Registration Fixing tool
_("Warning"),wx.YES|wx.NO|wx.ICON_WARNING,self
)==wx.NO:
return
progressDialog = IndeterminateProgressDialog(mainFrame,
# Translators: The title of the dialog presented while NVDA is running the COM Registration fixing tool
_("COM Registration Fixing Tool"),
# Translators: The message displayed while NVDA is running the COM Registration fixing tool
_("Please wait while NVDA tries to fix your system's COM registrations.")
)
try:
config.execElevated(config.SLAVE_FILENAME,["fixCOMRegistrations"])
except:
log.error("Could not execute fixCOMRegistrations command",exc_info=True)
progressDialog.done()
del progressDialog
# Translators: The message displayed when the COM Registration Fixing tool completes.
gui.messageBox(_("COM Registration Fixing tool complete"),
# Translators: The title of a dialog presented when the COM Registration Fixing tool is complete.
_("COM Registration Fixing Tool"),
wx.OK)
def onConfigProfilesCommand(self, evt):
if isInMessageBox:
return
self.prePopup()
from configProfiles import ProfilesDialog
ProfilesDialog(gui.mainFrame).Show()
self.postPopup()
class SysTrayIcon(wx.adv.TaskBarIcon):
def __init__(self, frame):
super(SysTrayIcon, self).__init__()
icon=wx.Icon(ICON_PATH,wx.BITMAP_TYPE_ICO)
self.SetIcon(icon, versionInfo.name)
self.menu=wx.Menu()
menu_preferences=self.preferencesMenu=wx.Menu()
item = menu_preferences.Append(wx.ID_ANY,
# Translators: The label for the menu item to open NVDA Settings dialog.
_("&Settings..."),
# Translators: The description for the menu item to open NVDA Settings dialog.
_("NVDA settings"))
self.Bind(wx.EVT_MENU, frame.onNVDASettingsCommand, item)
subMenu_speechDicts = wx.Menu()
if not globalVars.appArgs.secure:
# Translators: The label for the menu item to open Default speech dictionary dialog.
item = subMenu_speechDicts.Append(wx.ID_ANY,_("&Default dictionary..."),_("A dialog where you can set default dictionary by adding dictionary entries to the list"))
self.Bind(wx.EVT_MENU, frame.onDefaultDictionaryCommand, item)
# Translators: The label for the menu item to open Voice specific speech dictionary dialog.
item = subMenu_speechDicts.Append(wx.ID_ANY,_("&Voice dictionary..."),_("A dialog where you can set voice-specific dictionary by adding dictionary entries to the list"))
self.Bind(wx.EVT_MENU, frame.onVoiceDictionaryCommand, item)
# Translators: The label for the menu item to open Temporary speech dictionary dialog.
item = subMenu_speechDicts.Append(wx.ID_ANY,_("&Temporary dictionary..."),_("A dialog where you can set temporary dictionary by adding dictionary entries to the edit box"))
self.Bind(wx.EVT_MENU, frame.onTemporaryDictionaryCommand, item)
# Translators: The label for a submenu under NvDA Preferences menu to select speech dictionaries.
menu_preferences.AppendSubMenu(subMenu_speechDicts,_("Speech &dictionaries"))
if not globalVars.appArgs.secure:
# Translators: The label for the menu item to open Punctuation/symbol pronunciation dialog.
item = menu_preferences.Append(wx.ID_ANY, _("&Punctuation/symbol pronunciation..."))
self.Bind(wx.EVT_MENU, frame.onSpeechSymbolsCommand, item)
# Translators: The label for the menu item to open the Input Gestures dialog.
item = menu_preferences.Append(wx.ID_ANY, _("I&nput gestures..."))
self.Bind(wx.EVT_MENU, frame.onInputGesturesCommand, item)
# Translators: The label for Preferences submenu in NVDA menu.
self.menu.AppendSubMenu(menu_preferences,_("&Preferences"))
menu_tools = self.toolsMenu = wx.Menu()
if not globalVars.appArgs.secure:
# Translators: The label for the menu item to open NVDA Log Viewer.
item = menu_tools.Append(wx.ID_ANY, _("View log"))
self.Bind(wx.EVT_MENU, frame.onViewLogCommand, item)
# Translators: The label for the menu item to toggle Speech Viewer.
item=self.menu_tools_toggleSpeechViewer = menu_tools.AppendCheckItem(wx.ID_ANY, _("Speech viewer"))
self.Bind(wx.EVT_MENU, frame.onToggleSpeechViewerCommand, item)
if not globalVars.appArgs.secure and not config.isAppX:
# Translators: The label for the menu item to open NVDA Python Console.
item = menu_tools.Append(wx.ID_ANY, _("Python console"))
self.Bind(wx.EVT_MENU, frame.onPythonConsoleCommand, item)
# Translators: The label of a menu item to open the Add-ons Manager.
item = menu_tools.Append(wx.ID_ANY, _("Manage &add-ons..."))
self.Bind(wx.EVT_MENU, frame.onAddonsManagerCommand, item)
if not globalVars.appArgs.secure and not config.isAppX and getattr(sys,'frozen',None):
# Translators: The label for the menu item to create a portable copy of NVDA from an installed or another portable version.
item = menu_tools.Append(wx.ID_ANY, _("Create portable copy..."))
self.Bind(wx.EVT_MENU, frame.onCreatePortableCopyCommand, item)
if not config.isInstalledCopy():
# Translators: The label for the menu item to install NVDA on the computer.
item = menu_tools.Append(wx.ID_ANY, _("&Install NVDA..."))
self.Bind(wx.EVT_MENU, frame.onInstallCommand, item)
# Translators: The label for the menu item to run the COM registration fix tool
item = menu_tools.Append(wx.ID_ANY, _("Run COM Registration Fixing tool..."))
self.Bind(wx.EVT_MENU, frame.onRunCOMRegistrationFixesCommand, item)
if not config.isAppX:
# Translators: The label for the menu item to reload plugins.
item = menu_tools.Append(wx.ID_ANY, _("Reload plugins"))
self.Bind(wx.EVT_MENU, frame.onReloadPluginsCommand, item)
# Translators: The label for the Tools submenu in NVDA menu.
self.menu.AppendSubMenu(menu_tools,_("Tools"))
menu_help = self.helpMenu = wx.Menu()
if not globalVars.appArgs.secure:
# Translators: The label of a menu item to open NVDA user guide.
item = menu_help.Append(wx.ID_ANY, _("&User Guide"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("userGuide.html")), item)
# Translators: The label of a menu item to open the Commands Quick Reference document.
item = menu_help.Append(wx.ID_ANY, _("Commands &Quick Reference"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("keyCommands.html")), item)
# Translators: The label for the menu item to open What's New document.
item = menu_help.Append(wx.ID_ANY, _("What's &new"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("changes.html")), item)
item = menu_help.Append(wx.ID_ANY, _("NVDA &web site"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile("http://www.nvda-project.org/"), item)
# Translators: The label for the menu item to view NVDA License document.
item = menu_help.Append(wx.ID_ANY, _("L&icense"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("copying.txt", False)), item)
# Translators: The label for the menu item to view NVDA Contributors list document.
item = menu_help.Append(wx.ID_ANY, _("C&ontributors"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath("contributors.txt", False)), item)
# Translators: The label for the menu item to open NVDA Welcome Dialog.
item = menu_help.Append(wx.ID_ANY, _("We&lcome dialog..."))
self.Bind(wx.EVT_MENU, lambda evt: WelcomeDialog.run(), item)
menu_help.AppendSeparator()
if updateCheck:
# Translators: The label of a menu item to manually check for an updated version of NVDA.
item = menu_help.Append(wx.ID_ANY, _("&Check for update..."))
self.Bind(wx.EVT_MENU, frame.onCheckForUpdateCommand, item)
# Translators: The label for the menu item to open About dialog to get information about NVDA.
item = menu_help.Append(wx.ID_ABOUT, _("About..."), _("About NVDA"))
self.Bind(wx.EVT_MENU, frame.onAboutCommand, item)
# Translators: The label for the Help submenu in NVDA menu.
self.menu.AppendSubMenu(menu_help,_("&Help"))
self.menu.AppendSeparator()
# Translators: The label for the menu item to open the Configuration Profiles dialog.
item = self.menu.Append(wx.ID_ANY, _("&Configuration profiles..."))
self.Bind(wx.EVT_MENU, frame.onConfigProfilesCommand, item)
# Translators: The label for the menu item to revert to saved configuration.
item = self.menu.Append(wx.ID_ANY, _("&Revert to saved configuration"),_("Reset all settings to saved state"))
self.Bind(wx.EVT_MENU, frame.onRevertToSavedConfigurationCommand, item)
if not globalVars.appArgs.secure:
# Translators: The label for the menu item to reset settings to default settings.
# Here, default settings means settings that were there when the user first used NVDA.
item = self.menu.Append(wx.ID_ANY, _("&Reset configuration to factory defaults"),_("Reset all settings to default state"))
self.Bind(wx.EVT_MENU, frame.onRevertToDefaultConfigurationCommand, item)
# Translators: The label for the menu item to save current settings.
item = self.menu.Append(wx.ID_SAVE, _("&Save configuration"), _("Write the current configuration to nvda.ini"))
self.Bind(wx.EVT_MENU, frame.onSaveConfigurationCommand, item)
self.menu.AppendSeparator()
# Translators: The label for the menu item to open donate page.
item = self.menu.Append(wx.ID_ANY, _("Donate"))
self.Bind(wx.EVT_MENU, lambda evt: os.startfile(DONATE_URL), item)
self.installPendingUpdateMenuItemPos = self.menu.GetMenuItemCount()
item = self.installPendingUpdateMenuItem = self.menu.Append(wx.ID_ANY,
# Translators: The label for the menu item to run a pending update.
_("Install pending &update"),
# Translators: The description for the menu item to run a pending update.
_("Execute a previously downloaded NVDA update"))
self.Bind(wx.EVT_MENU, frame.onExecuteUpdateCommand, item)
self.menu.AppendSeparator()
item = self.menu.Append(wx.ID_EXIT, _("E&xit"),_("Exit NVDA"))
self.Bind(wx.EVT_MENU, frame.onExitCommand, item)
self.Bind(wx.adv.EVT_TASKBAR_LEFT_DOWN, self.onActivate)
self.Bind(wx.adv.EVT_TASKBAR_RIGHT_DOWN, self.onActivate)
def Destroy(self):
self.menu.Destroy()
super(SysTrayIcon, self).Destroy()
def onActivate(self, evt):
mainFrame.prePopup()
import appModules.nvda
if not appModules.nvda.nvdaMenuIaIdentity:
# The NVDA app module doesn't know how to identify the NVDA menu yet.
# Signal that the NVDA menu has just been opened.
appModules.nvda.nvdaMenuIaIdentity = True
self.PopupMenu(self.menu)
if appModules.nvda.nvdaMenuIaIdentity is True:
# The NVDA menu didn't actually appear for some reason.
appModules.nvda.nvdaMenuIaIdentity = None
mainFrame.postPopup()
def initialize():
global mainFrame
if mainFrame:
raise RuntimeError("GUI already initialized")
mainFrame = MainFrame()
wx.GetApp().SetTopWindow(mainFrame)
def terminate():
global mainFrame
# This is called after the main loop exits because WM_QUIT exits the main loop
# without destroying all objects correctly and we need to support WM_QUIT.
# Therefore, any request to exit should exit the main loop.
wx.CallAfter(mainFrame.Destroy)
# #4460: We need another iteration of the main loop
# so that everything (especially the TaskBarIcon) is cleaned up properly.
# ProcessPendingEvents doesn't seem to work, but MainLoop does.
# Because the top window gets destroyed,
# MainLoop thankfully returns pretty quickly.
wx.GetApp().MainLoop()
mainFrame = None
def showGui():
wx.CallAfter(mainFrame.showGui)
def quit():
wx.CallAfter(mainFrame.onExitCommand, None)
def messageBox(message, caption=wx.MessageBoxCaptionStr, style=wx.OK | wx.CENTER, parent=None):
"""Display a message dialog.
This should be used for all message dialogs
rather than using C{wx.MessageDialog} and C{wx.MessageBox} directly.
@param message: The message text.
@type message: str
@param caption: The caption (title) of the dialog.
@type caption: str
@param style: Same as for wx.MessageBox.
@type style: int
@param parent: The parent window (optional).
@type parent: C{wx.Window}
@return: Same as for wx.MessageBox.
@rtype: int
"""
global isInMessageBox
wasAlready = isInMessageBox
isInMessageBox = True
if not parent:
mainFrame.prePopup()
res = wx.MessageBox(message, caption, style, parent or mainFrame)
if not parent:
mainFrame.postPopup()
if not wasAlready:
isInMessageBox = False
return res
def runScriptModalDialog(dialog, callback=None):
"""Run a modal dialog from a script.
This will not block the caller,
but will instead call C{callback} (if provided) with the result from the dialog.
The dialog will be destroyed once the callback has returned.
@param dialog: The dialog to show.
@type dialog: C{wx.Dialog}
@param callback: The optional callable to call with the result from the dialog.
@type callback: callable
"""
def run():
mainFrame.prePopup()
res = dialog.ShowModal()
mainFrame.postPopup()
if callback:
callback(res)
dialog.Destroy()
wx.CallAfter(run)
class WelcomeDialog(wx.Dialog):
"""The NVDA welcome dialog.
This provides essential information for new users, such as a description of the NVDA key and instructions on how to activate the NVDA menu.
It also provides quick access to some important configuration options.
This dialog is displayed the first time NVDA is started with a new configuration.
"""
# Translators: The main message for the Welcome dialog when the user starts NVDA for the first time.
WELCOME_MESSAGE_DETAIL = _(
"Most commands for controlling NVDA require you to hold down the NVDA key while pressing other keys.\n"
"By default, the numpad Insert and main Insert keys may both be used as the NVDA key.\n"
"You can also configure NVDA to use the CapsLock as the NVDA key.\n"
"Press NVDA+n at any time to activate the NVDA menu.\n"
"From this menu, you can configure NVDA, get help and access other NVDA functions."
)
def __init__(self, parent):
# Translators: The title of the Welcome dialog when user starts NVDA for the first time.
super(WelcomeDialog, self).__init__(parent, wx.ID_ANY, _("Welcome to NVDA"))
mainSizer=wx.BoxSizer(wx.VERTICAL)
# Translators: The header for the Welcome dialog when user starts NVDA for the first time. This is in larger,
# bold lettering
welcomeTextHeader = wx.StaticText(self, label=_("Welcome to NVDA!"))
welcomeTextHeader.SetFont(wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.BOLD))
mainSizer.AddSpacer(guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
mainSizer.Add(welcomeTextHeader,border=20,flag=wx.EXPAND|wx.LEFT|wx.RIGHT)
mainSizer.AddSpacer(guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
welcomeTextDetail = wx.StaticText(self, wx.ID_ANY, self.WELCOME_MESSAGE_DETAIL)
mainSizer.Add(welcomeTextDetail,border=20,flag=wx.EXPAND|wx.LEFT|wx.RIGHT)
optionsSizer = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, _("Options")), wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, sizer=optionsSizer)
# Translators: The label of a combobox in the Welcome dialog.
kbdLabelText = _("&Keyboard layout:")
layouts = keyboardHandler.KeyboardInputGesture.LAYOUTS
self.kbdNames = sorted(layouts)
kbdChoices = [layouts[layout] for layout in self.kbdNames]
self.kbdList = sHelper.addLabeledControl(kbdLabelText, wx.Choice, choices=kbdChoices)
try:
index = self.kbdNames.index(config.conf["keyboard"]["keyboardLayout"])
self.kbdList.SetSelection(index)
except:
log.error("Could not set Keyboard layout list to current layout",exc_info=True)
# Translators: The label of a checkbox in the Welcome dialog.
capsAsNVDAModifierText = _("&Use CapsLock as an NVDA modifier key")
self.capsAsNVDAModifierCheckBox = sHelper.addItem(wx.CheckBox(self, label=capsAsNVDAModifierText))
self.capsAsNVDAModifierCheckBox.SetValue(config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"])
# Translators: The label of a checkbox in the Welcome dialog.
startAfterLogonText = _("&Automatically start NVDA after I log on to Windows")
self.startAfterLogonCheckBox = sHelper.addItem(wx.CheckBox(self, label=startAfterLogonText))
self.startAfterLogonCheckBox.Value = config.getStartAfterLogon()
if globalVars.appArgs.secure or config.isAppX or not config.isInstalledCopy():
self.startAfterLogonCheckBox.Disable()
# Translators: The label of a checkbox in the Welcome dialog.
showWelcomeDialogAtStartupText = _("&Show this dialog when NVDA starts")
self.showWelcomeDialogAtStartupCheckBox = sHelper.addItem(wx.CheckBox(self, label=showWelcomeDialogAtStartupText))
self.showWelcomeDialogAtStartupCheckBox.SetValue(config.conf["general"]["showWelcomeDialogAtStartup"])
mainSizer.Add(optionsSizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Add(self.CreateButtonSizer(wx.OK), border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL|wx.ALIGN_RIGHT)
self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.kbdList.SetFocus()
self.CentreOnScreen()
def onOk(self, evt):
layout = self.kbdNames[self.kbdList.GetSelection()]
config.conf["keyboard"]["keyboardLayout"] = layout
config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"] = self.capsAsNVDAModifierCheckBox.IsChecked()
if self.startAfterLogonCheckBox.Enabled:
config.setStartAfterLogon(self.startAfterLogonCheckBox.Value)
config.conf["general"]["showWelcomeDialogAtStartup"] = self.showWelcomeDialogAtStartupCheckBox.IsChecked()
try:
config.conf.save()
except:
log.debugWarning("Could not save",exc_info=True)
self.EndModal(wx.ID_OK)
@classmethod
def run(cls):
"""Prepare and display an instance of this dialog.
This does not require the dialog to be instantiated.
"""
mainFrame.prePopup()
d = cls(mainFrame)
d.ShowModal()
d.Destroy()
mainFrame.postPopup()
class LauncherDialog(wx.Dialog):
"""The dialog that is displayed when NVDA is started from the launcher.
This displays the license and allows the user to install or create a portable copy of NVDA.
"""
def __init__(self, parent):
super(LauncherDialog, self).__init__(parent, title=versionInfo.name)
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: The label of the license text which will be shown when NVDA installation program starts.
groupLabel = _("License Agreement")
sizer = sHelper.addItem(wx.StaticBoxSizer(wx.StaticBox(self, label=groupLabel), wx.VERTICAL))
licenseTextCtrl = wx.TextCtrl(self, size=(500, 400), style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
licenseTextCtrl.Value = codecs.open(getDocFilePath("copying.txt", False), "r", encoding="UTF-8").read()
sizer.Add(licenseTextCtrl)
# Translators: The label for a checkbox in NvDA installation program to agree to the license agreement.
agreeText = _("I &agree")
self.licenseAgreeCheckbox = sHelper.addItem(wx.CheckBox(self, label=agreeText))
self.licenseAgreeCheckbox.Value = False
self.licenseAgreeCheckbox.Bind(wx.EVT_CHECKBOX, self.onLicenseAgree)
sizer = sHelper.addItem(wx.GridSizer(2, 2, 0, 0))
self.actionButtons = []
# Translators: The label of the button in NVDA installation program to install NvDA on the user's computer.
ctrl = wx.Button(self, label=_("&Install NVDA on this computer"))
sizer.Add(ctrl)
ctrl.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(evt, mainFrame.onInstallCommand))
self.actionButtons.append(ctrl)
# Translators: The label of the button in NVDA installation program to create a portable version of NVDA.
ctrl = wx.Button(self, label=_("Create &portable copy"))
sizer.Add(ctrl)
ctrl.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(evt, mainFrame.onCreatePortableCopyCommand))
self.actionButtons.append(ctrl)
# Translators: The label of the button in NVDA installation program to continue using the installation program as a temporary copy of NVDA.
ctrl = wx.Button(self, label=_("&Continue running"))
sizer.Add(ctrl)
ctrl.Bind(wx.EVT_BUTTON, self.onContinueRunning)
self.actionButtons.append(ctrl)
sizer.Add(wx.Button(self, label=_("E&xit"), id=wx.ID_CANCEL))
# If we bind this on the button, it fails to trigger when the dialog is closed.
self.Bind(wx.EVT_BUTTON, self.onExit, id=wx.ID_CANCEL)
for ctrl in self.actionButtons:
ctrl.Disable()
mainSizer.Add(sHelper.sizer, border = guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.CentreOnScreen()
def onLicenseAgree(self, evt):
for ctrl in self.actionButtons:
ctrl.Enable(evt.IsChecked())
def onAction(self, evt, func):
self.Destroy()
func(evt)
def onContinueRunning(self, evt):
self.Destroy()
core.doStartupDialogs()
def onExit(self, evt):
wx.GetApp().ExitMainLoop()
@classmethod
def run(cls):
"""Prepare and display an instance of this dialog.
This does not require the dialog to be instantiated.
"""
mainFrame.prePopup()
d = cls(mainFrame)
d.Show()
mainFrame.postPopup()
class ExitDialog(wx.Dialog):
_instance = None
def __new__(cls, parent):
# Make this a singleton.
inst = cls._instance() if cls._instance else None
if not inst:
return super(cls, cls).__new__(cls, parent)
return inst
def __init__(self, parent):
inst = ExitDialog._instance() if ExitDialog._instance else None
if inst:
return
# Use a weakref so the instance can die.
ExitDialog._instance = weakref.ref(self)
# Translators: The title of the dialog to exit NVDA
super(ExitDialog, self).__init__(parent, title=_("Exit NVDA"))
dialog = self
mainSizer = wx.BoxSizer(wx.VERTICAL)
contentSizerHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
if globalVars.appArgs.disableAddons:
# Translators: A message in the exit Dialog shown when all add-ons are disabled.
addonsDisabledText = _("All add-ons are now disabled. They will be re-enabled on the next restart unless you choose to disable them again.")
contentSizerHelper.addItem(wx.StaticText(self, wx.ID_ANY, label=addonsDisabledText))
# Translators: The label for actions list in the Exit dialog.
labelText=_("What would you like to &do?")
self.actions = [
# Translators: An option in the combo box to choose exit action.
_("Exit"),
# Translators: An option in the combo box to choose exit action.
_("Restart")
]
# Windows Store version of NVDA does not support add-ons yet.
if not config.isAppX:
# Translators: An option in the combo box to choose exit action.
self.actions.append(_("Restart with add-ons disabled"))
# Translators: An option in the combo box to choose exit action.
self.actions.append(_("Restart with debug logging enabled"))
if updateCheck and updateCheck.isPendingUpdate():
# Translators: An option in the combo box to choose exit action.
self.actions.append(_("Install pending update"))
self.actionsList = contentSizerHelper.addLabeledControl(labelText, wx.Choice, choices=self.actions)
self.actionsList.SetSelection(0)
contentSizerHelper.addItem( self.CreateButtonSizer(wx.OK | wx.CANCEL))
self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
mainSizer.Add(contentSizerHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.Sizer = mainSizer
self.actionsList.SetFocus()
self.CentreOnScreen()
def onOk(self, evt):
action=self.actionsList.GetSelection()
		# Because the Windows Store version of NVDA does not support add-ons yet, that option is missing from its list, so add 1 when the action is 2 or above to keep the indexes aligned.
if action >= 2 and config.isAppX:
action += 1
if action == 0:
wx.GetApp().ExitMainLoop()
elif action == 1:
queueHandler.queueFunction(queueHandler.eventQueue,core.restart)
elif action == 2:
queueHandler.queueFunction(queueHandler.eventQueue,core.restart,disableAddons=True)
elif action == 3:
queueHandler.queueFunction(queueHandler.eventQueue,core.restart,debugLogging=True)
elif action == 4:
if updateCheck:
updateCheck.executeUpdate()
self.Destroy()
def onCancel(self, evt):
self.Destroy()
class ExecAndPump(threading.Thread):
"""Executes the given function with given args and kwargs in a background thread while blocking and pumping in the current thread."""
def __init__(self,func,*args,**kwargs):
self.func=func
self.args=args
self.kwargs=kwargs
super(ExecAndPump,self).__init__()
self.threadExc=None
self.start()
time.sleep(0.1)
threadHandle=ctypes.c_int()
threadHandle.value=ctypes.windll.kernel32.OpenThread(0x100000,False,self.ident)
msg=ctypes.wintypes.MSG()
while ctypes.windll.user32.MsgWaitForMultipleObjects(1,ctypes.byref(threadHandle),False,-1,255)==1:
while ctypes.windll.user32.PeekMessageW(ctypes.byref(msg),None,0,0,1):
ctypes.windll.user32.TranslateMessage(ctypes.byref(msg))
ctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))
if self.threadExc:
raise self.threadExc
def run(self):
try:
self.func(*self.args,**self.kwargs)
except Exception as e:
self.threadExc=e
log.debugWarning("task had errors",exc_info=True)
class IndeterminateProgressDialog(wx.ProgressDialog):
def __init__(self, parent, title, message):
super(IndeterminateProgressDialog, self).__init__(title, message, parent=parent)
self._speechCounter = -1
self.timer = wx.PyTimer(self.Pulse)
self.timer.Start(1000)
self.Raise()
self.CentreOnScreen()
def Pulse(self):
super(IndeterminateProgressDialog, self).Pulse()
# We want progress to be spoken on the first pulse and every 10 pulses thereafter.
# Therefore, cycle from 0 to 9 inclusive.
self._speechCounter = (self._speechCounter + 1) % 10
pbConf = config.conf["presentation"]["progressBarUpdates"]
if pbConf["progressBarOutputMode"] == "off":
return
if not pbConf["reportBackgroundProgressBars"] and not self.IsActive():
return
if pbConf["progressBarOutputMode"] in ("beep", "both"):
tones.beep(440, 40)
if pbConf["progressBarOutputMode"] in ("speak", "both") and self._speechCounter == 0:
# Translators: Announced periodically to indicate progress for an indeterminate progress bar.
speech.speakMessage(_("Please wait"))
def IsActive(self):
#4714: In wxPython 3, ProgressDialog.IsActive always seems to return False.
return winUser.isDescendantWindow(winUser.getForegroundWindow(), self.Handle)
def done(self):
self.timer.Stop()
pbConf = config.conf["presentation"]["progressBarUpdates"]
if pbConf["progressBarOutputMode"] in ("beep", "both") and (pbConf["reportBackgroundProgressBars"] or self.IsActive()):
tones.beep(1760, 40)
self.Hide()
self.Destroy()
def shouldConfigProfileTriggersBeSuspended():
"""Determine whether configuration profile triggers should be suspended in relation to NVDA's GUI.
For NVDA configuration dialogs, the configuration should remain the same as it was before the GUI was popped up
so the user can change settings in the correct profile.
Top-level windows that require this behavior should have a C{shouldSuspendConfigProfileTriggers} attribute set to C{True}.
Because these dialogs are often opened via the NVDA menu, this applies to the NVDA menu as well.
"""
if winUser.getGUIThreadInfo(ctypes.windll.kernel32.GetCurrentThreadId()).flags & 0x00000010:
# The NVDA menu is active.
return True
for window in wx.GetTopLevelWindows():
if window.IsShown() and getattr(window, "shouldSuspendConfigProfileTriggers", False):
return True
return False
class NonReEntrantTimer(wx.Timer):
"""
	Before WXPython 4, wx.Timer was non-re-entrant,
meaning that if code within its callback pumped messages (E.g. called wx.Yield) and this timer was ready to fire again,
the timer would not fire until the first callback had completed.
However, in WXPython 4, wx.Timer is now re-entrant.
Code in NVDA is not written to handle re-entrant timers, so this class provides a Timer with the old behaviour.
	This should be used in place of wx.Timer and wx.PyTimer where the callback will directly or indirectly call wx.Yield or somehow process the Windows window message queue.
For example, NVDA's core pump or other timers that run in NVDA's main thread.
Timers on braille display drivers for key detection don't need to use this as they only queue gestures rather than actually executing them.
"""
def __init__(self, run=None):
if run is not None:
self.run = run
self._inNotify = False
super(NonReEntrantTimer,self).__init__()
def run(self):
"""Subclasses can override or specify in constructor.
"""
raise NotImplementedError
def Notify(self):
if self._inNotify:
return
self._inNotify = True
try:
self.run()
finally:
self._inNotify = False
def _isDebug():
return config.conf["debugLog"]["gui"]
class AskAllowUsageStatsDialog(wx.Dialog):
"""A dialog asking if the user wishes to allow NVDA usage stats to be collected by NV Access."""
def __init__(self, parent):
# Translators: The title of the dialog asking if usage data can be collected
super(AskAllowUsageStatsDialog, self).__init__(parent, title=_("NVDA Usage Data Collection"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: A message asking the user if they want to allow usage stats gathering
message=_("In order to improve NVDA in the future, NV Access wishes to collect usage data from running copies of NVDA.\n\n"
"Data includes Operating System version, NVDA version, language, country of origin, plus certain NVDA configuration such as current synthesizer, braille display and braille table. "
"No spoken or braille content will be ever sent to NV Access. Please refer to the User Guide for a current list of all data collected.\n\n"
"Do you wish to allow NV Access to periodically collect this data in order to improve NVDA?")
sText=sHelper.addItem(wx.StaticText(self, label=message))
# the wx.Window must be constructed before we can get the handle.
import windowUtils
self.scaleFactor = windowUtils.getWindowScalingFactor(self.GetHandle())
sText.Wrap(self.scaleFactor*600) # 600 was fairly arbitrarily chosen by a visual user to look acceptable on their machine.
bHelper = sHelper.addDialogDismissButtons(guiHelper.ButtonHelper(wx.HORIZONTAL))
# Translators: The label of a Yes button in a dialog
yesButton = bHelper.addButton(self, wx.ID_YES, label=_("&Yes"))
yesButton.Bind(wx.EVT_BUTTON, self.onYesButton)
# Translators: The label of a No button in a dialog
noButton = bHelper.addButton(self, wx.ID_NO, label=_("&No"))
noButton.Bind(wx.EVT_BUTTON, self.onNoButton)
# Translators: The label of a button to remind the user later about performing some action.
remindMeButton = bHelper.addButton(self, wx.ID_CANCEL, label=_("Remind me &later"))
remindMeButton.Bind(wx.EVT_BUTTON, self.onLaterButton)
remindMeButton.SetFocus()
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
def onYesButton(self,evt):
log.debug("Usage stats gathering has been allowed")
config.conf['update']['askedAllowUsageStats']=True
config.conf['update']['allowUsageStats']=True
self.EndModal(wx.ID_YES)
def onNoButton(self,evt):
log.debug("Usage stats gathering has been disallowed")
config.conf['update']['askedAllowUsageStats']=True
config.conf['update']['allowUsageStats']=False
self.EndModal(wx.ID_NO)
def onLaterButton(self,evt):
log.debug("Usage stats gathering question has been deferred")
# Since wx.ID_CANCEL is used as the ID for the Ask Later button,
# wx automatically ends the modal itself, so only evt.Skip() needs to be called here.
evt.Skip()
| 1 | 22,983 | Is the `codecs.register(lambda name: None)` gracefully handled by codecs? The lambda is expected to return a tuple of functions (encoder, decoder, stream_reader, stream_writer) (or a CodecInfo object), according to the docs. I've only checked python's built-in help, though. | nvaccess-nvda | py |
@@ -154,6 +154,18 @@ namespace OpenTelemetry.Metrics
{
var metricStreamConfig = metricStreamConfigs[i];
var metricStreamName = metricStreamConfig?.Name ?? instrument.Name;
+
+ if (!MeterProviderBuilderSdk.IsValidInstrumentName(metricStreamName))
+ {
+ OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(
+ metricStreamName,
+ instrument.Meter.Name,
+ "Metric name is invalid.",
+ "The name must comply with the OpenTelemetry specification.");
+
+ continue;
+ }
+
if (this.metricStreamNames.ContainsKey(metricStreamName))
{
// TODO: Log that instrument is ignored | 1 | // <copyright file="MeterProviderSdk.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.Metrics;
using System.Linq;
using System.Text.RegularExpressions;
using OpenTelemetry.Internal;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Metrics
{
internal sealed class MeterProviderSdk : MeterProvider
{
internal const int MaxMetrics = 1000;
internal int ShutdownCount;
private readonly Metric[] metrics;
private readonly List<object> instrumentations = new List<object>();
private readonly List<Func<Instrument, MetricStreamConfiguration>> viewConfigs;
private readonly object collectLock = new object();
private readonly object instrumentCreationLock = new object();
private readonly Dictionary<string, bool> metricStreamNames = new Dictionary<string, bool>(StringComparer.OrdinalIgnoreCase);
private readonly MeterListener listener;
private readonly MetricReader reader;
private int metricIndex = -1;
internal MeterProviderSdk(
Resource resource,
IEnumerable<string> meterSources,
List<MeterProviderBuilderBase.InstrumentationFactory> instrumentationFactories,
List<Func<Instrument, MetricStreamConfiguration>> viewConfigs,
IEnumerable<MetricReader> readers)
{
this.Resource = resource;
this.viewConfigs = viewConfigs;
this.metrics = new Metric[MaxMetrics];
AggregationTemporality temporality = AggregationTemporality.Cumulative;
foreach (var reader in readers)
{
if (reader == null)
{
throw new ArgumentException("A null value was found.", nameof(readers));
}
reader.SetParentProvider(this);
// TODO: Actually support multiple readers.
// Currently the last reader's temporality wins.
temporality = reader.PreferredAggregationTemporality;
if (this.reader == null)
{
this.reader = reader;
}
else if (this.reader is CompositeMetricReader compositeReader)
{
compositeReader.AddReader(reader);
}
else
{
this.reader = new CompositeMetricReader(new[] { this.reader, reader });
}
}
if (instrumentationFactories.Any())
{
foreach (var instrumentationFactory in instrumentationFactories)
{
this.instrumentations.Add(instrumentationFactory.Factory());
}
}
// Setup Listener
Func<Instrument, bool> shouldListenTo = instrument => false;
if (meterSources.Any(s => s.Contains('*')))
{
var regex = GetWildcardRegex(meterSources);
shouldListenTo = instrument => regex.IsMatch(instrument.Meter.Name);
}
else if (meterSources.Any())
{
var meterSourcesToSubscribe = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
foreach (var meterSource in meterSources)
{
meterSourcesToSubscribe.Add(meterSource);
}
shouldListenTo = instrument => meterSourcesToSubscribe.Contains(instrument.Meter.Name);
}
this.listener = new MeterListener();
var viewConfigCount = this.viewConfigs.Count;
if (viewConfigCount > 0)
{
this.listener.InstrumentPublished = (instrument, listener) =>
{
if (!shouldListenTo(instrument))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "Instrument belongs to a Meter not subscribed by the provider.", "Use AddMeter to add the Meter to the provider.");
return;
}
// Creating list with initial capacity as the maximum
// possible size, to avoid any array resize/copy internally.
// There may be excess space wasted, but it'll eligible for
// GC right after this method.
var metricStreamConfigs = new List<MetricStreamConfiguration>(viewConfigCount);
foreach (var viewConfig in this.viewConfigs)
{
var metricStreamConfig = viewConfig(instrument);
if (metricStreamConfig != null)
{
metricStreamConfigs.Add(metricStreamConfig);
}
}
if (metricStreamConfigs.Count == 0)
{
// No views matched. Add null
// which will apply defaults.
// Users can turn off this default
// by adding a view like below as the last view.
// .AddView(instrumentName: "*", new MetricStreamConfiguration() { Aggregation = Aggregation.Drop })
metricStreamConfigs.Add(null);
}
var maxCountMetricsToBeCreated = metricStreamConfigs.Count;
// Create list with initial capacity as the max metric count.
// Due to duplicate/max limit, we may not end up using them
// all, and that memory is wasted until the Meter is disposed.
// TODO: Revisit to see if we need to do metrics.TrimExcess()
var metrics = new List<Metric>(maxCountMetricsToBeCreated);
lock (this.instrumentCreationLock)
{
for (int i = 0; i < maxCountMetricsToBeCreated; i++)
{
var metricStreamConfig = metricStreamConfigs[i];
var metricStreamName = metricStreamConfig?.Name ?? instrument.Name;
if (this.metricStreamNames.ContainsKey(metricStreamName))
{
// TODO: Log that instrument is ignored
// as the resulting Metric name is conflicting
// with existing name.
continue;
}
if (metricStreamConfig?.Aggregation == Aggregation.Drop)
{
// TODO: Log that instrument is ignored
// as user explicitly asked to drop it
// with View.
continue;
}
var index = ++this.metricIndex;
if (index >= MaxMetrics)
{
// TODO: Log that instrument is ignored
// as max number of Metrics have reached.
}
else
{
Metric metric;
var metricDescription = metricStreamConfig?.Description ?? instrument.Description;
string[] tagKeysInteresting = metricStreamConfig?.TagKeys;
double[] histogramBucketBounds = (metricStreamConfig is HistogramConfiguration histogramConfig
&& histogramConfig.BucketBounds != null) ? histogramConfig.BucketBounds : null;
metric = new Metric(instrument, temporality, metricStreamName, metricDescription, histogramBucketBounds, tagKeysInteresting);
this.metrics[index] = metric;
metrics.Add(metric);
this.metricStreamNames.Add(metricStreamName, true);
}
}
if (metrics.Count > 0)
{
listener.EnableMeasurementEvents(instrument, metrics);
}
}
};
// Everything double
this.listener.SetMeasurementEventCallback<double>(this.MeasurementRecordedDouble);
this.listener.SetMeasurementEventCallback<float>((instrument, value, tags, state) => this.MeasurementRecordedDouble(instrument, value, tags, state));
// Everything long
this.listener.SetMeasurementEventCallback<long>(this.MeasurementRecordedLong);
this.listener.SetMeasurementEventCallback<int>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<short>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<byte>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
}
else
{
this.listener.InstrumentPublished = (instrument, listener) =>
{
if (!shouldListenTo(instrument))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "Instrument belongs to a Meter not subscribed by the provider.", "Use AddMeter to add the Meter to the provider.");
return;
}
try
{
var metricName = instrument.Name;
Metric metric = null;
lock (this.instrumentCreationLock)
{
if (this.metricStreamNames.ContainsKey(metricName))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(metricName, instrument.Meter.Name, "Metric name conflicting with existing name.", "Either change the name of the instrument or change name using View.");
return;
}
var index = ++this.metricIndex;
if (index >= MaxMetrics)
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(metricName, instrument.Meter.Name, "Maximum allowed Metrics for the provider exceeded.", "Use views to drop unused instruments. Or configure Provider to allow higher limit.");
return;
}
else
{
metric = new Metric(instrument, temporality, metricName, instrument.Description);
this.metrics[index] = metric;
this.metricStreamNames.Add(metricName, true);
}
}
listener.EnableMeasurementEvents(instrument, metric);
}
catch (Exception)
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "SDK internal error occurred.", "Contact SDK owners.");
}
};
// Everything double
this.listener.SetMeasurementEventCallback<double>(this.MeasurementRecordedDoubleSingleStream);
this.listener.SetMeasurementEventCallback<float>((instrument, value, tags, state) => this.MeasurementRecordedDoubleSingleStream(instrument, value, tags, state));
// Everything long
this.listener.SetMeasurementEventCallback<long>(this.MeasurementRecordedLongSingleStream);
this.listener.SetMeasurementEventCallback<int>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<short>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<byte>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
}
this.listener.MeasurementsCompleted = (instrument, state) => this.MeasurementsCompleted(instrument, state);
this.listener.Start();
static Regex GetWildcardRegex(IEnumerable<string> collection)
{
var pattern = '^' + string.Join("|", from name in collection select "(?:" + Regex.Escape(name).Replace("\\*", ".*") + ')') + '$';
return new Regex(pattern, RegexOptions.Compiled | RegexOptions.IgnoreCase);
}
}
internal Resource Resource { get; }
internal List<object> Instrumentations => this.instrumentations;
internal MetricReader Reader => this.reader;
internal void MeasurementsCompleted(Instrument instrument, object state)
{
Console.WriteLine($"Instrument {instrument.Meter.Name}:{instrument.Name} completed.");
}
internal void MeasurementRecordedDouble(Instrument instrument, double value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metrics = state as List<Metric>;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metrics == null)
{
// TODO: log
return;
}
if (metrics.Count == 1)
{
// special casing the common path
// as this is faster than the
// foreach, when count is 1.
metrics[0].UpdateDouble(value, tagsRos);
}
else
{
foreach (var metric in metrics)
{
metric.UpdateDouble(value, tagsRos);
}
}
}
internal void MeasurementRecordedLong(Instrument instrument, long value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metrics = state as List<Metric>;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metrics == null)
{
// TODO: log
return;
}
if (metrics.Count == 1)
{
// special casing the common path
// as this is faster than the
// foreach, when count is 1.
metrics[0].UpdateLong(value, tagsRos);
}
else
{
foreach (var metric in metrics)
{
metric.UpdateLong(value, tagsRos);
}
}
}
internal void MeasurementRecordedLongSingleStream(Instrument instrument, long value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metric = state as Metric;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metric == null)
{
// TODO: log
return;
}
metric.UpdateLong(value, tagsRos);
}
internal void MeasurementRecordedDoubleSingleStream(Instrument instrument, double value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metric = state as Metric;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metric == null)
{
// TODO: log
return;
}
metric.UpdateDouble(value, tagsRos);
}
internal Batch<Metric> Collect()
{
lock (this.collectLock)
{
try
{
// Record all observable instruments
try
{
this.listener.RecordObservableInstruments();
}
catch (Exception exception)
{
// TODO:
// It doesn't look like we can find which instrument callback
// threw.
OpenTelemetrySdkEventSource.Log.MetricObserverCallbackException(exception);
}
var indexSnapShot = Math.Min(this.metricIndex, MaxMetrics - 1);
var target = indexSnapShot + 1;
for (int i = 0; i < target; i++)
{
this.metrics[i].SnapShot();
}
return (target > 0) ? new Batch<Metric>(this.metrics, target) : default;
}
catch (Exception)
{
// TODO: Log
return default;
}
}
}
/// <summary>
/// Called by <c>ForceFlush</c>. This function should block the current
/// thread until flush completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number (non-negative) of milliseconds to wait, or
/// <c>Timeout.Infinite</c> to wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when flush succeeded; otherwise, <c>false</c>.
/// </returns>
/// <remarks>
/// This function is called synchronously on the thread which made the
/// first call to <c>ForceFlush</c>. This function should not throw
/// exceptions.
/// </remarks>
internal bool OnForceFlush(int timeoutMilliseconds)
{
return this.reader?.Collect(timeoutMilliseconds) ?? true;
}
/// <summary>
/// Called by <c>Shutdown</c>. This function should block the current
/// thread until shutdown completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number (non-negative) of milliseconds to wait, or
/// <c>Timeout.Infinite</c> to wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when shutdown succeeded; otherwise, <c>false</c>.
/// </returns>
/// <remarks>
/// This function is called synchronously on the thread which made the
/// first call to <c>Shutdown</c>. This function should not throw
/// exceptions.
/// </remarks>
internal bool OnShutdown(int timeoutMilliseconds)
{
return this.reader?.Shutdown(timeoutMilliseconds) ?? true;
}
protected override void Dispose(bool disposing)
{
if (this.instrumentations != null)
{
foreach (var item in this.instrumentations)
{
(item as IDisposable)?.Dispose();
}
this.instrumentations.Clear();
}
// Wait for up to 5 seconds grace period
this.reader?.Shutdown(5000);
this.reader?.Dispose();
this.listener.Dispose();
}
}
}
| 1 | 21,999 | In this case, it could be either the instrument name or the view name that is invalid. It might be better if we can offer a very specific message. (Not blocking this; we can add it as a follow-up if needed.) | open-telemetry-opentelemetry-dotnet | .cs
@@ -1654,9 +1654,10 @@ class TargetLocator {
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
+ let paramName = this.driver_.getExecutor().w3c ? 'handle' : 'name';
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
- setParameter('name', nameOrHandle),
+ setParameter(paramName, nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
'use strict';
const actions = require('./actions');
const by = require('./by');
const Capabilities = require('./capabilities').Capabilities;
const command = require('./command');
const error = require('./error');
const input = require('./input');
const logging = require('./logging');
const Session = require('./session').Session;
const Symbols = require('./symbols');
const promise = require('./promise');
/**
* Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait
* command}.
*
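* A minimal illustrative sketch of a custom condition (the title text checked
* here is an arbitrary assumption, not part of the API):
*
*     let titleIsReady = new Condition('for title to contain "Ready"',
*         function(driver) {
*           return driver.getTitle().then(t => t.indexOf('Ready') !== -1);
*         });
*     driver.wait(titleIsReady, 5000);
*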
* @template OUT
*/
class Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): OUT} fn The condition function to
* evaluate on each iteration of the wait loop.
*/
constructor(message, fn) {
/** @private {string} */
this.description_ = 'Waiting ' + message;
/** @type {function(!WebDriver): OUT} */
this.fn = fn;
}
/** @return {string} A description of this condition. */
description() {
return this.description_;
}
}
/**
* Defines a condition that will result in a {@link WebElement}.
*
* @extends {Condition<!(WebElement|promise.Promise<!WebElement>)>}
*/
class WebElementCondition extends Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): !(WebElement|promise.Promise<!WebElement>)}
* fn The condition function to evaluate on each iteration of the wait
* loop.
*/
constructor(message, fn) {
super(message, fn);
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
* Translates a command to its wire-protocol representation before passing it
* to the given `executor` for execution.
* @param {!command.Executor} executor The executor to use.
* @param {!command.Command} command The command to execute.
* @return {!Promise} A promise that will resolve with the command response.
*/
function executeCommand(executor, command) {
return toWireValue(command.getParameters()).
then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
});
}
/**
* Converts an object to its JSON representation in the WebDriver wire protocol.
* When converting values of type object, the following steps will be taken:
* <ol>
* <li>if the object is a WebElement, the return value will be the element's
* server ID
* <li>if the object defines a {@link Symbols.serialize} method, this algorithm
* will be recursively applied to the object's serialized representation
* <li>if the object provides a "toJSON" function, this algorithm will
* recursively be applied to the result of that function
* <li>otherwise, the value of each key will be recursively converted according
* to the rules above.
* </ol>
*
* @param {*} obj The object to convert.
* @return {!Promise<?>} A promise that will resolve to the input value's JSON
* representation.
*/
function toWireValue(obj) {
if (promise.isPromise(obj)) {
return Promise.resolve(obj).then(toWireValue);
}
return Promise.resolve(convertValue(obj));
}
function convertValue(value) {
if (value === void 0 || value === null) {
return value;
}
if (typeof value === 'boolean'
|| typeof value === 'number'
|| typeof value === 'string') {
return value;
}
if (Array.isArray(value)) {
return convertKeys(value);
}
if (typeof value === 'function') {
return '' + value;
}
if (typeof value[Symbols.serialize] === 'function') {
return toWireValue(value[Symbols.serialize]());
} else if (typeof value.toJSON === 'function') {
return toWireValue(value.toJSON());
}
return convertKeys(value);
}
function convertKeys(obj) {
const isArray = Array.isArray(obj);
const numKeys = isArray ? obj.length : Object.keys(obj).length;
const ret = isArray ? new Array(numKeys) : {};
if (!numKeys) {
return Promise.resolve(ret);
}
let numResolved = 0;
function forEachKey(obj, fn) {
if (Array.isArray(obj)) {
for (let i = 0, n = obj.length; i < n; i++) {
fn(obj[i], i);
}
} else {
for (let key in obj) {
fn(obj[key], key);
}
}
}
return new Promise(function(done, reject) {
forEachKey(obj, function(value, key) {
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
value = convertValue(value);
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
setValue(value);
}
}
function setValue(value) {
ret[key] = value;
maybeFulfill();
}
});
function maybeFulfill() {
if (++numResolved === numKeys) {
done(ret);
}
}
});
}
/**
* Converts a value from its JSON representation according to the WebDriver wire
* protocol. Any JSON object that defines a WebElement ID will be decoded to a
* {@link WebElement} object. All other values will be passed through as is.
*
* @param {!WebDriver} driver The driver to use as the parent of any unwrapped
* {@link WebElement} values.
* @param {*} value The value to convert.
* @return {*} The converted value.
*/
function fromWireValue(driver, value) {
if (Array.isArray(value)) {
value = value.map(v => fromWireValue(driver, v));
} else if (WebElement.isId(value)) {
let id = WebElement.extractId(value);
value = new WebElement(driver, id);
} else if (value && typeof value === 'object') {
let result = {};
for (let key in value) {
if (value.hasOwnProperty(key)) {
result[key] = fromWireValue(driver, value[key]);
}
}
value = result;
}
return value;
}
/**
* Creates a new WebDriver client, which provides control over a browser.
*
* Every command.Command returns a {@link promise.Promise} that
* represents the result of that command. Callbacks may be registered on this
* object to manipulate the command result or catch an expected error. Any
* commands scheduled with a callback are considered sub-commands and will
* execute before the next command in the current frame. For example:
*
* var message = [];
* driver.call(message.push, message, 'a').then(function() {
* driver.call(message.push, message, 'b');
* });
* driver.call(message.push, message, 'c');
* driver.call(function() {
* alert('message is abc? ' + (message.join('') == 'abc'));
* });
*
*/
class WebDriver {
/**
* @param {!(Session|promise.Promise<!Session>)} session Either a
* known session or a promise that will be resolved to a session.
* @param {!command.Executor} executor The executor to use when sending
* commands to the browser.
* @param {promise.ControlFlow=} opt_flow The flow to
* schedule commands through. Defaults to the active flow object.
*/
constructor(session, executor, opt_flow) {
/** @private {!promise.Promise<!Session>} */
this.session_ = promise.fulfilled(session);
/** @private {!command.Executor} */
this.executor_ = executor;
/** @private {!promise.ControlFlow} */
this.flow_ = opt_flow || promise.controlFlow();
/** @private {input.FileDetector} */
this.fileDetector_ = null;
}
/**
* Creates a new WebDriver client for an existing session.
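*
* A brief illustrative sketch; the executor and the session ID are assumed to
* come from elsewhere (for example, a session created by another process):
*
*     let driver = WebDriver.attachToSession(executor, 'existing-session-id');
*     driver.getTitle().then(title => console.log(title));
*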
* @param {!command.Executor} executor Command executor to use when querying
* for session details.
* @param {string} sessionId ID of the session to attach to.
* @param {promise.ControlFlow=} opt_flow The control flow all
* driver commands should execute under. Defaults to the
* {@link promise.controlFlow() currently active} control flow.
* @return {!WebDriver} A new client for the specified session.
*/
static attachToSession(executor, sessionId, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.DESCRIBE_SESSION)
.setParameter('sessionId', sessionId);
let session = flow.execute(
() => executeCommand(executor, cmd).catch(err => {
// The DESCRIBE_SESSION command is not supported by the W3C spec, so
// if we get back an unknown command, just return a session with
// unknown capabilities.
if (err instanceof error.UnknownCommandError) {
return new Session(sessionId, new Capabilities);
}
throw err;
}),
'WebDriver.attachToSession()');
return new WebDriver(session, executor, flow);
}
/**
* Creates a new WebDriver session.
*
* By default, the requested session `capabilities` are merely "desired" and
* the remote end will still create a new session even if it cannot satisfy
* all of the requested capabilities. You can query which capabilities a
* session actually has using the
* {@linkplain #getCapabilities() getCapabilities()} method on the returned
* WebDriver instance.
*
* To define _required capabilities_, provide the `capabilities` as an object
* literal with `required` and `desired` keys. The `desired` key may be
* omitted if all capabilities are required, and vice versa. If the server
* cannot create a session with all of the required capabilities, it will
* return an {@linkplain error.SessionNotCreatedError}.
*
* let required = new Capabilities().set('browserName', 'firefox');
* let desired = new Capabilities().set('version', '45');
* let driver = WebDriver.createSession(executor, {required, desired});
*
* This function will always return a WebDriver instance. If there is an error
* creating the session, such as the aforementioned SessionNotCreatedError,
* the driver will have a rejected {@linkplain #getSession session} promise.
* It is recommended that this promise is left _unhandled_ so it will
* propagate through the {@linkplain promise.ControlFlow control flow} and
* cause subsequent commands to fail.
*
* let required = Capabilities.firefox();
* let driver = WebDriver.createSession(executor, {required});
*
* // If the createSession operation failed, then this command will
* // also fail, propagating the creation failure.
* driver.get('http://www.google.com').catch(e => console.log(e));
*
* @param {!command.Executor} executor The executor to create the new session
* with.
* @param {(!Capabilities|
* {desired: (Capabilities|undefined),
* required: (Capabilities|undefined)})} capabilities The desired
* capabilities for the new session.
* @param {promise.ControlFlow=} opt_flow The control flow all driver
* commands should execute under, including the initial session creation.
* Defaults to the {@link promise.controlFlow() currently active}
* control flow.
* @return {!WebDriver} The driver for the newly created session.
*/
static createSession(executor, capabilities, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.NEW_SESSION);
if (capabilities && (capabilities.desired || capabilities.required)) {
cmd.setParameter('desiredCapabilities', capabilities.desired);
cmd.setParameter('requiredCapabilities', capabilities.required);
} else {
cmd.setParameter('desiredCapabilities', capabilities);
}
let session = flow.execute(
() => executeCommand(executor, cmd),
'WebDriver.createSession()');
return new WebDriver(session, executor, flow);
}
/**
* @return {!promise.ControlFlow} The control flow used by this
* instance.
*/
controlFlow() {
return this.flow_;
}
/**
* Schedules a {@link command.Command} to be executed by this driver's
* {@link command.Executor}.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
*/
schedule(command, description) {
var self = this;
checkHasNotQuit();
command.setParameter('sessionId', this.session_);
// If any of the command parameters are rejected promises, those
// rejections may be reported as unhandled before the control flow
// attempts to execute the command. To ensure parameter errors
// propagate through the command itself, we resolve all of the
// command parameters now, but suppress any errors until the ControlFlow
// actually executes the command. This addresses scenarios like catching
// an element not found error in:
//
// driver.findElement(By.id('foo')).click().catch(function(e) {
// if (e instanceof NoSuchElementError) {
// // Do something.
// }
// });
var prepCommand = toWireValue(command.getParameters());
prepCommand.catch(function() {});
var flow = this.flow_;
var executor = this.executor_;
return flow.execute(function() {
// A call to WebDriver.quit() may have been scheduled in the same event
// loop as this |command|, which would prevent us from detecting that the
// driver has quit above. Therefore, we need to make another quick check.
// We still check above so we can fail as early as possible.
checkHasNotQuit();
// Retrieve resolved command parameters; any previously suppressed errors
// will now propagate up through the control flow as part of the command
// execution.
return prepCommand.then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
}).then(value => fromWireValue(self, value));
}, description);
function checkHasNotQuit() {
if (!self.session_) {
throw new error.NoSuchSessionError(
'This driver instance does not have a valid session ID ' +
'(did you call WebDriver.quit()?) and may no longer be ' +
'used.');
}
}
}
/**
* Sets the {@linkplain input.FileDetector file detector} that should be
* used with this instance.
* @param {input.FileDetector} detector The detector to use or {@code null}.
*/
setFileDetector(detector) {
this.fileDetector_ = detector;
}
/**
* @return {!command.Executor} The command executor used by this instance.
*/
getExecutor() {
return this.executor_;
}
/**
* @return {!promise.Promise<!Session>} A promise for this client's
* session.
*/
getSession() {
return this.session_;
}
/**
* @return {!promise.Promise<!Capabilities>} A promise
* that will resolve with the this instance's capabilities.
*/
getCapabilities() {
return this.session_.then(session => session.getCapabilities());
}
/**
* Schedules a command to quit the current session. After calling quit, this
* instance will be invalidated and may no longer be used to issue commands
* against the browser.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
quit() {
var result = this.schedule(
new command.Command(command.Name.QUIT),
'WebDriver.quit()');
// Delete our session ID when the quit command finishes; this will allow us
// to throw an error when attempting to use a driver post-quit.
return result.finally(() => delete this.session_);
}
/**
* Creates a new action sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.ActionSequence#perform} is
* called. Example:
*
* driver.actions().
* mouseDown(element1).
* mouseMove(element2).
* mouseUp().
* perform();
*
* @return {!actions.ActionSequence} A new action sequence for this instance.
*/
actions() {
return new actions.ActionSequence(this);
}
/**
* Creates a new touch sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.TouchSequence#perform} is
* called. Example:
*
* driver.touchActions().
* tap(element1).
* doubleTap(element2).
* perform();
*
* @return {!actions.TouchSequence} A new touch sequence for this instance.
*/
touchActions() {
return new actions.TouchSequence(this);
}
/**
* Schedules a command to execute JavaScript in the context of the currently
* selected frame or window. The script fragment will be executed as the body
* of an anonymous function. If the script is provided as a function object,
* that function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@linkplain WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* {@code document} may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
* function's return value:
*
* - For an HTML element, the value will resolve to a {@linkplain WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
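* __Example:__ an illustrative sketch; `el` is assumed to be a previously
* located {@linkplain WebElement}:
*
*     driver.executeScript('return document.title;')
*         .then(title => console.log(title));
*     driver.executeScript('arguments[0].scrollIntoView();', el);
*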
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
* script's return value.
* @template T
*/
executeScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args =
arguments.length > 1 ? Array.prototype.slice.call(arguments, 1) : [];
return this.schedule(
new command.Command(command.Name.EXECUTE_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeScript()');
}
/**
* Schedules a command to execute asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with {@link #executeScript},
* scripts executed with this function must explicitly signal they are finished
* by invoking the provided callback. This callback will always be injected
* into the executed function as the last argument, and thus may be referenced
* with {@code arguments[arguments.length - 1]}. The following steps will be
* taken for resolving this function's return value against the first argument
* to the script's callback function:
*
* - For an HTML element, the value will resolve to a
* {@link WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* __Example #1:__ Performing a sleep that is synchronized with the currently
* selected window:
*
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log(
* 'Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
*
* __Example #2:__ Synchronizing a test with an AJAX application:
*
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
* driver.findElement(By.id('to')).sendKeys('[email protected]');
*
* __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In
* this example, the inject script is specified with a function literal. When
* using this format, the function is converted to a string for injection, so it
* should not reference any symbols not defined in the scope of the page under
* test.
*
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
* callback(xhr.responseText);
* }
* };
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
* script's return value.
* @template T
*/
executeAsyncScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args = Array.prototype.slice.call(arguments, 1);
return this.schedule(
new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeScript()');
}
/**
* Schedules a command to execute a custom function.
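*
* __Example__ (illustrative only): scheduling a plain function so it runs
* after all previously scheduled commands:
*
*     driver.call(function() {
*       console.log('all prior commands have completed');
*     });
*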
* @param {function(...): (T|promise.Promise<T>)} fn The function to
* execute.
* @param {Object=} opt_scope The object in whose scope to execute the function.
* @param {...*} var_args Any arguments to pass to the function.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the function's result.
* @template T
*/
call(fn, opt_scope, var_args) {
let args = Array.prototype.slice.call(arguments, 2);
let flow = this.flow_;
return flow.execute(function() {
return promise.fullyResolved(args).then(function(args) {
if (promise.isGenerator(fn)) {
args.unshift(fn, opt_scope);
return promise.consume.apply(null, args);
}
return fn.apply(opt_scope, args);
});
}, 'WebDriver.call(' + (fn.name || 'function') + ')');
}
/**
* Schedules a command to wait for a condition to hold. The condition may be
* specified by a {@link Condition}, as a custom function, or as any
* promise-like thenable.
*
* For a {@link Condition} or function, the wait will repeatedly
* evaluate the condition until it returns a truthy value. If any errors occur
* while evaluating the condition, they will be allowed to propagate. In the
* event a condition returns a {@link promise.Promise promise}, the polling
* loop will wait for it to be resolved and use the resolved value for whether
* the condition has been satisfied. Note the resolution time for a promise
* is factored into whether a wait has timed out.
*
* Note, if the provided condition is a {@link WebElementCondition}, then
* the wait will return a {@link WebElementPromise} that will resolve to the
* element that satisfied the condition.
*
* _Example:_ waiting up to 10 seconds for an element to be present on the
* page.
*
* var button = driver.wait(until.elementLocated(By.id('foo')), 10000);
* button.click();
*
* This function may also be used to block the command flow on the resolution
* of any thenable promise object. When given a promise, the command will
* simply wait for its resolution before completing. A timeout may be provided
* to fail the command if the promise does not resolve before the timeout
* expires.
*
* _Example:_ Suppose you have a function, `startTestServer`, that returns a
* promise for when a server is ready for requests. You can block a WebDriver
* client on this promise with:
*
* var started = startTestServer();
* driver.wait(started, 5 * 1000, 'Server should start within 5 seconds');
* driver.get(getServerUrl());
*
* @param {!(promise.Promise<T>|
* Condition<T>|
* function(!WebDriver): T)} condition The condition to
* wait on, defined as a promise, condition object, or a function to
* evaluate as a condition.
* @param {number=} opt_timeout How long to wait for the condition to be true.
* @param {string=} opt_message An optional message to use if the wait times
* out.
* @return {!(promise.Promise<T>|WebElementPromise)} A promise that will be
* resolved with the first truthy value returned by the condition
* function, or rejected if the condition times out. If the input
* condition is an instance of a {@link WebElementCondition},
* the returned value will be a {@link WebElementPromise}.
* @template T
*/
wait(condition, opt_timeout, opt_message) {
if (promise.isPromise(condition)) {
return this.flow_.wait(
/** @type {!promise.Promise} */(condition),
opt_timeout, opt_message);
}
var message = opt_message;
var fn = /** @type {!Function} */(condition);
if (condition instanceof Condition) {
message = message || condition.description();
fn = condition.fn;
}
var driver = this;
var result = this.flow_.wait(function() {
if (promise.isGenerator(fn)) {
return promise.consume(fn, null, [driver]);
}
return fn(driver);
}, opt_timeout, message);
if (condition instanceof WebElementCondition) {
result = new WebElementPromise(this, result.then(function(value) {
if (!(value instanceof WebElement)) {
throw TypeError(
'WebElementCondition did not resolve to a WebElement: '
+ Object.prototype.toString.call(value));
}
return value;
}));
}
return result;
}
/**
* Schedules a command to make the driver sleep for the given amount of time.
* @param {number} ms The amount of time, in milliseconds, to sleep.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the sleep has finished.
*/
sleep(ms) {
return this.flow_.timeout(ms, 'WebDriver.sleep(' + ms + ')');
}
/**
* Schedules a command to retrieve the current window handle.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current window handle.
*/
getWindowHandle() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE),
'WebDriver.getWindowHandle()');
}
/**
* Schedules a command to retrieve the current list of available window handles.
* @return {!promise.Promise.<!Array<string>>} A promise that will
* be resolved with an array of window handles.
*/
getAllWindowHandles() {
return this.schedule(
new command.Command(command.Name.GET_WINDOW_HANDLES),
'WebDriver.getAllWindowHandles()');
}
/**
* Schedules a command to retrieve the current page's source. The page source
* returned is a representation of the underlying DOM: do not expect it to be
* formatted or escaped in the same way as the response sent from the web
* server.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page source.
*/
getPageSource() {
return this.schedule(
new command.Command(command.Name.GET_PAGE_SOURCE),
'WebDriver.getPageSource()');
}
/**
* Schedules a command to close the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
close() {
return this.schedule(new command.Command(command.Name.CLOSE),
'WebDriver.close()');
}
/**
* Schedules a command to navigate to the given URL.
* @param {string} url The fully qualified URL to open.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the document has finished loading.
*/
get(url) {
return this.navigate().to(url);
}
/**
* Schedules a command to retrieve the URL of the current page.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current URL.
*/
getCurrentUrl() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_URL),
'WebDriver.getCurrentUrl()');
}
/**
* Schedules a command to retrieve the current page's title.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page's title.
*/
getTitle() {
return this.schedule(new command.Command(command.Name.GET_TITLE),
'WebDriver.getTitle()');
}
/**
* Schedule a command to find an element on the page. If the element cannot be
* found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned
* by the driver. Unlike other commands, this error cannot be suppressed. In
* other words, scheduling a command to find an element doubles as an assert
* that the element is present on the page. To test whether an element is
* present on the page, use {@link #isElementPresent} instead.
*
* The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
*
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
let id;
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
id = this.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule(cmd, 'WebDriver.findElement(' + locator + ')');
}
return new WebElementPromise(this, id);
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search
* context.
* @return {!promise.Promise.<!WebElement>} A
* promise that will resolve to a list of WebElements.
* @private
*/
findElementInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (Array.isArray(result)) {
result = result[0];
}
if (!(result instanceof WebElement)) {
throw new TypeError('Custom locator did not return a WebElement');
}
return result;
});
}
/**
* Schedule a command to search for multiple elements on the page.
*
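* __Example__ (a minimal sketch; the CSS selector is illustrative):
*
*     driver.findElements(By.css('li')).then(function(items) {
*       console.log('found ' + items.length + ' list items');
*     });
*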
* @param {!(by.By|Function)} locator The locator to use.
* @return {!promise.Promise.<!Array.<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
return this.findElementsInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
let res = this.schedule(cmd, 'WebDriver.findElements(' + locator + ')');
return res.catch(function(e) {
if (e instanceof error.NoSuchElementError) {
return [];
}
throw e;
});
}
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
* @return {!promise.Promise<!Array<!WebElement>>} A promise that
* will resolve to an array of WebElements.
* @private
*/
findElementsInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (result instanceof WebElement) {
return [result];
}
if (!Array.isArray(result)) {
return [];
}
return result.filter(function(item) {
return item instanceof WebElement;
});
});
}
/**
* Schedule a command to take a screenshot. The driver makes a best effort to
* return a screenshot of the following, in order of preference:
*
* 1. Entire page
* 2. Current window
* 3. Visible portion of the current frame
* 4. The entire display containing the browser
*
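* __Example__ (illustrative; assumes Node's built-in `fs` module is an
* acceptable way to save the image):
*
*     driver.takeScreenshot().then(function(png) {
*       require('fs').writeFileSync('screenshot.png', png, 'base64');
*     });
*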
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot() {
return this.schedule(new command.Command(command.Name.SCREENSHOT),
'WebDriver.takeScreenshot()');
}
/**
* @return {!Options} The options interface for this instance.
*/
manage() {
return new Options(this);
}
/**
* @return {!Navigation} The navigation interface for this instance.
*/
navigate() {
return new Navigation(this);
}
/**
* @return {!TargetLocator} The target locator interface for this
* instance.
*/
switchTo() {
return new TargetLocator(this);
}
}
/**
* Interface for navigating back and forth in the browser history.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.navigate()
*
* @see WebDriver#navigate()
*/
class Navigation {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to navigate to a new URL.
* @param {string} url The URL to navigate to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the URL has been loaded.
*/
to(url) {
return this.driver_.schedule(
new command.Command(command.Name.GET).
setParameter('url', url),
'WebDriver.navigate().to(' + url + ')');
}
/**
* Schedules a command to move backwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
back() {
return this.driver_.schedule(
new command.Command(command.Name.GO_BACK),
'WebDriver.navigate().back()');
}
/**
* Schedules a command to move forwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
forward() {
return this.driver_.schedule(
new command.Command(command.Name.GO_FORWARD),
'WebDriver.navigate().forward()');
}
/**
* Schedules a command to refresh the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
refresh() {
return this.driver_.schedule(
new command.Command(command.Name.REFRESH),
'WebDriver.navigate().refresh()');
}
}
/**
* Provides methods for managing browser and driver state.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with {@linkplain WebDriver#manage() webdriver.manage()}.
*/
class Options {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to add a cookie.
*
* __Sample Usage:__
*
* // Set a basic cookie.
* driver.manage().addCookie({name: 'foo', value: 'bar'});
*
* // Set a cookie that expires in 10 minutes.
* let expiry = new Date(Date.now() + (10 * 60 * 1000));
* driver.manage().addCookie({name: 'foo', value: 'bar', expiry});
*
* // The cookie expiration may also be specified in seconds since epoch.
* driver.manage().addCookie({
* name: 'foo',
* value: 'bar',
* expiry: Math.floor(Date.now() / 1000)
* });
*
* @param {!Options.Cookie} spec Defines the cookie to add.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been added to the page.
* @throws {error.InvalidArgumentError} if any of the cookie parameters are
* invalid.
* @throws {TypeError} if `spec` is not a cookie object.
*/
addCookie(spec) {
if (!spec || typeof spec !== 'object') {
throw TypeError('addCookie called with non-cookie parameter');
}
// We do not allow '=' or ';' in the name.
let name = spec.name;
if (/[;=]/.test(name)) {
throw new error.InvalidArgumentError(
'Invalid cookie name "' + name + '"');
}
// We do not allow ';' in value.
let value = spec.value;
if (/;/.test(value)) {
throw new error.InvalidArgumentError(
'Invalid cookie value "' + value + '"');
}
let cookieString = name + '=' + value +
(spec.domain ? ';domain=' + spec.domain : '') +
(spec.path ? ';path=' + spec.path : '') +
(spec.secure ? ';secure' : '');
let expiry;
if (typeof spec.expiry === 'number') {
expiry = Math.floor(spec.expiry);
cookieString += ';expires=' + new Date(spec.expiry * 1000).toUTCString();
} else if (spec.expiry instanceof Date) {
let date = /** @type {!Date} */(spec.expiry);
expiry = Math.floor(date.getTime() / 1000);
cookieString += ';expires=' + date.toUTCString();
}
return this.driver_.schedule(
new command.Command(command.Name.ADD_COOKIE).
setParameter('cookie', {
'name': name,
'value': value,
'path': spec.path,
'domain': spec.domain,
'secure': !!spec.secure,
'expiry': expiry
}),
'WebDriver.manage().addCookie(' + cookieString + ')');
}
/**
* Schedules a command to delete all cookies visible to the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all cookies have been deleted.
*/
deleteAllCookies() {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_ALL_COOKIES),
'WebDriver.manage().deleteAllCookies()');
}
/**
* Schedules a command to delete the cookie with the given name. This command
* is a no-op if there is no cookie with the given name visible to the current
* page.
* @param {string} name The name of the cookie to delete.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been deleted.
*/
deleteCookie(name) {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_COOKIE).
setParameter('name', name),
'WebDriver.manage().deleteCookie(' + name + ')');
}
/**
* Schedules a command to retrieve all cookies visible to the current page.
* Each cookie will be returned as a JSON object as described by the WebDriver
* wire protocol.
* @return {!promise.Promise<!Array<!Options.Cookie>>} A promise that will be
* resolved with the cookies visible to the current browsing context.
*/
getCookies() {
return this.driver_.schedule(
new command.Command(command.Name.GET_ALL_COOKIES),
'WebDriver.manage().getCookies()');
}
/**
* Schedules a command to retrieve the cookie with the given name. Returns null
* if there is no such cookie. The cookie will be returned as a JSON object as
* described by the WebDriver wire protocol.
*
* @param {string} name The name of the cookie to retrieve.
* @return {!promise.Promise<?Options.Cookie>} A promise that will be resolved
* with the named cookie, or `null` if there is no such cookie.
*/
getCookie(name) {
return this.getCookies().then(function(cookies) {
for (let cookie of cookies) {
if (cookie && cookie['name'] === name) {
return cookie;
}
}
return null;
});
}
/**
* @return {!Logs} The interface for managing driver
* logs.
*/
logs() {
return new Logs(this.driver_);
}
/**
* @return {!Timeouts} The interface for managing driver timeouts.
*/
timeouts() {
return new Timeouts(this.driver_);
}
/**
* @return {!Window} The interface for managing the current window.
*/
window() {
return new Window(this.driver_);
}
}
/**
* A record object describing a browser cookie.
*
* @record
*/
Options.Cookie = function() {};
/**
* The name of the cookie.
*
* @type {string}
*/
Options.Cookie.prototype.name;
/**
* The cookie value.
*
* @type {string}
*/
Options.Cookie.prototype.value;
/**
* The cookie path. Defaults to "/" when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.path;
/**
* The domain the cookie is visible to. Defaults to the current browsing
* context's document's URL when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.domain;
/**
* Whether the cookie is a secure cookie. Defaults to false when adding a new
* cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.secure;
/**
* Whether the cookie is an HTTP only cookie. Defaults to false when adding a
* new cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.httpOnly;
/**
* When the cookie expires.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
* in _seconds_ since Unix epoch (January 1, 1970). The expiry will default to
* 20 years in the future if omitted.
*
* The expiry is always returned in seconds since epoch when
* {@linkplain Options#getCookies() retrieving cookies} from the browser.
*
* @type {(!Date|number|undefined)}
*/
Options.Cookie.prototype.expiry;
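// Illustrative sketch (not part of the original source): reading the Cookie
// records documented above from an already constructed `driver` instance.
//
//   driver.manage().getCookies().then(cookies => {
//     for (let cookie of cookies) {
//       console.log(`${cookie.name}=${cookie.value}; expires=${cookie.expiry}`);
//     }
//   });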
/**
* An interface for managing timeout behavior for WebDriver instances.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().timeouts()
*
* @see WebDriver#manage()
* @see Options#timeouts()
*/
class Timeouts {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Specifies the amount of time the driver should wait when searching for an
* element if it is not immediately present.
*
* When searching for a single element, the driver should poll the page
* until the element has been found, or this timeout expires before failing
* with a {@link bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching
* for multiple elements, the driver should poll the page until at least one
* element has been found or this timeout has expired.
*
* Setting the wait timeout to 0 (its default value), disables implicit
* waiting.
*
* Increasing the implicit wait timeout should be used judiciously as it
* will have an adverse effect on test run time, especially when used with
* slower location strategies like XPath.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the implicit wait timeout has been set.
*/
implicitlyWait(ms) {
return this._scheduleCommand(ms, 'implicit', 'implicitlyWait');
}
/**
* Sets the amount of time to wait, in milliseconds, for an asynchronous
* script to finish execution before returning an error. If the timeout is
* less than or equal to 0, the script will be allowed to run indefinitely.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the script timeout has been set.
*/
setScriptTimeout(ms) {
return this._scheduleCommand(ms, 'script', 'setScriptTimeout');
}
/**
* Sets the amount of time to wait for a page load to complete before
* returning an error. If the timeout is negative, page loads may be
* indefinite.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the timeout has been set.
*/
pageLoadTimeout(ms) {
return this._scheduleCommand(ms, 'page load', 'pageLoadTimeout');
}
_scheduleCommand(ms, timeoutIdentifier, timeoutName) {
return this.driver_.schedule(
new command.Command(command.Name.SET_TIMEOUT).
setParameter('type', timeoutIdentifier).
setParameter('ms', ms),
`WebDriver.manage().timeouts().${timeoutName}(${ms})`);
}
}
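// Illustrative usage sketch (not part of the original source); the millisecond
// values are arbitrary examples.
//
//   let timeouts = driver.manage().timeouts();
//   timeouts.implicitlyWait(5000);     // poll up to 5s when locating elements
//   timeouts.setScriptTimeout(10000);  // allow async scripts 10s to finish
//   timeouts.pageLoadTimeout(30000);   // fail page loads after 30s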
/**
* An interface for managing the current window.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().window()
*
* @see WebDriver#manage()
* @see Options#window()
*/
class Window {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Retrieves the window's current position, relative to the top left corner of
* the screen.
* @return {!promise.Promise.<{x: number, y: number}>} A promise
* that will be resolved with the window's position in the form of a
* {x:number, y:number} object literal.
*/
getPosition() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_POSITION).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getPosition()');
}
/**
* Repositions the current window.
* @param {number} x The desired horizontal position, relative to the left
* side of the screen.
   * @param {number} y The desired vertical position, relative to the top of
   *     the screen.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setPosition(x, y) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_POSITION).
setParameter('windowHandle', 'current').
setParameter('x', x).
setParameter('y', y),
'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')');
}
/**
* Retrieves the window's current size.
* @return {!promise.Promise<{width: number, height: number}>} A
* promise that will be resolved with the window's size in the form of a
* {width:number, height:number} object literal.
*/
getSize() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_SIZE).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getSize()');
}
/**
* Resizes the current window.
* @param {number} width The desired window width.
* @param {number} height The desired window height.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setSize(width, height) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_SIZE).
setParameter('windowHandle', 'current').
setParameter('width', width).
setParameter('height', height),
'WebDriver.manage().window().setSize(' + width + ', ' + height + ')');
}
/**
* Maximizes the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
maximize() {
return this.driver_.schedule(
new command.Command(command.Name.MAXIMIZE_WINDOW).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().maximize()');
}
}
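// Illustrative usage sketch (not part of the original source); the coordinates
// and dimensions are arbitrary examples.
//
//   driver.manage().window().setPosition(0, 0);
//   driver.manage().window().setSize(1280, 800);
//   driver.manage().window().maximize();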
/**
* Interface for managing WebDriver log records.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.manage().logs()
*
* @see WebDriver#manage()
* @see Options#logs()
*/
class Logs {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Fetches available log entries for the given type.
*
* Note that log buffers are reset after each call, meaning that available
* log entries correspond to those entries not yet returned for a given log
* type. In practice, this means that this call will return the available log
* entries since the last call, or from the start of the session.
*
* @param {!logging.Type} type The desired log type.
* @return {!promise.Promise.<!Array.<!logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
get(type) {
let cmd = new command.Command(command.Name.GET_LOG).
setParameter('type', type);
return this.driver_.schedule(
cmd, 'WebDriver.manage().logs().get(' + type + ')').
then(function(entries) {
return entries.map(function(entry) {
if (!(entry instanceof logging.Entry)) {
return new logging.Entry(
entry['level'], entry['message'], entry['timestamp'],
entry['type']);
}
return entry;
});
});
}
/**
* Retrieves the log types available to this driver.
* @return {!promise.Promise<!Array<!logging.Type>>} A
* promise that will resolve to a list of available log types.
*/
getAvailableLogTypes() {
return this.driver_.schedule(
new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES),
'WebDriver.manage().logs().getAvailableLogTypes()');
}
}
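// Illustrative usage sketch (not part of the original source); assumes the
// `logging` module already required by this file.
//
//   driver.manage().logs().get(logging.Type.BROWSER).then(entries => {
//     entries.forEach(entry => console.log(entry.message));
//   });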
/**
* An interface for changing the focus of the driver to another frame or window.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.switchTo()
*
* @see WebDriver#switchTo()
*/
class TargetLocator {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
   * Schedules a command to retrieve the {@code document.activeElement} element on
* the current document, or {@code document.body} if activeElement is not
* available.
* @return {!WebElementPromise} The active element.
*/
activeElement() {
var id = this.driver_.schedule(
new command.Command(command.Name.GET_ACTIVE_ELEMENT),
'WebDriver.switchTo().activeElement()');
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to switch focus of all future commands to the topmost
* frame on the page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the default content.
*/
defaultContent() {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', null),
'WebDriver.switchTo().defaultContent()');
}
/**
* Schedules a command to switch the focus of all future commands to another
* frame on the page. The target frame may be specified as one of the
* following:
*
* - A number that specifies a (zero-based) index into [window.frames](
* https://developer.mozilla.org/en-US/docs/Web/API/Window.frames).
   * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe`
* DOM element.
* - The `null` value, to select the topmost frame on the page. Passing `null`
* is the same as calling {@link #defaultContent defaultContent()}.
*
* If the specified frame can not be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchFrameError}.
*
* @param {(number|WebElement|null)} id The frame locator.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified frame.
*/
frame(id) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', id),
'WebDriver.switchTo().frame(' + id + ')');
}
/**
* Schedules a command to switch the focus of all future commands to another
* window. Windows may be specified by their {@code window.name} attribute or
* by its handle (as returned by {@link WebDriver#getWindowHandles}).
*
* If the specified window cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchWindowError}.
*
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
setParameter('name', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
/**
* Schedules a command to change focus to the active modal dialog, such as
* those opened by `window.alert()`, `window.confirm()`, and
* `window.prompt()`. The returned promise will be rejected with a
* {@linkplain error.NoSuchAlertError} if there are no open alerts.
*
* @return {!AlertPromise} The open alert.
*/
alert() {
var text = this.driver_.schedule(
new command.Command(command.Name.GET_ALERT_TEXT),
'WebDriver.switchTo().alert()');
var driver = this.driver_;
return new AlertPromise(driver, text.then(function(text) {
return new Alert(driver, text);
}));
}
}
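// Illustrative usage sketch (not part of the original source): enter the first
// frame on the page, interact with it, then return to the top-level document.
//
//   driver.switchTo().frame(0)
//       .then(() => driver.findElement({css: 'input'}).sendKeys('hello'))
//       .then(() => driver.switchTo().defaultContent());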
//////////////////////////////////////////////////////////////////////////////
//
// WebElement
//
//////////////////////////////////////////////////////////////////////////////
const LEGACY_ELEMENT_ID_KEY = 'ELEMENT';
const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf';
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@link WebDriver} instance, or by searching
* under another WebElement:
*
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
*/
class WebElement {
/**
* @param {!WebDriver} driver the parent WebDriver instance for this element.
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for
* the underlying DOM element.
*/
constructor(driver, id) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.id_ = promise.fulfilled(id);
}
/**
* @param {string} id The raw ID.
* @param {boolean=} opt_noLegacy Whether to exclude the legacy element key.
* @return {!Object} The element ID for use with WebDriver's wire protocol.
*/
static buildId(id, opt_noLegacy) {
return opt_noLegacy
? {[ELEMENT_ID_KEY]: id}
: {[ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id};
}
/**
* Extracts the encoded WebElement ID from the object.
*
* @param {?} obj The object to extract the ID from.
* @return {string} the extracted ID.
* @throws {TypeError} if the object is not a valid encoded ID.
*/
static extractId(obj) {
if (obj && typeof obj === 'object') {
if (typeof obj[ELEMENT_ID_KEY] === 'string') {
return obj[ELEMENT_ID_KEY];
} else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') {
return obj[LEGACY_ELEMENT_ID_KEY];
}
}
throw new TypeError('object is not a WebElement ID');
}
/**
* @param {?} obj the object to test.
* @return {boolean} whether the object is a valid encoded WebElement ID.
*/
static isId(obj) {
return obj && typeof obj === 'object'
&& (typeof obj[ELEMENT_ID_KEY] === 'string'
|| typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string');
}
/**
* Compares two WebElements for equality.
*
* @param {!WebElement} a A WebElement.
* @param {!WebElement} b A WebElement.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved to whether the two WebElements are equal.
*/
static equals(a, b) {
if (a === b) {
return promise.fulfilled(true);
}
let ids = [a.getId(), b.getId()];
return promise.all(ids).then(function(ids) {
      // If the two elements have the same ID, they should be considered
// equal. Otherwise, they may still be equivalent, but we'll need to
// ask the server to check for us.
if (ids[0] === ids[1]) {
return true;
}
let cmd = new command.Command(command.Name.ELEMENT_EQUALS);
cmd.setParameter('id', ids[0]);
cmd.setParameter('other', ids[1]);
return a.driver_.schedule(cmd, 'WebElement.equals()');
});
}
/** @return {!WebDriver} The parent driver for this instance. */
getDriver() {
return this.driver_;
}
/**
* @return {!promise.Promise<string>} A promise that resolves to
* the server-assigned opaque ID assigned to this element.
*/
getId() {
return this.id_;
}
/**
* @return {!Object} Returns the serialized representation of this WebElement.
*/
[Symbols.serialize]() {
return this.getId().then(WebElement.buildId);
}
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command
* parameters under the "id" key.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
* @see WebDriver#schedule
* @private
*/
schedule_(command, description) {
command.setParameter('id', this.getId());
return this.driver_.schedule(command, description);
}
/**
* Schedule a command to find a descendant of this element. If the element
* cannot be found, the returned promise will be rejected with a
* {@linkplain error.NoSuchElementError NoSuchElementError}.
*
* The search criteria for an element may be defined using one of the static
* factories on the {@link by.By} class, or as a short-hand
* {@link ./by.ByHash} object. For example, the following two statements
* are equivalent:
*
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
id = this.driver_.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule_(cmd, 'WebElement.findElement(' + locator + ')');
}
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to find all of the descendants of this element that
* match the given search criteria.
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!promise.Promise<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
return this.driver_.findElementsInternal_(locator, this);
} else {
var cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
return this.schedule_(cmd, 'WebElement.findElements(' + locator + ')');
}
}
/**
* Schedules a command to click on this element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the click command has completed.
*/
click() {
return this.schedule_(
new command.Command(command.Name.CLICK_ELEMENT),
'WebElement.click()');
}
/**
* Schedules a command to type a sequence on the DOM element represented by
* this instance.
*
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
* processed in the keysequence, that key state is toggled until one of the
* following occurs:
*
* - The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down
* events).
* - The {@link input.Key.NULL} key is encountered in the sequence. When
   *     this key is encountered, all modifier keys currently in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
*
* element.sendKeys("text was",
* Key.CONTROL, "a", Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* Key.chord(Key.CONTROL, "a"),
* "now text is");
*
* - The end of the keysequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying
* keyup events).
*
* If this element is a file input ({@code <input type="file">}), the
* specified key sequence should specify the path to the file to attach to
   * the element. This is analogous to the user clicking "Browse..." and entering
* the path into the file select dialog.
*
* var form = driver.findElement(By.css('form'));
* var element = form.findElement(By.css('input[type=file]'));
* element.sendKeys('/path/to/file.txt');
* form.submit();
*
* For uploads to function correctly, the entered path must reference a file
* on the _browser's_ machine, not the local machine running this script. When
* running against a remote Selenium server, a {@link input.FileDetector}
* may be used to transparently copy files to the remote machine before
* attempting to upload them in the browser.
*
* __Note:__ On browsers where native keyboard events are not supported
* (e.g. Firefox on OS X), key events will be synthesized. Special
   * punctuation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...(number|string|!IThenable<(number|string)>)} var_args The
* sequence of keys to type. Number keys may be referenced numerically or
* by string (1 or '1'). All arguments will be joined into a single
* sequence.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all keys have been typed.
*/
sendKeys(var_args) {
let keys = Promise.all(Array.prototype.slice.call(arguments, 0)).
then(keys => {
let ret = [];
keys.forEach(key => {
let type = typeof key;
if (type === 'number') {
key = String(key);
} else if (type !== 'string') {
throw TypeError(
                  'each key must be a number or string; got ' + type);
}
// The W3C protocol requires keys to be specified as an array where
// each element is a single key.
ret.push.apply(ret, key.split(''));
});
return ret;
});
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys),
'WebElement.sendKeys()');
}
// Suppress unhandled rejection errors until the flow executes the command.
keys.catch(function() {});
var element = this;
return this.driver_.flow_.execute(function() {
return keys.then(function(keys) {
return element.driver_.fileDetector_
.handleFile(element.driver_, keys.join(''));
}).then(function(keys) {
return element.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys.split('')),
'WebElement.sendKeys()');
});
}, 'WebElement.sendKeys()');
}
/**
* Schedules a command to query for the tag/node name of this element.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's tag name.
*/
getTagName() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TAG_NAME),
'WebElement.getTagName()');
}
/**
* Schedules a command to query for the computed style of the element
* represented by this instance. If the element inherits the named style from
* its parent, the parent will be queried for its value. Where possible, color
* values will be converted to their hex representation (e.g. #00ff00 instead
* of rgb(0, 255, 0)).
*
* _Warning:_ the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the requested CSS value.
*/
getCssValue(cssStyleProperty) {
var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY;
return this.schedule_(
new command.Command(name).
setParameter('propertyName', cssStyleProperty),
'WebElement.getCssValue(' + cssStyleProperty + ')');
}
/**
* Schedules a command to query for the value of the given attribute of the
* element. Will return the current value, even if it has been modified after
* the page has been loaded. More exactly, this method will return the value
* of the given attribute, unless that attribute is not present, in which case
* the value of the property with the same name is returned. If neither value
* is set, null is returned (for example, the "value" property of a textarea
* element). The "style" attribute is converted as best can be to a
* text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
*
* - "class"
* - "readonly"
*
* @param {string} attributeName The name of the attribute to query.
* @return {!promise.Promise<?string>} A promise that will be
* resolved with the attribute's value. The returned value will always be
* either a string or null.
*/
getAttribute(attributeName) {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).
setParameter('name', attributeName),
'WebElement.getAttribute(' + attributeName + ')');
}
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element,
* including sub-elements, without any leading or trailing whitespace.
*
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's visible text.
*/
getText() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TEXT),
'WebElement.getText()');
}
/**
* Schedules a command to compute the size of this element's bounding box, in
* pixels.
* @return {!promise.Promise.<{width: number, height: number}>} A
* promise that will be resolved with the element's size as a
* {@code {width:number, height:number}} object.
*/
getSize() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_SIZE),
'WebElement.getSize()');
}
/**
* Schedules a command to compute the location of this element in page space.
* @return {!promise.Promise.<{x: number, y: number}>} A promise that
* will be resolved to the element's location as a
* {@code {x:number, y:number}} object.
*/
getLocation() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_LOCATION),
'WebElement.getLocation()');
}
/**
* Schedules a command to query whether the DOM element represented by this
   * instance is enabled, as dictated by the {@code disabled} attribute.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently enabled.
*/
isEnabled() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_ENABLED),
'WebElement.isEnabled()');
}
/**
* Schedules a command to query whether this element is selected.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently selected.
*/
isSelected() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_SELECTED),
'WebElement.isSelected()');
}
/**
* Schedules a command to submit the form containing this element (or this
* element if it is a FORM element). This command is a no-op if the element is
* not contained in a form.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the form has been submitted.
*/
submit() {
return this.schedule_(
new command.Command(command.Name.SUBMIT_ELEMENT),
'WebElement.submit()');
}
/**
* Schedules a command to clear the `value` of this element. This command has
* no effect if the underlying DOM element is neither a text INPUT element
* nor a TEXTAREA element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the element has been cleared.
*/
clear() {
return this.schedule_(
new command.Command(command.Name.CLEAR_ELEMENT),
'WebElement.clear()');
}
/**
* Schedules a command to test whether this element is currently displayed.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently visible on the page.
*/
isDisplayed() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_DISPLAYED),
'WebElement.isDisplayed()');
}
/**
* Take a screenshot of the visible region encompassed by this element's
* bounding rectangle.
*
* @param {boolean=} opt_scroll Optional argument that indicates whether the
* element should be scrolled into view before taking a screenshot.
* Defaults to false.
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot(opt_scroll) {
var scroll = !!opt_scroll;
return this.schedule_(
new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT)
.setParameter('scroll', scroll),
'WebElement.takeScreenshot(' + scroll + ')');
}
}
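// Illustrative usage sketch (not part of the original source); `By` refers to
// the locator factories from the by module used throughout the examples above.
//
//   let form = driver.findElement(By.css('form'));
//   let input = form.findElement(By.name('q'));
//   input.sendKeys('webdriver');
//   input.getAttribute('value').then(value => console.log(value));
//   form.submit();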
/**
* WebElementPromise is a promise that will be fulfilled with a WebElement.
* This serves as a forward proxy on WebElement, allowing calls to be
 * scheduled directly on this instance before the underlying
* WebElement has been fulfilled. In other words, the following two statements
* are equivalent:
*
* driver.findElement({id: 'my-button'}).click();
* driver.findElement({id: 'my-button'}).then(function(el) {
* return el.click();
* });
*
* @implements {promise.Thenable<!WebElement>}
* @final
*/
class WebElementPromise extends WebElement {
/**
* @param {!WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!promise.Promise<!WebElement>} el A promise
* that will resolve to the promised element.
*/
constructor(driver, el) {
super(driver, 'unused');
/** @override */
this.cancel = el.cancel.bind(el);
/** @override */
this.isPending = el.isPending.bind(el);
/** @override */
this.then = el.then.bind(el);
/** @override */
this.catch = el.catch.bind(el);
/** @override */
this.finally = el.finally.bind(el);
/**
* Defers returning the element ID until the wrapped WebElement has been
* resolved.
* @override
*/
this.getId = function() {
return el.then(function(el) {
return el.getId();
});
};
}
}
promise.Thenable.addImplementation(WebElementPromise);
//////////////////////////////////////////////////////////////////////////////
//
// Alert
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
*/
class Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this alert
* is attached to.
* @param {string} text The message text displayed with this alert.
*/
constructor(driver, text) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.text_ = promise.fulfilled(text);
}
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the text displayed with this alert.
*/
getText() {
return this.text_;
}
/**
* Sets the username and password in an alert prompting for credentials (such
* as a Basic HTTP Auth prompt). This method will implicitly
* {@linkplain #accept() submit} the dialog.
*
* @param {string} username The username to send.
* @param {string} password The password to send.
* @return {!promise.Promise<void>} A promise that will be resolved when this
* command has completed.
*/
authenticateAs(username, password) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_CREDENTIALS),
'WebDriver.switchTo().alert()'
+ `.authenticateAs("${username}", "${password}")`);
}
/**
* Accepts this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
accept() {
return this.driver_.schedule(
new command.Command(command.Name.ACCEPT_ALERT),
'WebDriver.switchTo().alert().accept()');
}
/**
* Dismisses this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
dismiss() {
return this.driver_.schedule(
new command.Command(command.Name.DISMISS_ALERT),
'WebDriver.switchTo().alert().dismiss()');
}
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
*
* @param {string} text The text to set.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
sendKeys(text) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_TEXT).
setParameter('text', text),
'WebDriver.switchTo().alert().sendKeys(' + text + ')');
}
}
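// Illustrative usage sketch (not part of the original source): read the text of
// an open prompt and then accept it.
//
//   driver.switchTo().alert().then(alert => {
//     return alert.getText()
//         .then(text => console.log('prompt says: ' + text))
//         .then(() => alert.accept());
//   });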
/**
* AlertPromise is a promise that will be fulfilled with an Alert. This promise
* serves as a forward proxy on an Alert, allowing calls to be scheduled
* directly on this instance before the underlying Alert has been fulfilled. In
* other words, the following two statements are equivalent:
*
* driver.switchTo().alert().dismiss();
* driver.switchTo().alert().then(function(alert) {
* return alert.dismiss();
* });
*
* @implements {promise.Thenable.<!webdriver.Alert>}
* @final
*/
class AlertPromise extends Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!promise.Thenable<!Alert>} alert A thenable
* that will be fulfilled with the promised alert.
*/
constructor(driver, alert) {
super(driver, 'unused');
/** @override */
this.cancel = alert.cancel.bind(alert);
/** @override */
this.isPending = alert.isPending.bind(alert);
/** @override */
this.then = alert.then.bind(alert);
/** @override */
this.catch = alert.catch.bind(alert);
/** @override */
this.finally = alert.finally.bind(alert);
/**
* Defer returning text until the promised alert has been resolved.
* @override
*/
this.getText = function() {
return alert.then(function(alert) {
return alert.getText();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.authenticateAs = function(username, password) {
return alert.then(function(alert) {
return alert.authenticateAs(username, password);
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.accept = function() {
return alert.then(function(alert) {
return alert.accept();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.dismiss = function() {
return alert.then(function(alert) {
return alert.dismiss();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.sendKeys = function(text) {
return alert.then(function(alert) {
return alert.sendKeys(text);
});
};
}
}
promise.Thenable.addImplementation(AlertPromise);
// PUBLIC API
module.exports = {
Alert: Alert,
AlertPromise: AlertPromise,
Condition: Condition,
Logs: Logs,
Navigation: Navigation,
Options: Options,
TargetLocator: TargetLocator,
Timeouts: Timeouts,
WebDriver: WebDriver,
WebElement: WebElement,
WebElementCondition: WebElementCondition,
WebElementPromise: WebElementPromise,
Window: Window
};
| 1 | 13,590 | I'd rather just send the parameter twice than break encapsulation here. There's already precedence with webelement IDs | SeleniumHQ-selenium | py |
@@ -0,0 +1,5 @@
+_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py'
+model = dict(
+ pretrained= # NOQA
+ 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net101_v1b_26w_4s_mmdetv2-f0a600f9.pth', # NOQA
+ backbone=dict(type='Res2Net', depth=101, scale=4, base_width=26)) | 1 | 1 | 18,747 | You may put 'open-mmlab://res2net101_v1d_26w_4s' here. MMCV will be updated later. | open-mmlab-mmdetection | py |
|
@@ -17,3 +17,12 @@
*/
export const STORE_NAME = 'modules/pagespeed-insights';
+
+// Form ID for PageSpeed widget.
+export const FORM_DASH_WIDGET = 'pagespeedWidget';
+// Report strategies.
+export const STRATEGY_MOBILE = 'mobile';
+export const STRATEGY_DESKTOP = 'desktop';
+// Report data sources.
+export const DATA_SRC_LAB = 'data_lab';
+export const DATA_SRC_FIELD = 'data_field'; | 1 | /**
* PageSpeed Insights Constants.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export const STORE_NAME = 'modules/pagespeed-insights';
| 1 | 29,336 | May be worth making this more specific, to avoid potential conflicts, maybe `pagespeedDashboardWidget`? Or `webVitalsDashboardWidget`? | google-site-kit-wp | js |
@@ -151,7 +151,7 @@ module.exports = class AwsS3Multipart extends Plugin {
const onError = (err) => {
this.uppy.log(err)
this.uppy.emit('upload-error', file, err)
- err.message = `Failed because: ${err.message}`
+ err.message = `${err.message}`
queuedRequest.done()
this.resetUploaderReferences(file.id) | 1 | const { Plugin } = require('@uppy/core')
const { Socket, Provider, RequestClient } = require('@uppy/companion-client')
const EventTracker = require('@uppy/utils/lib/EventTracker')
const emitSocketProgress = require('@uppy/utils/lib/emitSocketProgress')
const getSocketHost = require('@uppy/utils/lib/getSocketHost')
const RateLimitedQueue = require('@uppy/utils/lib/RateLimitedQueue')
const Uploader = require('./MultipartUploader')
function assertServerError (res) {
if (res && res.error) {
const error = new Error(res.message)
Object.assign(error, res.error)
throw error
}
return res
}
module.exports = class AwsS3Multipart extends Plugin {
static VERSION = require('../package.json').version
constructor (uppy, opts) {
super(uppy, opts)
this.type = 'uploader'
this.id = this.opts.id || 'AwsS3Multipart'
this.title = 'AWS S3 Multipart'
this.client = new RequestClient(uppy, opts)
const defaultOptions = {
timeout: 30 * 1000,
limit: 0,
createMultipartUpload: this.createMultipartUpload.bind(this),
listParts: this.listParts.bind(this),
prepareUploadPart: this.prepareUploadPart.bind(this),
abortMultipartUpload: this.abortMultipartUpload.bind(this),
completeMultipartUpload: this.completeMultipartUpload.bind(this)
}
this.opts = { ...defaultOptions, ...opts }
this.upload = this.upload.bind(this)
this.requests = new RateLimitedQueue(this.opts.limit)
this.uploaders = Object.create(null)
this.uploaderEvents = Object.create(null)
this.uploaderSockets = Object.create(null)
}
/**
* Clean up all references for a file's upload: the MultipartUploader instance,
* any events related to the file, and the Companion WebSocket connection.
*
* Set `opts.abort` to tell S3 that the multipart upload is cancelled and must be removed.
* This should be done when the user cancels the upload, not when the upload is completed or errored.
*/
resetUploaderReferences (fileID, opts = {}) {
if (this.uploaders[fileID]) {
this.uploaders[fileID].abort({ really: opts.abort || false })
this.uploaders[fileID] = null
}
if (this.uploaderEvents[fileID]) {
this.uploaderEvents[fileID].remove()
this.uploaderEvents[fileID] = null
}
if (this.uploaderSockets[fileID]) {
this.uploaderSockets[fileID].close()
this.uploaderSockets[fileID] = null
}
}
assertHost () {
if (!this.opts.companionUrl) {
throw new Error('Expected a `companionUrl` option containing a Companion address.')
}
}
createMultipartUpload (file) {
this.assertHost()
const metadata = {}
Object.keys(file.meta).map(key => {
if (file.meta[key] != null) {
metadata[key] = file.meta[key].toString()
}
})
return this.client.post('s3/multipart', {
filename: file.name,
type: file.type,
metadata
}).then(assertServerError)
}
listParts (file, { key, uploadId }) {
this.assertHost()
const filename = encodeURIComponent(key)
return this.client.get(`s3/multipart/${uploadId}?key=${filename}`)
.then(assertServerError)
}
prepareUploadPart (file, { key, uploadId, number }) {
this.assertHost()
const filename = encodeURIComponent(key)
return this.client.get(`s3/multipart/${uploadId}/${number}?key=${filename}`)
.then(assertServerError)
}
completeMultipartUpload (file, { key, uploadId, parts }) {
this.assertHost()
const filename = encodeURIComponent(key)
const uploadIdEnc = encodeURIComponent(uploadId)
return this.client.post(`s3/multipart/${uploadIdEnc}/complete?key=${filename}`, { parts })
.then(assertServerError)
}
abortMultipartUpload (file, { key, uploadId }) {
this.assertHost()
const filename = encodeURIComponent(key)
const uploadIdEnc = encodeURIComponent(uploadId)
return this.client.delete(`s3/multipart/${uploadIdEnc}?key=${filename}`)
.then(assertServerError)
}
uploadFile (file) {
return new Promise((resolve, reject) => {
const onStart = (data) => {
const cFile = this.uppy.getFile(file.id)
this.uppy.setFileState(file.id, {
s3Multipart: {
...cFile.s3Multipart,
key: data.key,
uploadId: data.uploadId,
parts: []
}
})
}
const onProgress = (bytesUploaded, bytesTotal) => {
this.uppy.emit('upload-progress', file, {
uploader: this,
bytesUploaded: bytesUploaded,
bytesTotal: bytesTotal
})
}
const onError = (err) => {
this.uppy.log(err)
this.uppy.emit('upload-error', file, err)
err.message = `Failed because: ${err.message}`
queuedRequest.done()
this.resetUploaderReferences(file.id)
reject(err)
}
const onSuccess = (result) => {
const uploadResp = {
uploadURL: result.location
}
queuedRequest.done()
this.resetUploaderReferences(file.id)
this.uppy.emit('upload-success', file, uploadResp)
if (result.location) {
this.uppy.log('Download ' + upload.file.name + ' from ' + result.location)
}
resolve(upload)
}
const onPartComplete = (part) => {
// Store completed parts in state.
const cFile = this.uppy.getFile(file.id)
if (!cFile) {
return
}
this.uppy.setFileState(file.id, {
s3Multipart: {
...cFile.s3Multipart,
parts: [
...cFile.s3Multipart.parts,
part
]
}
})
this.uppy.emit('s3-multipart:part-uploaded', cFile, part)
}
const upload = new Uploader(file.data, {
// .bind to pass the file object to each handler.
createMultipartUpload: this.opts.createMultipartUpload.bind(this, file),
listParts: this.opts.listParts.bind(this, file),
prepareUploadPart: this.opts.prepareUploadPart.bind(this, file),
completeMultipartUpload: this.opts.completeMultipartUpload.bind(this, file),
abortMultipartUpload: this.opts.abortMultipartUpload.bind(this, file),
onStart,
onProgress,
onError,
onSuccess,
onPartComplete,
limit: this.opts.limit || 5,
...file.s3Multipart
})
this.uploaders[file.id] = upload
this.uploaderEvents[file.id] = new EventTracker(this.uppy)
let queuedRequest = this.requests.run(() => {
if (!file.isPaused) {
upload.start()
}
// Don't do anything here, the caller will take care of cancelling the upload itself
// using resetUploaderReferences(). This is because resetUploaderReferences() has to be
// called when this request is still in the queue, and has not been started yet, too. At
// that point this cancellation function is not going to be called.
return () => {}
})
this.onFileRemove(file.id, (removed) => {
queuedRequest.abort()
this.resetUploaderReferences(file.id, { abort: true })
resolve(`upload ${removed.id} was removed`)
})
this.onCancelAll(file.id, () => {
queuedRequest.abort()
this.resetUploaderReferences(file.id, { abort: true })
resolve(`upload ${file.id} was canceled`)
})
this.onFilePause(file.id, (isPaused) => {
if (isPaused) {
// Remove this file from the queue so another file can start in its place.
queuedRequest.abort()
upload.pause()
} else {
// Resuming an upload should be queued, else you could pause and then resume a queued upload to make it skip the queue.
queuedRequest.abort()
queuedRequest = this.requests.run(() => {
upload.start()
return () => {}
})
}
})
this.onPauseAll(file.id, () => {
queuedRequest.abort()
upload.pause()
})
this.onResumeAll(file.id, () => {
queuedRequest.abort()
if (file.error) {
upload.abort()
}
queuedRequest = this.requests.run(() => {
upload.start()
return () => {}
})
})
if (!file.isRestored) {
this.uppy.emit('upload-started', file, upload)
}
})
}
uploadRemote (file) {
this.resetUploaderReferences(file.id)
this.uppy.emit('upload-started', file)
if (file.serverToken) {
return this.connectToServerSocket(file)
}
return new Promise((resolve, reject) => {
const Client = file.remote.providerOptions.provider ? Provider : RequestClient
const client = new Client(this.uppy, file.remote.providerOptions)
client.post(
file.remote.url,
{
...file.remote.body,
protocol: 's3-multipart',
size: file.data.size,
metadata: file.meta
}
).then((res) => {
this.uppy.setFileState(file.id, { serverToken: res.token })
file = this.uppy.getFile(file.id)
return file
}).then((file) => {
return this.connectToServerSocket(file)
}).then(() => {
resolve()
}).catch((err) => {
reject(new Error(err))
})
})
}
connectToServerSocket (file) {
return new Promise((resolve, reject) => {
const token = file.serverToken
const host = getSocketHost(file.remote.companionUrl)
const socket = new Socket({ target: `${host}/api/${token}`, autoOpen: false })
this.uploaderSockets[file.id] = socket
this.uploaderEvents[file.id] = new EventTracker(this.uppy)
this.onFileRemove(file.id, (removed) => {
queuedRequest.abort()
socket.send('pause', {})
this.resetUploaderReferences(file.id, { abort: true })
resolve(`upload ${file.id} was removed`)
})
this.onFilePause(file.id, (isPaused) => {
if (isPaused) {
// Remove this file from the queue so another file can start in its place.
queuedRequest.abort()
socket.send('pause', {})
} else {
// Resuming an upload should be queued, else you could pause and then resume a queued upload to make it skip the queue.
queuedRequest.abort()
queuedRequest = this.requests.run(() => {
socket.send('resume', {})
return () => {}
})
}
})
this.onPauseAll(file.id, () => {
queuedRequest.abort()
socket.send('pause', {})
})
this.onCancelAll(file.id, () => {
queuedRequest.abort()
socket.send('pause', {})
this.resetUploaderReferences(file.id)
resolve(`upload ${file.id} was canceled`)
})
this.onResumeAll(file.id, () => {
queuedRequest.abort()
if (file.error) {
socket.send('pause', {})
}
queuedRequest = this.requests.run(() => {
socket.send('resume', {})
})
})
this.onRetry(file.id, () => {
// Only do the retry if the upload is actually in progress;
// else we could try to send these messages when the upload is still queued.
// We may need a better check for this since the socket may also be closed
// for other reasons, like network failures.
if (socket.isOpen) {
socket.send('pause', {})
socket.send('resume', {})
}
})
this.onRetryAll(file.id, () => {
if (socket.isOpen) {
socket.send('pause', {})
socket.send('resume', {})
}
})
socket.on('progress', (progressData) => emitSocketProgress(this, progressData, file))
socket.on('error', (errData) => {
this.uppy.emit('upload-error', file, new Error(errData.error))
this.resetUploaderReferences(file.id)
queuedRequest.done()
reject(new Error(errData.error))
})
socket.on('success', (data) => {
const uploadResp = {
uploadURL: data.url
}
this.uppy.emit('upload-success', file, uploadResp)
this.resetUploaderReferences(file.id)
queuedRequest.done()
resolve()
})
let queuedRequest = this.requests.run(() => {
socket.open()
if (file.isPaused) {
socket.send('pause', {})
}
return () => {}
})
})
}
upload (fileIDs) {
if (fileIDs.length === 0) return Promise.resolve()
const promises = fileIDs.map((id) => {
const file = this.uppy.getFile(id)
if (file.isRemote) {
return this.uploadRemote(file)
}
return this.uploadFile(file)
})
return Promise.all(promises)
}
onFileRemove (fileID, cb) {
this.uploaderEvents[fileID].on('file-removed', (file) => {
if (fileID === file.id) cb(file.id)
})
}
onFilePause (fileID, cb) {
this.uploaderEvents[fileID].on('upload-pause', (targetFileID, isPaused) => {
if (fileID === targetFileID) {
// const isPaused = this.uppy.pauseResume(fileID)
cb(isPaused)
}
})
}
onRetry (fileID, cb) {
this.uploaderEvents[fileID].on('upload-retry', (targetFileID) => {
if (fileID === targetFileID) {
cb()
}
})
}
onRetryAll (fileID, cb) {
this.uploaderEvents[fileID].on('retry-all', (filesToRetry) => {
if (!this.uppy.getFile(fileID)) return
cb()
})
}
onPauseAll (fileID, cb) {
this.uploaderEvents[fileID].on('pause-all', () => {
if (!this.uppy.getFile(fileID)) return
cb()
})
}
onCancelAll (fileID, cb) {
this.uploaderEvents[fileID].on('cancel-all', () => {
if (!this.uppy.getFile(fileID)) return
cb()
})
}
onResumeAll (fileID, cb) {
this.uploaderEvents[fileID].on('resume-all', () => {
if (!this.uppy.getFile(fileID)) return
cb()
})
}
install () {
const { capabilities } = this.uppy.getState()
this.uppy.setState({
capabilities: {
...capabilities,
resumableUploads: true
}
})
this.uppy.addUploader(this.upload)
}
uninstall () {
const { capabilities } = this.uppy.getState()
this.uppy.setState({
capabilities: {
...capabilities,
resumableUploads: false
}
})
this.uppy.removeUploader(this.upload)
}
}
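// Illustrative registration sketch (not part of the original file); the
// companionUrl value is a placeholder and `limit` mirrors the option handled
// in the constructor above.
//
//   const Uppy = require('@uppy/core')
//   const AwsS3Multipart = require('@uppy/aws-s3-multipart')
//
//   const uppy = Uppy()
//   uppy.use(AwsS3Multipart, {
//     companionUrl: 'https://companion.example.com',
//     limit: 5 // cap on concurrent part requests, see `this.requests` above
//   })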
| 1 | 12,860 | I guess this assignment is now redundant? | transloadit-uppy | js |
@@ -218,7 +218,9 @@ func (s *Server) handleSingleRPC(ctx context.Context, req *RPCReq) *RPCRes {
return backendRes
}
- backendRes, err = s.backendGroups[group].Forward(ctx, req)
+ // NOTE: We call into the specific backend here to ensure that the RPCRes is synchronized with the blockNum.
+ var blockNum uint64
+ backendRes, blockNum, err = s.backendGroups[group].Forward(ctx, req)
if err != nil {
log.Error(
"error forwarding RPC request", | 1 | package proxyd
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/cors"
)
const (
ContextKeyAuth = "authorization"
ContextKeyReqID = "req_id"
ContextKeyXForwardedFor = "x_forwarded_for"
MaxBatchRPCCalls = 100
)
type Server struct {
backendGroups map[string]*BackendGroup
wsBackendGroup *BackendGroup
wsMethodWhitelist *StringSet
rpcMethodMappings map[string]string
maxBodySize int64
authenticatedPaths map[string]string
upgrader *websocket.Upgrader
rpcServer *http.Server
wsServer *http.Server
cache RPCCache
}
func NewServer(
backendGroups map[string]*BackendGroup,
wsBackendGroup *BackendGroup,
wsMethodWhitelist *StringSet,
rpcMethodMappings map[string]string,
maxBodySize int64,
authenticatedPaths map[string]string,
cache RPCCache,
) *Server {
if cache == nil {
cache = &NoopRPCCache{}
}
if maxBodySize == 0 {
maxBodySize = math.MaxInt64
}
return &Server{
backendGroups: backendGroups,
wsBackendGroup: wsBackendGroup,
wsMethodWhitelist: wsMethodWhitelist,
rpcMethodMappings: rpcMethodMappings,
maxBodySize: maxBodySize,
authenticatedPaths: authenticatedPaths,
cache: cache,
upgrader: &websocket.Upgrader{
HandshakeTimeout: 5 * time.Second,
},
}
}
func (s *Server) RPCListenAndServe(host string, port int) error {
hdlr := mux.NewRouter()
hdlr.HandleFunc("/healthz", s.HandleHealthz).Methods("GET")
hdlr.HandleFunc("/", s.HandleRPC).Methods("POST")
hdlr.HandleFunc("/{authorization}", s.HandleRPC).Methods("POST")
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
})
addr := fmt.Sprintf("%s:%d", host, port)
s.rpcServer = &http.Server{
Handler: instrumentedHdlr(c.Handler(hdlr)),
Addr: addr,
}
log.Info("starting HTTP server", "addr", addr)
return s.rpcServer.ListenAndServe()
}
func (s *Server) WSListenAndServe(host string, port int) error {
hdlr := mux.NewRouter()
hdlr.HandleFunc("/", s.HandleWS)
hdlr.HandleFunc("/{authorization}", s.HandleWS)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
})
addr := fmt.Sprintf("%s:%d", host, port)
s.wsServer = &http.Server{
Handler: instrumentedHdlr(c.Handler(hdlr)),
Addr: addr,
}
log.Info("starting WS server", "addr", addr)
return s.wsServer.ListenAndServe()
}
func (s *Server) Shutdown() {
if s.rpcServer != nil {
s.rpcServer.Shutdown(context.Background())
}
if s.wsServer != nil {
s.wsServer.Shutdown(context.Background())
}
}
func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("OK"))
}
func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
ctx := s.populateContext(w, r)
if ctx == nil {
return
}
log.Info(
"received RPC request",
"req_id", GetReqID(ctx),
"auth", GetAuthCtx(ctx),
"user_agent", r.Header.Get("user-agent"),
)
body, err := ioutil.ReadAll(io.LimitReader(r.Body, s.maxBodySize))
if err != nil {
log.Error("error reading request body", "err", err)
writeRPCError(ctx, w, nil, ErrInternal)
return
}
RecordRequestPayloadSize(ctx, len(body))
if IsBatch(body) {
reqs, err := ParseBatchRPCReq(body)
if err != nil {
log.Error("error parsing batch RPC request", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
writeRPCError(ctx, w, nil, ErrParseErr)
return
}
if len(reqs) > MaxBatchRPCCalls {
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrTooManyBatchRequests)
writeRPCError(ctx, w, nil, ErrTooManyBatchRequests)
return
}
if len(reqs) == 0 {
writeRPCError(ctx, w, nil, ErrInvalidRequest("must specify at least one batch call"))
return
}
batchRes := make([]*RPCRes, len(reqs), len(reqs))
for i := 0; i < len(reqs); i++ {
req, err := ParseRPCReq(reqs[i])
if err != nil {
log.Info("error parsing RPC call", "source", "rpc", "err", err)
batchRes[i] = NewRPCErrorRes(nil, err)
continue
}
batchRes[i] = s.handleSingleRPC(ctx, req)
}
writeBatchRPCRes(ctx, w, batchRes)
return
}
req, err := ParseRPCReq(body)
if err != nil {
log.Info("error parsing RPC call", "source", "rpc", "err", err)
writeRPCError(ctx, w, nil, err)
return
}
backendRes := s.handleSingleRPC(ctx, req)
writeRPCRes(ctx, w, backendRes)
}
func (s *Server) handleSingleRPC(ctx context.Context, req *RPCReq) *RPCRes {
if err := ValidateRPCReq(req); err != nil {
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
return NewRPCErrorRes(nil, err)
}
group := s.rpcMethodMappings[req.Method]
if group == "" {
// use unknown below to prevent DOS vector that fills up memory
// with arbitrary method names.
log.Info(
"blocked request for non-whitelisted method",
"source", "rpc",
"req_id", GetReqID(ctx),
"method", req.Method,
)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted)
return NewRPCErrorRes(req.ID, ErrMethodNotWhitelisted)
}
var backendRes *RPCRes
backendRes, err := s.cache.GetRPC(ctx, req)
if err != nil {
log.Warn(
"cache lookup error",
"req_id", GetReqID(ctx),
"err", err,
)
}
if backendRes != nil {
return backendRes
}
backendRes, err = s.backendGroups[group].Forward(ctx, req)
if err != nil {
log.Error(
"error forwarding RPC request",
"method", req.Method,
"req_id", GetReqID(ctx),
"err", err,
)
return NewRPCErrorRes(req.ID, err)
}
if backendRes.Error == nil {
if err = s.cache.PutRPC(ctx, req, backendRes); err != nil {
log.Warn(
"cache put error",
"req_id", GetReqID(ctx),
"err", err,
)
}
}
return backendRes
}
func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) {
ctx := s.populateContext(w, r)
if ctx == nil {
return
}
log.Info("received WS connection", "req_id", GetReqID(ctx))
clientConn, err := s.upgrader.Upgrade(w, r, nil)
if err != nil {
log.Error("error upgrading client conn", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
return
}
proxier, err := s.wsBackendGroup.ProxyWS(ctx, clientConn, s.wsMethodWhitelist)
if err != nil {
if errors.Is(err, ErrNoBackends) {
RecordUnserviceableRequest(ctx, RPCRequestSourceWS)
}
log.Error("error dialing ws backend", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
clientConn.Close()
return
}
activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Inc()
go func() {
// Below call blocks so run it in a goroutine.
if err := proxier.Proxy(ctx); err != nil {
log.Error("error proxying websocket", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
}
activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Dec()
}()
log.Info("accepted WS connection", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx))
}
func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context.Context {
vars := mux.Vars(r)
authorization := vars["authorization"]
if s.authenticatedPaths == nil {
// handle the edge case where auth is disabled
// but someone sends in an auth key anyway
if authorization != "" {
log.Info("blocked authenticated request against unauthenticated proxy")
httpResponseCodesTotal.WithLabelValues("404").Inc()
w.WriteHeader(404)
return nil
}
return context.WithValue(
r.Context(),
ContextKeyReqID,
randStr(10),
)
}
if authorization == "" || s.authenticatedPaths[authorization] == "" {
log.Info("blocked unauthorized request", "authorization", authorization)
httpResponseCodesTotal.WithLabelValues("401").Inc()
w.WriteHeader(401)
return nil
}
xff := r.Header.Get("X-Forwarded-For")
if xff == "" {
ipPort := strings.Split(r.RemoteAddr, ":")
if len(ipPort) == 2 {
xff = ipPort[0]
}
}
ctx := context.WithValue(r.Context(), ContextKeyAuth, s.authenticatedPaths[authorization])
ctx = context.WithValue(ctx, ContextKeyXForwardedFor, xff)
return context.WithValue(
ctx,
ContextKeyReqID,
randStr(10),
)
}
func writeRPCError(ctx context.Context, w http.ResponseWriter, id json.RawMessage, err error) {
var res *RPCRes
if r, ok := err.(*RPCErr); ok {
res = NewRPCErrorRes(id, r)
} else {
res = NewRPCErrorRes(id, ErrInternal)
}
writeRPCRes(ctx, w, res)
}
func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
statusCode := 200
if res.IsError() && res.Error.HTTPErrorCode != 0 {
statusCode = res.Error.HTTPErrorCode
}
w.Header().Set("content-type", "application/json")
w.WriteHeader(statusCode)
ww := &recordLenWriter{Writer: w}
enc := json.NewEncoder(ww)
if err := enc.Encode(res); err != nil {
log.Error("error writing rpc response", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
return
}
httpResponseCodesTotal.WithLabelValues(strconv.Itoa(statusCode)).Inc()
RecordResponsePayloadSize(ctx, ww.Len)
}
func writeBatchRPCRes(ctx context.Context, w http.ResponseWriter, res []*RPCRes) {
w.Header().Set("content-type", "application/json")
w.WriteHeader(200)
ww := &recordLenWriter{Writer: w}
enc := json.NewEncoder(ww)
if err := enc.Encode(res); err != nil {
log.Error("error writing batch rpc response", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
return
}
RecordResponsePayloadSize(ctx, ww.Len)
}
func instrumentedHdlr(h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
respTimer := prometheus.NewTimer(httpRequestDurationSumm)
h.ServeHTTP(w, r)
respTimer.ObserveDuration()
}
}
func GetAuthCtx(ctx context.Context) string {
authUser, ok := ctx.Value(ContextKeyAuth).(string)
if !ok {
return "none"
}
return authUser
}
func GetReqID(ctx context.Context) string {
reqId, ok := ctx.Value(ContextKeyReqID).(string)
if !ok {
return ""
}
return reqId
}
func GetXForwardedFor(ctx context.Context) string {
xff, ok := ctx.Value(ContextKeyXForwardedFor).(string)
if !ok {
return ""
}
return xff
}
type recordLenReader struct {
io.Reader
Len int
}
func (r *recordLenReader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
r.Len += n
return
}
type recordLenWriter struct {
io.Writer
Len int
}
func (w *recordLenWriter) Write(p []byte) (n int, err error) {
n, err = w.Writer.Write(p)
w.Len += n
return
}
type NoopRPCCache struct{}
func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) {
return nil, nil
}
func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error {
return nil
}
| 1 | 21,654 | See above note re: blockNum synchronization. | ethereum-optimism-optimism | go |
@@ -10,8 +10,10 @@ import (
"time"
"github.com/andres-erbsen/clock"
+ "github.com/cenkalti/backoff/v3"
observer "github.com/imkira/go-observer"
"github.com/spiffe/spire/pkg/agent/client"
+ spire_backoff "github.com/spiffe/spire/pkg/agent/common/backoff"
"github.com/spiffe/spire/pkg/agent/manager/cache"
"github.com/spiffe/spire/pkg/agent/svid"
"github.com/spiffe/spire/pkg/common/bundleutil" | 1 | package manager
import (
"context"
"crypto/ecdsa"
"crypto/x509"
"errors"
"fmt"
"sync"
"time"
"github.com/andres-erbsen/clock"
observer "github.com/imkira/go-observer"
"github.com/spiffe/spire/pkg/agent/client"
"github.com/spiffe/spire/pkg/agent/manager/cache"
"github.com/spiffe/spire/pkg/agent/svid"
"github.com/spiffe/spire/pkg/common/bundleutil"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/proto/spire/agent/keymanager"
"github.com/spiffe/spire/proto/spire/api/node"
"github.com/spiffe/spire/proto/spire/common"
)
// Cache Manager errors
var (
ErrNotCached = errors.New("not cached")
)
// Manager provides cache management functionalities for agents.
type Manager interface {
// Initialize initializes the manager.
Initialize(ctx context.Context) error
// Run runs the manager. It will block until the context is cancelled.
Run(ctx context.Context) error
// SubscribeToCacheChanges returns a Subscriber on which cache entry updates are sent
// for a particular set of selectors.
SubscribeToCacheChanges(key cache.Selectors) cache.Subscriber
// SubscribeToSVIDChanges returns a new observer.Stream on which svid.State instances are received
// each time an SVID rotation finishes.
SubscribeToSVIDChanges() observer.Stream
// SubscribeToBundleChanges returns a new bundle stream on which
// map[string][]*x509.Certificate instances are received each time the
// bundle changes.
SubscribeToBundleChanges() *cache.BundleStream
// GetRotationMtx returns a mutex that locks in SVIDs rotations
GetRotationMtx() *sync.RWMutex
// GetCurrentCredentials returns the current SVID and key
GetCurrentCredentials() svid.State
// SetRotationFinishedHook sets a hook that will be called when a rotation finished
SetRotationFinishedHook(func())
// MatchingIdentities returns all of the cached identities whose
// registration entry selectors are a subset of the passed selectors.
MatchingIdentities(selectors []*common.Selector) []cache.Identity
	// FetchWorkloadUpdate gets the latest workload update for the selectors
FetchWorkloadUpdate(selectors []*common.Selector) *cache.WorkloadUpdate
// FetchJWTSVID returns a JWT SVID for the specified SPIFFEID and audience. If there
// is no JWT cached, the manager will get one signed upstream.
FetchJWTSVID(ctx context.Context, spiffeID string, audience []string) (*client.JWTSVID, error)
}
type manager struct {
c *Config
// Fields protected by mtx mutex.
mtx *sync.RWMutex
cache *cache.Cache
svid svid.Rotator
spiffeID string
svidCachePath string
bundleCachePath string
client client.Client
clk clock.Clock
}
func (m *manager) Initialize(ctx context.Context) error {
m.storeSVID(m.svid.State().SVID)
m.storeBundle(m.cache.Bundle())
err := m.storePrivateKey(ctx, m.c.SVIDKey)
if err != nil {
return fmt.Errorf("fail to store private key: %v", err)
}
return m.synchronize(ctx)
}
func (m *manager) Run(ctx context.Context) error {
defer m.client.Release()
err := util.RunTasks(ctx,
m.runSynchronizer,
m.runSVIDObserver,
m.runBundleObserver,
m.svid.Run)
if err != nil && err != context.Canceled {
m.c.Log.WithError(err).Error("cache manager crashed")
return err
}
m.c.Log.Info("cache manager stopped")
return nil
}
func (m *manager) SubscribeToCacheChanges(selectors cache.Selectors) cache.Subscriber {
return m.cache.SubscribeToWorkloadUpdates(selectors)
}
func (m *manager) SubscribeToSVIDChanges() observer.Stream {
return m.svid.Subscribe()
}
func (m *manager) SubscribeToBundleChanges() *cache.BundleStream {
return m.cache.SubscribeToBundleChanges()
}
func (m *manager) GetRotationMtx() *sync.RWMutex {
return m.svid.GetRotationMtx()
}
func (m *manager) GetCurrentCredentials() svid.State {
return m.svid.State()
}
func (m *manager) SetRotationFinishedHook(f func()) {
m.svid.SetRotationFinishedHook(f)
}
func (m *manager) MatchingIdentities(selectors []*common.Selector) []cache.Identity {
return m.cache.MatchingIdentities(selectors)
}
// FetchWorkloadUpdate gets the latest workload update for the selectors
func (m *manager) FetchWorkloadUpdate(selectors []*common.Selector) *cache.WorkloadUpdate {
return m.cache.FetchWorkloadUpdate(selectors)
}
func (m *manager) FetchJWTSVID(ctx context.Context, spiffeID string, audience []string) (*client.JWTSVID, error) {
now := m.clk.Now()
cachedSVID, ok := m.cache.GetJWTSVID(spiffeID, audience)
if ok && !jwtSVIDExpiresSoon(cachedSVID, now) {
return cachedSVID, nil
}
newSVID, err := m.client.FetchJWTSVID(ctx, &node.JSR{
SpiffeId: spiffeID,
Audience: audience,
})
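	// Prefer a freshly signed SVID; fall back to the cached one unless it has fully expired.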
switch {
case err == nil:
case cachedSVID == nil:
return nil, err
case jwtSVIDExpired(cachedSVID, now):
return nil, fmt.Errorf("unable to renew JWT for %q (err=%v)", spiffeID, err)
default:
m.c.Log.WithError(err).WithField(telemetry.SPIFFEID, spiffeID).Warn("unable to renew JWT; returning cached copy")
return cachedSVID, nil
}
m.cache.SetJWTSVID(spiffeID, audience, newSVID)
return newSVID, nil
}
func (m *manager) runSynchronizer(ctx context.Context) error {
for {
select {
case <-m.clk.After(m.c.SyncInterval):
case <-ctx.Done():
return nil
}
err := m.synchronize(ctx)
if err != nil {
// Just log the error and wait for next synchronization
m.c.Log.WithError(err).Error("synchronize failed")
}
}
}
func (m *manager) runSVIDObserver(ctx context.Context) error {
svidStream := m.SubscribeToSVIDChanges()
for {
select {
case <-ctx.Done():
return nil
case <-svidStream.Changes():
s := svidStream.Next().(svid.State)
err := m.storePrivateKey(ctx, s.Key)
if err != nil {
m.c.Log.WithError(err).Error("failed to store private key")
continue
}
m.storeSVID(s.SVID)
}
}
}
func (m *manager) runBundleObserver(ctx context.Context) error {
bundleStream := m.SubscribeToBundleChanges()
for {
select {
case <-ctx.Done():
return nil
case <-bundleStream.Changes():
b := bundleStream.Next()
m.storeBundle(b[m.c.TrustDomain.String()])
}
}
}
func (m *manager) storeSVID(svidChain []*x509.Certificate) {
err := StoreSVID(m.svidCachePath, svidChain)
if err != nil {
m.c.Log.WithError(err).Warn("could not store SVID")
}
}
func (m *manager) storeBundle(bundle *bundleutil.Bundle) {
var rootCAs []*x509.Certificate
if bundle != nil {
rootCAs = bundle.RootCAs()
}
err := StoreBundle(m.bundleCachePath, rootCAs)
if err != nil {
m.c.Log.WithError(err).Error("could not store bundle")
}
}
func (m *manager) storePrivateKey(ctx context.Context, key *ecdsa.PrivateKey) error {
km := m.c.Catalog.GetKeyManager()
keyBytes, err := x509.MarshalECPrivateKey(key)
if err != nil {
return err
}
if _, err := km.StorePrivateKey(ctx, &keymanager.StorePrivateKeyRequest{PrivateKey: keyBytes}); err != nil {
m.c.Log.WithError(err).Error("could not store new agent key pair")
m.c.Log.Warn("Error encountered while storing new agent key pair. Is your KeyManager plugin is up-to-date?")
// This error is not returned, to preserve backwards-compatibility with KeyManagers that were built against the old interface.
// If the StorePrivateKey() method isn't available on the plugin, we get a "not implemented" error, which we
// should ignore for now, but log an error and warning.
// return fmt.Errorf("store key pair: %v", err)
}
return nil
}
func jwtSVIDExpiresSoon(svid *client.JWTSVID, now time.Time) bool {
if jwtSVIDExpired(svid, now) {
return true
}
// if the SVID has less than half of its lifetime left, consider it
// as expiring soon
if !now.Before(svid.IssuedAt.Add(svid.ExpiresAt.Sub(svid.IssuedAt) / 2)) {
return true
}
return false
}
func jwtSVIDExpired(svid *client.JWTSVID, now time.Time) bool {
return !now.Before(svid.ExpiresAt)
}
| 1 | 12,390 | wonder if we could avoid doing named imports if we type aliased `backoff.Backoff` in the `.../agent/common/backoff` package? | spiffe-spire | go |
@@ -44,7 +44,6 @@
#include "disassemble.h"
#include "instr.h"
#include "instr_create.h"
-
#include "codec.h"
| 1 | /* **********************************************************
* Copyright (c) 2017 Google, Inc. All rights reserved.
* Copyright (c) 2016 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* AArch64 decoder and encoder functions.
* This file is rather large and should perhaps be split up, but there are many
* opportunities for inlining which could be lost if it were split into separate
* translation units, and it is helpful to have the per-operand-type decode/encode
* functions next to each other.
*/
#include "../globals.h"
#include "arch.h"
#include "decode.h"
#include "disassemble.h"
#include "instr.h"
#include "instr_create.h"
#include "codec.h"
/* Decode immediate argument of bitwise operations.
* Returns zero if the encoding is invalid.
*/
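/* The 13-bit field is N:immr:imms (bit 12, bits 6-11, bits 0-5), the AArch64
 * logical-immediate encoding: a run of ones within an element, rotated right
 * by immr and replicated to 64 bits. For example 0x1007 (N=1, immr=0, imms=7)
 * decodes to 0xff.
 */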
static ptr_uint_t
decode_bitmask(uint enc)
{
uint pos = enc >> 6 & 63;
uint len = enc & 63;
ptr_uint_t x;
if (TEST(1U << 12, enc)) {
if (len == 63)
return 0;
x = ((ptr_uint_t)1 << (len + 1)) - 1;
return x >> pos | x << 1 << (63 - pos);
} else {
uint i, t = 32;
while ((t & len) != 0)
t >>= 1;
if (t < 2)
return 0;
x = len & (t - 1);
if (x == t - 1)
return 0;
x = ((ptr_uint_t)1 << (x + 1)) - 1;
pos &= t - 1;
x = x >> pos | x << (t - pos);
for (i = 2; i < 64; i *= 2) {
if (t <= i)
x |= x << i;
}
return x;
}
}
/* Encode immediate argument of bitwise operations.
* Returns -1 if the value cannot be encoded.
*/
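/* Inverse of decode_bitmask: e.g. encode_bitmask(0xff) returns 0x1007, while
 * values with no valid encoding, such as 0 and ~(ptr_uint_t)0, return -1.
 */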
static int
encode_bitmask(ptr_uint_t x)
{
int neg, rep, pos, len;
neg = 0;
if ((x & 1) != 0)
neg = 1, x = ~x;
if (x == 0)
return -1;
if (x >> 2 == (x & (((ptr_uint_t)1 << (64 - 2)) - 1)))
rep = 2, x &= ((ptr_uint_t)1 << 2) - 1;
else if (x >> 4 == (x & (((ptr_uint_t)1 << (64 - 4)) - 1)))
rep = 4, x &= ((ptr_uint_t)1 << 4) - 1;
else if (x >> 8 == (x & (((ptr_uint_t)1 << (64 - 8)) - 1)))
rep = 8, x &= ((ptr_uint_t)1 << 8) - 1;
else if (x >> 16 == (x & (((ptr_uint_t)1 << (64 - 16)) - 1)))
rep = 16, x &= ((ptr_uint_t)1 << 16) - 1;
else if (x >> 32 == (x & (((ptr_uint_t)1 << (64 - 32)) - 1)))
rep = 32, x &= ((ptr_uint_t)1 << 32) - 1;
else
rep = 64;
pos = 0;
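    /* Binary-search for the lowest set bit: count the trailing zero bits of x into pos. */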
(x & (((ptr_uint_t)1 << 32) - 1)) != 0 ? 0 : (x >>= 32, pos += 32);
(x & (((ptr_uint_t)1 << 16) - 1)) != 0 ? 0 : (x >>= 16, pos += 16);
(x & (((ptr_uint_t)1 << 8) - 1)) != 0 ? 0 : (x >>= 8, pos += 8);
(x & (((ptr_uint_t)1 << 4) - 1)) != 0 ? 0 : (x >>= 4, pos += 4);
(x & (((ptr_uint_t)1 << 2) - 1)) != 0 ? 0 : (x >>= 2, pos += 2);
(x & (((ptr_uint_t)1 << 1) - 1)) != 0 ? 0 : (x >>= 1, pos += 1);
len = 0;
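    /* Count the trailing one bits of x into len; if anything remains in x afterwards
     * the run of ones was not contiguous and the value cannot be encoded.
     */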
(~x & (((ptr_uint_t)1 << 32) - 1)) != 0 ? 0 : (x >>= 32, len += 32);
(~x & (((ptr_uint_t)1 << 16) - 1)) != 0 ? 0 : (x >>= 16, len += 16);
(~x & (((ptr_uint_t)1 << 8) - 1)) != 0 ? 0 : (x >>= 8, len += 8);
(~x & (((ptr_uint_t)1 << 4) - 1)) != 0 ? 0 : (x >>= 4, len += 4);
(~x & (((ptr_uint_t)1 << 2) - 1)) != 0 ? 0 : (x >>= 2, len += 2);
(~x & (((ptr_uint_t)1 << 1) - 1)) != 0 ? 0 : (x >>= 1, len += 1);
if (x != 0)
return -1;
if (neg) {
pos = (pos + len) & (rep - 1);
len = rep - len;
}
return (0x1000 & rep << 6) | (((rep - 1) ^ 31) << 1 & 63) |
((rep - pos) & (rep - 1)) << 6 | (len - 1);
}
/* Extract signed integer from subfield of word. */
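/* The subtraction yields the two's-complement value (the sign bit has weight
 * -2^(len-1)); the final expression then converts the unsigned intermediate to
 * ptr_int_t without relying on implementation-defined conversion of
 * out-of-range values.
 */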
static inline ptr_int_t
extract_int(uint enc, int pos, int len)
{
uint u = ((enc >> pos & (((uint)1 << (len - 1)) - 1)) -
(enc >> pos & ((uint)1 << (len - 1))));
return u << 1 < u ? -(ptr_int_t)~u - 1 : u;
}
/* Extract unsigned integer from subfield of word. */
static inline ptr_uint_t
extract_uint(uint enc, int pos, int len)
{
return enc >> pos & (((uint)1 << len) - 1);
}
static inline bool
try_encode_int(OUT uint *bits, int len, int scale, ptr_int_t val)
{
/* If any of lowest 'scale' bits are set, or 'val' is out of range, fail. */
if (((ptr_uint_t)val & ((1U << scale) - 1)) != 0 ||
val < -((ptr_int_t)1 << (len + scale - 1)) ||
val >= (ptr_int_t)1 << (len + scale - 1))
return false;
*bits = (ptr_uint_t)val >> scale & ((1U << len) - 1);
return true;
}
static inline bool
try_encode_imm(OUT uint *imm, int bits, opnd_t opnd)
{
ptr_int_t value;
if (!opnd_is_immed_int(opnd))
return false;
value = opnd_get_immed_int(opnd);
if (!(0 <= value && value < (uint)1 << bits))
return false;
*imm = value;
return true;
}
static inline bool
encode_pc_off(OUT uint *poff, int bits, byte *pc, instr_t *instr, opnd_t opnd,
decode_info_t *di)
{
ptr_uint_t off, range;
ASSERT(0 < bits && bits <= 32);
if (opnd.kind == PC_kind)
off = opnd.value.pc - pc;
else if (opnd.kind == INSTR_kind)
off = (byte *)opnd_get_instr(opnd)->note - (byte *)instr->note;
else
return false;
range = (ptr_uint_t)1 << bits;
if (!TEST(~((range - 1) << 2), off + (range << 1))) {
*poff = off >> 2 & (range - 1);
return true;
}
/* If !di->check_reachable we still require correct alignment. */
if (!di->check_reachable && ALIGNED(off, 4)) {
*poff = 0;
return true;
}
return false;
}
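/* The 15-bit MRS/MSR system register field is o0:op1:CRn:CRm:op2;
 * 0x5a10 is NZCV, 0x5a20 FPCR, 0x5a21 FPSR and 0x5e82 TPIDR_EL0.
 * Unrecognized values are represented as plain immediates.
 */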
static inline opnd_t
decode_sysreg(uint imm15)
{
reg_t sysreg;
switch (imm15) {
case 0x5a10: sysreg = DR_REG_NZCV; break;
case 0x5a20: sysreg = DR_REG_FPCR; break;
case 0x5a21: sysreg = DR_REG_FPSR; break;
case 0x5e82: sysreg = DR_REG_TPIDR_EL0; break;
default:
return opnd_create_immed_uint(imm15, OPSZ_2);
}
return opnd_create_reg(sysreg);
}
static inline bool
encode_sysreg(OUT uint *imm15, opnd_t opnd)
{
if (opnd_is_reg(opnd)) {
switch (opnd_get_reg(opnd)) {
case DR_REG_NZCV: *imm15 = 0x5a10; break;
case DR_REG_FPCR: *imm15 = 0x5a20; break;
case DR_REG_FPSR: *imm15 = 0x5a21; break;
case DR_REG_TPIDR_EL0: *imm15 = 0x5e82; break;
default:
return false;
}
return true;
}
if (opnd_is_immed_int(opnd)) {
uint imm;
if (try_encode_imm(&imm, 15, opnd) && !opnd_is_reg(decode_sysreg(imm))) {
*imm15 = imm;
return true;
}
return false;
}
return false;
}
/* Decode integer register. Input 'n' is number from 0 to 31, where
* 31 can mean stack pointer or zero register, depending on 'is_sp'.
*/
static inline reg_id_t
decode_reg(uint n, bool is_x, bool is_sp)
{
return (n < 31 ? (is_x ? DR_REG_X0 : DR_REG_W0) + n :
is_sp ? (is_x ? DR_REG_XSP : DR_REG_WSP) : (is_x ? DR_REG_XZR : DR_REG_WZR));
}
/* Encode integer register. */
static inline bool
encode_reg(OUT uint *num, OUT bool *is_x, reg_id_t reg, bool is_sp)
{
if (DR_REG_X0 <= reg && reg <= DR_REG_X30) {
*num = reg - DR_REG_X0;
*is_x = true;
return true;
}
if (DR_REG_W0 <= reg && reg <= DR_REG_W30) {
*num = reg - DR_REG_W0;
*is_x = false;
return true;
}
if (is_sp && (reg == DR_REG_XSP || reg == DR_REG_WSP)) {
*num = 31;
*is_x = (reg == DR_REG_XSP);
return true;
}
if (!is_sp && (reg == DR_REG_XZR || reg == DR_REG_WZR)) {
*num = 31;
*is_x = (reg == DR_REG_XZR);
return true;
}
return false;
}
/* Decode SIMD/FP register. */
static inline opnd_t
decode_vreg(uint scale, uint n)
{
reg_id_t reg = DR_REG_NULL;
ASSERT(n < 32 && scale < 5);
switch (scale) {
case 0: reg = DR_REG_B0 + n; break;
case 1: reg = DR_REG_H0 + n; break;
case 2: reg = DR_REG_S0 + n; break;
case 3: reg = DR_REG_D0 + n; break;
case 4: reg = DR_REG_Q0 + n; break;
}
return opnd_create_reg(reg);
}
/* Encode SIMD/FP register. */
static inline bool
encode_vreg(INOUT opnd_size_t *x, OUT uint *r, opnd_t opnd)
{
reg_id_t reg;
opnd_size_t sz;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
if ((uint)(reg - DR_REG_B0) < 32) {
n = reg - DR_REG_B0;
sz = OPSZ_1;
} else if ((uint)(reg - DR_REG_H0) < 32) {
n = reg - DR_REG_H0;
sz = OPSZ_2;
} else if ((uint)(reg - DR_REG_S0) < 32) {
n = reg - DR_REG_S0;
sz = OPSZ_4;
} else if ((uint)(reg - DR_REG_D0) < 32) {
n = reg - DR_REG_D0;
sz = OPSZ_8;
} else if ((uint)(reg - DR_REG_Q0) < 32) {
n = reg - DR_REG_Q0;
sz = OPSZ_16;
} else
return false;
if (*x == OPSZ_NA)
*x = sz;
else if (*x != sz)
return false;
*r = n;
return true;
}
static opnd_t
create_base_imm(uint enc, int disp, int bytes)
{
/* The base register number comes from bits 5 to 9. It may be SP. */
return opnd_create_base_disp(decode_reg(extract_uint(enc, 5, 5), true, true),
DR_REG_NULL, 0, disp, opnd_size_from_bytes(bytes));
}
static bool
is_base_imm(opnd_t opnd, OUT uint *regnum)
{
uint n;
bool is_x;
if (!opnd_is_base_disp(opnd) || opnd_get_index(opnd) != DR_REG_NULL ||
!encode_reg(&n, &is_x, opnd_get_base(opnd), true) || !is_x)
return false;
*regnum = n;
return true;
}
/* Used for mem7* operand types, which have a 7-bit offset and are used by
* load/store (pair) instructions. Returns the scale (log base 2 of number
* of bytes) of the memory argument, a function of bits 26, 30 and 31.
*/
static int
mem7_scale(uint enc)
{
return 2 + (TEST(1U << 26, enc) ?
extract_uint(enc, 30, 2) : extract_uint(enc, 31, 1));
}
/* Used for memlit operand type, used by load (literal). Returns the size
* of the memory operand, a function of bits 26, 30 and 31.
*/
static opnd_size_t
memlit_size(uint enc)
{
opnd_size_t size = OPSZ_0;
switch (extract_uint(enc, 30, 2)) {
case 0: size = OPSZ_4; break;
case 1: size = OPSZ_8; break;
case 2: size = TEST(1U << 26, enc) ? OPSZ_16 : OPSZ_4;
}
return size;
}
/* Returns the number of registers accessed by SIMD load structure and replicate,
* a function of bits 13 and 21.
*/
static int
memvr_regcount(uint enc)
{
return ((enc >> 13 & 1) << 1 | (enc >> 21 & 1)) + 1;
}
/* Used for memvs operand type, used by SIMD load/store single structure.
* Returns the number of bytes read or written, which is a function of
* bits 10, 11, 13, 14, 15 and 21.
*/
static int
memvs_size(uint enc)
{
int scale = extract_uint(enc, 14, 2);
/* Number of elements in structure, 1 to 4. */
int elems = memvr_regcount(enc);
int size = extract_uint(enc, 10, 2);
if (scale == 2 && size == 1)
scale = 3;
return elems * (1 << scale);
}
/* Returns the number of registers accessed by SIMD load/store multiple structures,
* a function of bits 12-15.
*/
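/* The opcode values correspond to LD4/ST4 (0), four-register LD1/ST1 (2),
 * LD3/ST3 (4), three-register LD1/ST1 (6), one-register LD1/ST1 (7),
 * LD2/ST2 (8) and two-register LD1/ST1 (10).
 */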
static int
multistruct_regcount(uint enc)
{
switch (extract_uint(enc, 12, 4)) {
case 0: return 4;
case 2: return 4;
case 4: return 3;
case 6: return 3;
case 7: return 1;
case 8: return 2;
case 10: return 2;
}
ASSERT(false);
return 0;
}
/*******************************************************************************
* Pairs of functions for decoding and encoding a generalised type of operand.
*/
/* adr_page: used for adr, adrp */
static bool
decode_opnd_adr_page(int scale, uint enc, byte *pc, OUT opnd_t *opnd)
{
uint bits = (enc >> 3 & 0x1ffffc) | (enc >> 29 & 3);
byte *addr = ((byte *)((ptr_uint_t)pc >> scale << scale) +
extract_int(bits, 0, 21) * ((ptr_int_t)1 << scale));
*opnd = opnd_create_rel_addr(addr, OPSZ_0);
return true;
}
static bool
encode_opnd_adr_page(int scale, byte *pc, opnd_t opnd, OUT uint *enc_out,
instr_t *instr, decode_info_t *di)
{
ptr_int_t offset;
uint bits;
if (opnd_is_rel_addr(opnd)) {
offset = (ptr_int_t)opnd_get_addr(opnd) -
(ptr_int_t)((ptr_uint_t)pc >> scale << scale);
} else if (opnd_is_instr(opnd)) {
offset = (ptr_int_t)
((byte *)opnd_get_instr(opnd)->note - (byte *)instr->note);
} else
return false;
if (try_encode_int(&bits, 21, scale, offset)) {
*enc_out = (bits & 3) << 29 | (bits & 0x1ffffc) << 3;
return true;
}
/* If !di->check_reachable we still require correct alignment. */
if (!di->check_reachable && ALIGNED(offset, 1ULL << scale)) {
*enc_out = 0;
return true;
}
return false;
}
/* dq_plus: used for dq0, dq5, dq16, dq0p1, dq0p2, dq0p3 */
static inline bool
decode_opnd_dq_plus(int add, int rpos, int qpos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg((TEST(1U << qpos, enc) ? DR_REG_Q0 : DR_REG_D0) +
                            (extract_uint(enc, rpos, 5) + add) % 32);
return true;
}
static inline bool
encode_opnd_dq_plus(int add, int rpos, int qpos, opnd_t opnd, OUT uint *enc_out)
{
uint num;
bool q;
if (!opnd_is_reg(opnd))
return false;
q = (uint)(opnd_get_reg(opnd) - DR_REG_Q0) < 32;
num = opnd_get_reg(opnd) - (q ? DR_REG_Q0 : DR_REG_D0);
if (num >= 32)
return false;
*enc_out = ((num - add) % 32) << rpos | (uint)q << qpos;
return true;
}
/* index: used for opnd_index0, ..., opnd_index3 */
static bool
decode_opnd_index(int n, uint enc, OUT opnd_t *opnd)
{
uint bits = (enc >> 30 & 1) << 3 | (enc >> 10 & 7);
*opnd = opnd_create_immed_int(bits >> n, OPSZ_4b);
return true;
}
static bool
encode_opnd_index(int n, opnd_t opnd, OUT uint *enc_out)
{
ptr_int_t val;
uint bits;
if (!opnd_is_immed_int(opnd))
return false;
val = opnd_get_immed_int(opnd);
if (val < 0 || val >= 16 >> n)
return false;
bits = val << n;
*enc_out = (bits >> 3 & 1) << 30 | (bits & 7) << 10;
return true;
}
/* int: used for almost every operand type that is an immediate integer */
static bool
decode_opnd_int(int pos, int len, bool signd, int scale, opnd_size_t size,
dr_opnd_flags_t flags, uint enc, OUT opnd_t *opnd)
{
ptr_int_t val = signd ? extract_int(enc, pos, len) : extract_uint(enc, pos, len);
*opnd = opnd_add_flags(opnd_create_immed_int(val * ((ptr_int_t)1 << scale), size),
flags);
return true;
}
static bool
encode_opnd_int(int pos, int len, bool signd, int scale,
dr_opnd_flags_t flags, opnd_t opnd, OUT uint *enc_out)
{
ptr_uint_t val;
if (!opnd_is_immed_int(opnd) || (opnd_get_flags(opnd) & flags) != flags)
return false;
val = opnd_get_immed_int(opnd);
if ((val & (((ptr_uint_t)1 << scale) - 1)) != 0)
return false;
if ((val + (signd ? ((ptr_uint_t)1 << (len + scale - 1)) : 0)) >> (len + scale) != 0)
return false;
*enc_out = (val >> scale & (((ptr_uint_t)1 << (len - 1)) * 2 - 1)) << pos;
return true;
}
/* imm_bf: used for bitfield immediate operands */
static bool
decode_opnd_imm_bf(int pos, uint enc, OUT opnd_t *opnd)
{
if (!TEST(1U << 31, enc) && extract_uint(enc, pos, 6) >= 32)
return false;
return decode_opnd_int(pos, 6, false, 0, OPSZ_6b, 0, enc, opnd);
}
static bool
encode_opnd_imm_bf(int pos, uint enc, opnd_t opnd, uint *enc_out)
{
if (!TEST(1U << 31, enc) && extract_uint(enc, pos, 6) >= 32)
return false;
return encode_opnd_int(pos, 6, false, 0, 0, opnd, enc_out);
}
/* mem0_scale: used for mem0, mem0p */
static inline bool
decode_opnd_mem0_scale(int scale, uint enc, OUT opnd_t *opnd)
{
*opnd = create_base_imm(enc, 0, 1 << scale);
return true;
}
static inline bool
encode_opnd_mem0_scale(int scale, opnd_t opnd, OUT uint *enc_out)
{
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != opnd_size_from_bytes(1 << scale) ||
opnd_get_disp(opnd) != 0)
return false;
*enc_out = xn << 5;
return true;
}
/* mem12_scale: used for mem12, mem12q, prf12 */
static inline bool
decode_opnd_mem12_scale(int scale, bool prfm, uint enc, OUT opnd_t *opnd)
{
*opnd = create_base_imm(enc, extract_uint(enc, 10, 12) << scale,
prfm ? 0 : 1 << scale);
return true;
}
static inline bool
encode_opnd_mem12_scale(int scale, bool prfm, opnd_t opnd, OUT uint *enc_out)
{
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != (prfm ? OPSZ_0 : opnd_size_from_bytes(1 << scale)))
return false;
disp = opnd_get_disp(opnd);
if (disp < 0 || disp >> scale > 0xfff || disp >> scale << scale != disp)
return false;
*enc_out = xn << 5 | (uint)disp >> scale << 10;
return true;
}
/* mem7_postindex: used for mem7, mem7post */
static inline bool
decode_opnd_mem7_postindex(bool post, uint enc, OUT opnd_t *opnd)
{
int scale = mem7_scale(enc);
*opnd = create_base_imm(enc, post ? 0 : extract_int(enc, 15, 7) * (1 << scale),
2 << scale);
opnd->value.base_disp.pre_index = !post;
return true;
}
static inline bool
encode_opnd_mem7_postindex(bool post, uint enc, opnd_t opnd, OUT uint *enc_out)
{
int scale = mem7_scale(enc);
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != opnd_size_from_bytes(2 << scale))
return false;
disp = opnd_get_disp(opnd);
if (disp == 0 && opnd.value.base_disp.pre_index == post)
return false;
if (post ? disp != 0 :
((uint)disp & ((1 << scale) - 1)) != 0 ||
(uint)disp + (0x40 << scale) >= (0x80 << scale))
return false;
*enc_out = xn << 5 | ((uint)disp >> scale & 0x7f) << 15;
return true;
}
/* mem9_bytes: used for mem9, mem9post, mem9q, mem9qpost, prf9 */
static inline bool
decode_opnd_mem9_bytes(int bytes, bool post, uint enc, OUT opnd_t *opnd)
{
*opnd = create_base_imm(enc, post ? 0 : extract_int(enc, 12, 9), bytes);
opnd->value.base_disp.pre_index = !post;
return true;
}
static inline bool
encode_opnd_mem9_bytes(int bytes, bool post, opnd_t opnd, OUT uint *enc_out)
{
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) || opnd_get_size(opnd) != opnd_size_from_bytes(bytes))
return false;
disp = opnd_get_disp(opnd);
if (disp == 0 && opnd.value.base_disp.pre_index == post)
return false;
if (post ? (disp != 0) : (disp < -256 || disp > 255))
return false;
*enc_out = xn << 5 | ((uint)disp & 0x1ff) << 12;
return true;
}
/* memreg_size: used for memreg, memregq, prfreg */
static inline bool
decode_opnd_memreg_size(opnd_size_t size, uint enc, OUT opnd_t *opnd)
{
if (!TEST(1U << 14, enc))
return false;
*opnd = opnd_create_base_disp_aarch64(decode_reg(enc >> 5 & 31, true, true),
decode_reg(enc >> 16 & 31, true, false),
enc >> 13 & 7, TEST(1U << 12, enc),
0, 0, size);
return true;
}
static inline bool
encode_opnd_memreg_size(opnd_size_t size, opnd_t opnd, OUT uint *enc_out)
{
uint rn, rm, option;
bool xn, xm, scaled;
if (!opnd_is_base_disp(opnd) || opnd_get_size(opnd) != size ||
opnd_get_disp(opnd) != 0)
return false;
option = opnd_get_index_extend(opnd, &scaled, NULL);
if (!TEST(2, option))
return false;
if (!encode_reg(&rn, &xn, opnd_get_base(opnd), true) || !xn ||
!encode_reg(&rm, &xm, opnd_get_index(opnd), false) || !xm)
return false;
*enc_out = rn << 5 | rm << 16 | option << 13 | (uint)scaled << 12;
return true;
}
/* q0p: used for q0p1, q0p2, q0p3 */
static bool
decode_opnd_q0p(int add, uint enc, OUT opnd_t *opnd)
{
*opnd = decode_vreg(4, (extract_uint(enc, 0, 5) + add) % 32);
return true;
}
static bool
encode_opnd_q0p(int add, opnd_t opnd, OUT uint *enc_out)
{
opnd_size_t size = OPSZ_NA;
uint r;
if (!encode_vreg(&size, &r, opnd) || size != OPSZ_16)
return false;
*enc_out = (r - add) % 32;
return true;
}
/* rn: used for many integer register operands where bit 31 specifies W or X */
static inline bool
decode_opnd_rn(bool is_sp, int pos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(decode_reg(extract_uint(enc, pos, 5),
TEST(1U << 31, enc), is_sp));
return true;
}
static inline bool
encode_opnd_rn(bool is_sp, int pos, opnd_t opnd, OUT uint *enc_out)
{
uint num;
bool is_x;
if (!opnd_is_reg(opnd) || !encode_reg(&num, &is_x, opnd_get_reg(opnd), is_sp))
return false;
*enc_out = (uint)is_x << 31 | num << pos;
return true;
}
/* vector_reg: used for many FP/SIMD register operands */
static bool
decode_opnd_vector_reg(int pos, int scale, uint enc, OUT opnd_t *opnd)
{
*opnd = decode_vreg(scale, extract_uint(enc, pos, 5));
return true;
}
static bool
encode_opnd_vector_reg(int pos, int scale, opnd_t opnd, OUT uint *enc_out)
{
opnd_size_t size = OPSZ_NA;
uint r;
if (!encode_vreg(&size, &r, opnd) || size != opnd_size_from_bytes(1 << scale))
return false;
*enc_out = r << pos;
return true;
}
/* vtn: used for vt0, ..., vt3 */
static bool
decode_opnd_vtn(int add, uint enc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 10, 2) == 3 && extract_uint(enc, 30, 1) == 0)
return false;
*opnd = opnd_create_reg((TEST(1U << 30, enc) ? DR_REG_Q0 : DR_REG_D0) +
((extract_uint(enc, 0, 5) + add) % 32));
return true;
}
static bool
encode_opnd_vtn(int add, uint enc, opnd_t opnd, OUT uint *enc_out)
{
reg_t reg;
uint num;
bool q;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
q = (uint)(reg - DR_REG_Q0) < 32;
if (extract_uint(enc, 10, 2) == 3 && !q)
return false;
num = reg - (q ? DR_REG_Q0 : DR_REG_D0);
if (num >= 32)
return false;
*enc_out = (num - add) % 32 | (uint)q << 30;
return true;
}
/* wxn: used for many integer register operands with fixed size (W or X) */
static bool
decode_opnd_wxn(bool is_x, bool is_sp, int pos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(decode_reg(enc >> pos & 31, is_x, is_sp));
return true;
}
static bool
encode_opnd_wxn(bool is_x, bool is_sp, int pos, opnd_t opnd, OUT uint *enc_out)
{
reg_id_t reg;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
n = reg - (is_x ? DR_REG_X0 : DR_REG_W0);
if (n < 31) {
*enc_out = n << pos;
return true;
}
if (reg == (is_sp ?
(is_x ? DR_REG_XSP : DR_REG_WSP) :
(is_x ? DR_REG_XZR : DR_REG_WZR))) {
*enc_out = (uint)31 << pos;
return true;
}
return false;
}
/* wxnp: used for CASP, even/odd register pairs */
static bool
decode_opnd_wxnp(bool is_x, int plus, int pos, uint enc, OUT opnd_t *opnd)
{
if ((enc >> pos & 1) != 0)
return false;
*opnd = opnd_create_reg(decode_reg(((enc >> pos) + plus) & 31, is_x, false));
return true;
}
static bool
encode_opnd_wxnp(bool is_x, int plus, int pos, opnd_t opnd, OUT uint *enc_out)
{
reg_id_t reg;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
n = reg - (is_x ? DR_REG_X0 : DR_REG_W0);
if (n < 31 && (n - plus) % 2 == 0) {
*enc_out = ((n - plus) & 31) << pos;
return true;
}
if (reg == (is_x ? DR_REG_XZR : DR_REG_WZR) && ((uint)31 - plus) % 2 == 0) {
*enc_out = (((uint)31 - plus) & 31) << pos;
return true;
}
return false;
}
/*******************************************************************************
* Pairs of functions for decoding and encoding each type of operand, as listed in
* "codec.txt". Try to keep these short: perhaps a tail call to a function in the
* previous section.
*/
/* b0: B register at bit position 0 */
static inline bool
decode_opnd_b0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 0, enc, opnd);
}
static inline bool
encode_opnd_b0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 0, opnd, enc_out);
}
/* cond: condition operand for conditional compare */
static inline bool
decode_opnd_cond(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(12, 4, false, 0, OPSZ_4b, DR_OPND_IS_CONDITION, enc, opnd);
}
static inline bool
encode_opnd_cond(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(12, 4, false, 0, 0, opnd, enc_out);
}
/* d0: D register at bit position 0 */
static inline bool
decode_opnd_d0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 3, enc, opnd);
}
static inline bool
encode_opnd_d0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 3, opnd, enc_out);
}
/* d10: D register at bit position 10 */
static inline bool
decode_opnd_d10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 3, enc, opnd);
}
static inline bool
encode_opnd_d10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 3, opnd, enc_out);
}
/* dq0: D/Q register at bit position 0; bit 30 selects Q reg */
static inline bool
decode_opnd_dq0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(0, 0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(0, 0, 30, opnd, enc_out);
}
/* dq5: D/Q register at bit position 5; bit 30 selects Q reg */
static inline bool
decode_opnd_dq5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(0, 5, 30, enc, opnd);
}
static inline bool
encode_opnd_dq5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(0, 5, 30, opnd, enc_out);
}
/* dq16: D/Q register at bit position 16; bit 30 selects Q reg */
static inline bool
decode_opnd_dq16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(0, 16, 30, enc, opnd);
}
static inline bool
encode_opnd_dq16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(0, 16, 30, opnd, enc_out);
}
/* dq0p1: as dq0 but add 1 mod 32 to reg number */
static inline bool
decode_opnd_dq0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(1, 0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(1, 0, 30, opnd, enc_out);
}
/* dq0p2: as dq0 but add 2 mod 32 to reg number */
static inline bool
decode_opnd_dq0p2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(2, 0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(2, 0, 30, opnd, enc_out);
}
/* dq0p3: as dq0 but add 3 mod 32 to reg number */
static inline bool
decode_opnd_dq0p3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(3, 0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(3, 0, 30, opnd, enc_out);
}
/* ext: extend type, dr_extend_type_t */
static inline bool
decode_opnd_ext(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(13, 3, false, 0, OPSZ_3b, DR_OPND_IS_EXTEND, enc, opnd);
}
static inline bool
encode_opnd_ext(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(13, 3, false, 0, DR_OPND_IS_EXTEND, opnd, enc_out);
}
/* extam: extend amount, a left shift from 0 to 4 */
static inline bool
decode_opnd_extam(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 10, 3) > 4) /* shift amount must be <= 4 */
return false;
return decode_opnd_int(10, 3, false, 0, OPSZ_3b, 0, enc, opnd);
}
static inline bool
encode_opnd_extam(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(10, 3, false, 0, 0, opnd, &t) ||
extract_uint(t, 10, 3) > 4) /* shift amount must be <= 4 */
return false;
*enc_out = t;
return true;
}
static inline bool
decode_float_reg(uint n, uint type, reg_id_t *reg)
{
switch (type) {
case 3:
/* Half precision operands are only supported in Armv8.2+. */
*reg = DR_REG_H0 + n;
return true;
case 0:
*reg = DR_REG_S0 + n;
return true;
case 1:
*reg = DR_REG_D0 + n;
return true;
default:
return false;
}
}
static inline bool
decode_opnd_float_reg(int pos, uint enc, OUT opnd_t *opnd)
{
reg_id_t reg;
if (!decode_float_reg(extract_uint(enc, pos, 5), extract_uint(enc, 22, 2), ®))
return false;
*opnd = opnd_create_reg(reg);
return true;
}
static inline bool
encode_opnd_float_reg(int pos, opnd_t opnd, OUT uint *enc_out)
{
uint num;
uint type;
opnd_size_t size = OPSZ_NA;
if (!encode_vreg(&size, &num, opnd))
return false;
switch (size) {
case OPSZ_2:
/* Half precision operands are only supported in Armv8.2+. */
type = 3;
break;
case OPSZ_4:
type = 0;
break;
case OPSZ_8:
type = 1;
break;
default:
return false;
}
*enc_out = type << 22 | num << pos;
return true;
}
static inline bool
decode_opnd_float_reg0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_float_reg(0, enc, opnd);
}
static inline bool
encode_opnd_float_reg0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_float_reg(0, opnd, enc_out);
}
static inline bool
decode_opnd_float_reg5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_float_reg(5, enc, opnd);
}
static inline bool
encode_opnd_float_reg5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_float_reg(5, opnd, enc_out);
}
static inline bool
decode_opnd_float_reg10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_float_reg(10, enc, opnd);
}
static inline bool
encode_opnd_float_reg10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_float_reg(10, opnd, enc_out);
}
static inline bool
decode_opnd_float_reg16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_float_reg(16, enc, opnd);
}
static inline bool
encode_opnd_float_reg16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_float_reg(16, opnd, enc_out);
}
/* h0: H register at bit position 0 */
static inline bool
decode_opnd_h0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 1, enc, opnd);
}
static inline bool
encode_opnd_h0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 1, opnd, enc_out);
}
/* ign10: ignored register field at bit position 10 in load/store exclusive */
static inline bool
decode_opnd_ign10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_ign10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 5, false, 0, 0, opnd, enc_out);
}
/* ign16: ignored register field at bit position 16 in load/store exclusive */
static inline bool
decode_opnd_ign16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(16, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_ign16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(16, 5, false, 0, 0, opnd, enc_out);
}
/* imm12: 12-bit immediate operand of ADD/SUB */
static inline bool
decode_opnd_imm12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 12, false, 0, OPSZ_12b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 12, false, 0, 0, opnd, enc_out);
}
/* imm12sh: shift amount for 12-bit immediate of ADD/SUB, 0 or 16 */
static inline bool
decode_opnd_imm12sh(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(22, 1, false, 4, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm12sh(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(22, 1, false, 4, 0, opnd, enc_out);
}
/* imm16: 16-bit immediate operand of MOVK/MOVN/MOVZ/SVC */
static inline bool
decode_opnd_imm16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(5, 16, false, 0, OPSZ_12b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(5, 16, false, 0, 0, opnd, enc_out);
}
/* imm16sh: shift amount for 16-bit immediate of MOVK/MOVN/MOVZ/SVC */
static inline bool
decode_opnd_imm16sh(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (!TEST(1U << 31, enc) && TEST(1U << 22, enc))
return false;
return decode_opnd_int(21, 2, false, 4, OPSZ_6b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm16sh(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(21, 2, false, 4, 0, opnd, &t) ||
(!TEST(1U << 31, enc) && TEST(1U << 22, t)))
return false;
*enc_out = t;
return true;
}
/* imm4: immediate operand for some system instructions */
static inline bool
decode_opnd_imm4(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(8, 4, false, 0, OPSZ_4b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm4(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(8, 4, false, 0, 0, opnd, enc_out);
}
/* imm5: immediate operand for conditional compare (immediate) */
static inline bool
decode_opnd_imm5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(16, 5, false, 0, OPSZ_6b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(16, 5, false, 0, 0, opnd, enc_out);
}
/* imm6: shift amount for logical and arithmetical instructions */
static inline bool
decode_opnd_imm6(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (!TEST(1U << 31, enc) && TEST(1U << 15, enc))
return false;
return decode_opnd_int(10, 6, false, 0, OPSZ_6b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm6(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!TEST(1U << 31, enc) && TEST(1U << 15, enc))
return false;
return encode_opnd_int(10, 6, false, 0, 0, opnd, enc_out);
}
/* immr: first immediate operand for bitfield operation */
static inline bool
decode_opnd_immr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_imm_bf(16, enc, opnd);
}
static inline bool
encode_opnd_immr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_imm_bf(16, enc, opnd, enc_out);
}
/* imms: second immediate operand for bitfield operation */
static inline bool
decode_opnd_imms(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_imm_bf(10, enc, opnd);
}
static inline bool
encode_opnd_imms(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_imm_bf(10, enc, opnd, enc_out);
}
/* impx30: implicit X30 operand, used by BLR */
static inline bool
decode_opnd_impx30(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(DR_REG_X30);
return true;
}
static inline bool
encode_opnd_impx30(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_reg(opnd) || opnd_get_reg(opnd) != DR_REG_X30)
return false;
*enc_out = 0;
return true;
}
/* index0: index of B subreg in Q register: 0-15 */
static inline bool
decode_opnd_index0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(0, enc, opnd);
}
static inline bool
encode_opnd_index0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(0, opnd, enc_out);
}
/* index1: index of H subreg in Q register: 0-7 */
static inline bool
decode_opnd_index1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(1, enc, opnd);
}
static inline bool
encode_opnd_index1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(1, opnd, enc_out);
}
/* index2: index of S subreg in Q register: 0-3 */
static inline bool
decode_opnd_index2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(2, enc, opnd);
}
static inline bool
encode_opnd_index2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(2, opnd, enc_out);
}
/* index3: index of D subreg in Q register: 0-1 */
static inline bool
decode_opnd_index3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(3, enc, opnd);
}
static inline bool
encode_opnd_index3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(3, opnd, enc_out);
}
/* lsl: constant LSL for ADD/MOV, no encoding bits */
static inline bool
decode_opnd_lsl(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint t = DR_SHIFT_LSL;
return decode_opnd_int(0, 2, false, 0, OPSZ_2b, DR_OPND_IS_SHIFT, t, opnd);
}
static inline bool
encode_opnd_lsl(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(0, 2, false, 0, DR_OPND_IS_SHIFT, opnd, &t) ||
t != DR_SHIFT_LSL)
return false;
*enc_out = 0;
return true;
}
/* mem0: memory operand with no offset, gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem0_scale(extract_uint(enc, 30, 2), enc, opnd);
}
static inline bool
encode_opnd_mem0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem0_scale(extract_uint(enc, 30, 2), opnd, enc_out);
}
/* mem0p: as mem0, but a pair of registers, so double size */
static inline bool
decode_opnd_mem0p(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem0_scale(extract_uint(enc, 30, 1) + 3, enc, opnd);
}
static inline bool
encode_opnd_mem0p(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem0_scale(extract_uint(enc, 30, 1) + 3, opnd, enc_out);
}
/* mem12: memory operand with 12-bit offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(extract_uint(enc, 30, 2), false, enc, opnd);
}
static inline bool
encode_opnd_mem12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(extract_uint(enc, 30, 2), false, opnd, enc_out);
}
/* mem12q: memory operand with 12-bit offset; size is 16 bytes */
static inline bool
decode_opnd_mem12q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(4, false, enc, opnd);
}
static inline bool
encode_opnd_mem12q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(4, false, opnd, enc_out);
}
/* mem7: memory operand with 7-bit offset; gets size from bits 26, 30 and 31 */
static inline bool
decode_opnd_mem7(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem7_postindex(false, enc, opnd);
}
static inline bool
encode_opnd_mem7(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem7_postindex(false, enc, opnd, enc_out);
}
/* mem7off: just the 7-bit offset from mem7 */
static inline bool
decode_opnd_mem7off(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(15, 7, true, mem7_scale(enc), OPSZ_PTR, 0, enc, opnd);
}
static inline bool
encode_opnd_mem7off(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(15, 7, true, mem7_scale(enc), 0, opnd, enc_out);
}
/* mem7post: post-indexed mem7, so offset is zero */
static inline bool
decode_opnd_mem7post(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem7_postindex(true, enc, opnd);
}
static inline bool
encode_opnd_mem7post(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem7_postindex(true, enc, opnd, enc_out);
}
/* mem9: memory operand with 9-bit offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem9(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), false, enc, opnd);
}
static inline bool
encode_opnd_mem9(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), false, opnd, enc_out);
}
/* mem9off: just the 9-bit offset from mem9 */
static inline bool
decode_opnd_mem9off(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(12, 9, true, 0, OPSZ_PTR, 0, enc, opnd);
}
static inline bool
encode_opnd_mem9off(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(12, 9, true, 0, 0, opnd, enc_out);
}
/* mem9post: post-indexed mem9, so offset is zero */
static inline bool
decode_opnd_mem9post(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), true, enc, opnd);
}
static inline bool
encode_opnd_mem9post(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), true, opnd, enc_out);
}
/* mem9q: memory operand with 9-bit offset; size is 16 bytes */
static inline bool
decode_opnd_mem9q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(16, false, enc, opnd);
}
static inline bool
encode_opnd_mem9q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(16, false, opnd, enc_out);
}
/* mem9qpost: post-indexed mem9q, so offset is zero */
static inline bool
decode_opnd_mem9qpost(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(16, true, enc, opnd);
}
static inline bool
encode_opnd_mem9qpost(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(16, true, opnd, enc_out);
}
/* memlit: memory operand for literal load; gets size from bits 26, 30 and 31 */
static inline bool
decode_opnd_memlit(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_rel_addr(pc + 4 * extract_int(enc, 5, 19), memlit_size(enc));
return true;
}
static inline bool
encode_opnd_memlit(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
ptr_uint_t off;
if (!opnd_is_rel_addr(opnd) || opnd_get_size(opnd) != memlit_size(enc))
return false;
off = (byte *)opnd_get_addr(opnd) - pc;
if ((off & 3) != 0 || off + (1U << 20) >= 1U << 21)
return false;
*enc_out = (off >> 2 & 0x7ffff) << 5;
return true;
}
/* memreg: memory operand with register offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_memreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(opnd_size_from_bytes(1 << extract_uint(enc, 30, 2)),
enc, opnd);
}
static inline bool
encode_opnd_memreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(opnd_size_from_bytes(1 << extract_uint(enc, 30, 2)),
opnd, enc_out);
}
/* memregq: memory operand with register offset; size is 16 bytes */
static inline bool
decode_opnd_memregq(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(OPSZ_16, enc, opnd);
}
static inline bool
encode_opnd_memregq(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(OPSZ_16, opnd, enc_out);
}
/* memvm: memory operand for SIMD load/store multiple structures */
static inline bool
decode_opnd_memvm(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvm(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
int regs = multistruct_regcount(enc);
opnd_size_t size;
uint rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
size = opnd_get_size(opnd);
if (size != opnd_size_from_bytes(regs * 8) &&
size != opnd_size_from_bytes(regs * 16))
return false;
*enc_out = rn << 5 | (uint)(size == opnd_size_from_bytes(regs * 16)) << 30;
return true;
}
/* memvr: memory operand for SIMD load structure and replicate */
static inline bool
decode_opnd_memvr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
int regcount;
uint bytes, rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
bytes = opnd_size_in_bytes(opnd_get_size(opnd));
regcount = memvr_regcount(enc);
if (bytes % regcount != 0)
return false;
bytes /= regcount;
if (bytes < 1 || bytes > 8 || (bytes & (bytes - 1)) != 0 ||
opnd_size_from_bytes(bytes * regcount) != opnd_get_size(opnd))
return false;
*enc_out = (rn << 5 |
(bytes == 1 ? 0 : bytes == 2 ? 1 : bytes == 4 ? 2 : 3) << 10);
return true;
}
/* memvs: memory operand for SIMD load/store single structure */
static inline bool
decode_opnd_memvs(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = memvs_size(enc);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvs(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
if (opnd_get_size(opnd) != opnd_size_from_bytes(memvs_size(enc)))
return false;
*enc_out = rn << 5;
return true;
}
/* nzcv: flag bit specifier for conditional compare */
static inline bool
decode_opnd_nzcv(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(0, 4, false, 0, OPSZ_4b, 0, enc, opnd);
}
static inline bool
encode_opnd_nzcv(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(0, 4, false, 0, 0, opnd, enc_out);
}
/* prf12: prefetch variant of mem12 */
static inline bool
decode_opnd_prf12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(3, true, enc, opnd);
}
static inline bool
encode_opnd_prf12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(3, true, opnd, enc_out);
}
/* prf9: prefetch variant of mem9 */
static inline bool
decode_opnd_prf9(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(0, false, enc, opnd);
}
static inline bool
encode_opnd_prf9(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(0, false, opnd, enc_out);
}
/* prfop: prefetch operation, such as PLDL1KEEP */
static inline bool
decode_opnd_prfop(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(0, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_prfop(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(0, 5, false, 0, 0, opnd, enc_out);
}
/* prfreg: prefetch variant of memreg */
static inline bool
decode_opnd_prfreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(OPSZ_0, enc, opnd);
}
static inline bool
encode_opnd_prfreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(OPSZ_0, opnd, enc_out);
}
/* q0: Q register at bit position 0 */
static inline bool
decode_opnd_q0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
    return decode_opnd_vector_reg(0, 4, enc, opnd);
}
static inline bool
encode_opnd_q0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 4, opnd, enc_out);
}
/* q0p1: as q0 but add 1 mod 32 to reg number */
static inline bool
decode_opnd_q0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(1, enc, opnd);
}
static inline bool
encode_opnd_q0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(1, opnd, enc_out);
}
/* q0p2: as q0 but add 2 mod 32 to reg number */
static inline bool
decode_opnd_q0p2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(2, enc, opnd);
}
static inline bool
encode_opnd_q0p2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(2, opnd, enc_out);
}
/* q0p3: as q0 but add 3 mod 32 to reg number */
static inline bool
decode_opnd_q0p3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(3, enc, opnd);
}
static inline bool
encode_opnd_q0p3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(3, opnd, enc_out);
}
/* q10: Q register at bit position 10 */
static inline bool
decode_opnd_q10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 4, enc, opnd);
}
static inline bool
encode_opnd_q10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 4, opnd, enc_out);
}
/* s0: S register at bit position 0 */
static inline bool
decode_opnd_s0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 2, enc, opnd);
}
static inline bool
encode_opnd_s0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 2, opnd, enc_out);
}
/* s10: S register at bit position 10 */
static inline bool
decode_opnd_s10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 2, enc, opnd);
}
static inline bool
encode_opnd_s10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 2, opnd, enc_out);
}
/* shift3: shift type for ADD/SUB: LSL, LSR or ASR */
static inline bool
decode_opnd_shift3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 22, 2) == 3)
return false;
return decode_opnd_int(22, 2, false, 0, OPSZ_3b, DR_OPND_IS_SHIFT, enc, opnd);
}
static inline bool
encode_opnd_shift3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(22, 2, false, 0, DR_OPND_IS_SHIFT, opnd, &t) ||
extract_uint(t, 22, 2) == 3)
return false;
*enc_out = t;
return true;
}
/* shift4: shift type for logical operation: LSL, LSR, ASR or ROR */
static inline bool
decode_opnd_shift4(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(22, 2, false, 0, OPSZ_3b, DR_OPND_IS_SHIFT, enc, opnd);
}
static inline bool
encode_opnd_shift4(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(22, 2, false, 0, DR_OPND_IS_SHIFT, opnd, enc_out);
}
/* sysops: immediate operand for SYS instruction */
static inline bool
decode_opnd_sysops(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(5, 14, false, 0, OPSZ_2, 0, enc, opnd);
}
static inline bool
encode_opnd_sysops(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(5, 14, false, 0, 0, opnd, enc_out);
}
/* sysreg: system register, operand of MRS/MSR */
static inline bool
decode_opnd_sysreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = decode_sysreg(extract_uint(enc, 5, 15));
return true;
}
static inline bool
encode_opnd_sysreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_sysreg(&t, opnd))
return false;
*enc_out = t << 5;
return true;
}
/* vmsz: B/H/S/D for load/store multiple structures */
static inline bool
decode_opnd_vmsz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 2, false, 0, OPSZ_2b, 0, enc, opnd);
}
static inline bool
encode_opnd_vmsz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 2, false, 0, 0, opnd, enc_out);
}
/* vt0: first register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(0, enc, opnd);
}
static inline bool
encode_opnd_vt0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(0, enc, opnd, enc_out);
}
/* vt1: second register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(1, enc, opnd);
}
static inline bool
encode_opnd_vt1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(1, enc, opnd, enc_out);
}
/* vt2: third register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(2, enc, opnd);
}
static inline bool
encode_opnd_vt2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(2, enc, opnd, enc_out);
}
/* vt3: fourth register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(3, enc, opnd);
}
static inline bool
encode_opnd_vt3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(3, enc, opnd, enc_out);
}
/* w0: W register or WZR at bit position 0 */
static inline bool
decode_opnd_w0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 0, enc, opnd);
}
static inline bool
encode_opnd_w0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 0, opnd, enc_out);
}
/* w0p0: even-numbered W register or WZR at bit position 0 */
static inline bool
decode_opnd_w0p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 0, 0, enc, opnd);
}
static inline bool
encode_opnd_w0p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 0, 0, opnd, enc_out);
}
/* w0p1: even-numbered W register or WZR at bit position 0, add 1 */
static inline bool
decode_opnd_w0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 1, 0, enc, opnd);
}
static inline bool
encode_opnd_w0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 1, 0, opnd, enc_out);
}
/* w10: W register or WZR at bit position 10 */
static inline bool
decode_opnd_w10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 10, enc, opnd);
}
static inline bool
encode_opnd_w10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 10, opnd, enc_out);
}
/* w16: W register or WZR at bit position 16 */
static inline bool
decode_opnd_w16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 16, enc, opnd);
}
static inline bool
encode_opnd_w16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 16, opnd, enc_out);
}
/* w16p0: even-numbered W register or WZR at bit position 16 */
static inline bool
decode_opnd_w16p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 0, 16, enc, opnd);
}
static inline bool
encode_opnd_w16p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 0, 16, opnd, enc_out);
}
/* w16p1: even-numbered W register or WZR at bit position 16, add 1 */
static inline bool
decode_opnd_w16p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 1, 16, enc, opnd);
}
static inline bool
encode_opnd_w16p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 1, 16, opnd, enc_out);
}
/* w5: W register or WZR at bit position 5 */
static inline bool
decode_opnd_w5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 5, enc, opnd);
}
static inline bool
encode_opnd_w5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 5, opnd, enc_out);
}
/* wx0: W/X register or WZR/XZR at bit position 0; bit 31 selects X reg */
static inline bool
decode_opnd_wx0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 0, enc, opnd);
}
static inline bool
encode_opnd_wx0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 0, opnd, enc_out);
}
/* wx0sp: W/X register or WSP/XSP at bit position 0; bit 31 selects X reg */
static inline bool
decode_opnd_wx0sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(true, 0, enc, opnd);
}
static inline bool
encode_opnd_wx0sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(true, 0, opnd, enc_out);
}
/* wx10: W/X register or WZR/XZR at bit position 10; bit 31 selects X reg */
static inline bool
decode_opnd_wx10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 10, enc, opnd);
}
static inline bool
encode_opnd_wx10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 10, opnd, enc_out);
}
/* wx16: W/X register or WZR/XZR at bit position 16; bit 31 selects X reg */
static inline bool
decode_opnd_wx16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 16, enc, opnd);
}
static inline bool
encode_opnd_wx16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 16, opnd, enc_out);
}
/* wx5: W/X register or WZR/XZR at bit position 5; bit 31 selects X reg */
static inline bool
decode_opnd_wx5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 5, enc, opnd);
}
static inline bool
encode_opnd_wx5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 5, opnd, enc_out);
}
/* wx5sp: W/X register or WSP/XSP at bit position 5; bit 31 selects X reg */
static inline bool
decode_opnd_wx5sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(true, 5, enc, opnd);
}
static inline bool
encode_opnd_wx5sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(true, 5, opnd, enc_out);
}
/* x0: X register or XZR at bit position 0 */
static inline bool
decode_opnd_x0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 0, enc, opnd);
}
static inline bool
encode_opnd_x0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 0, opnd, enc_out);
}
/* x0p0: even-numbered X register or XZR at bit position 0 */
static inline bool
decode_opnd_x0p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 0, 0, enc, opnd);
}
static inline bool
encode_opnd_x0p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 0, 0, opnd, enc_out);
}
/* x0p1: even-numbered X register or XZR at bit position 0, add 1 */
static inline bool
decode_opnd_x0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 1, 0, enc, opnd);
}
static inline bool
encode_opnd_x0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 1, 0, opnd, enc_out);
}
/* x10: X register or XZR at bit position 10 */
static inline bool
decode_opnd_x10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 10, enc, opnd);
}
static inline bool
encode_opnd_x10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 10, opnd, enc_out);
}
/* x16: X register or XZR at bit position 16 */
static inline bool
decode_opnd_x16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 16, enc, opnd);
}
static inline bool
encode_opnd_x16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 16, opnd, enc_out);
}
/* x16p0: even-numbered X register or XZR at bit position 16 */
static inline bool
decode_opnd_x16p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 0, 16, enc, opnd);
}
static inline bool
encode_opnd_x16p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 0, 16, opnd, enc_out);
}
/* x16p1: even-numbered X register or XZR at bit position 16, add 1 */
static inline bool
decode_opnd_x16p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 1, 16, enc, opnd);
}
static inline bool
encode_opnd_x16p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 1, 16, opnd, enc_out);
}
/* x16imm: immediate operand for SIMD load/store multiple structures (post-indexed) */
static inline bool
decode_opnd_x16imm(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16imm(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
/* x16immvr: immediate operand for SIMD load structure and replicate (post-indexed) */
static inline bool
decode_opnd_x16immvr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16immvr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != memvr_regcount(enc) << extract_uint(enc, 10, 2))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
/* x16immvs: immediate operand for SIMD load/store single structure (post-indexed) */
static inline bool
decode_opnd_x16immvs(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = memvs_size(enc);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16immvs(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != memvs_size(enc))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
/* x5: X register or XZR at position 5 */
static inline bool
decode_opnd_x5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 5, enc, opnd);
}
static inline bool
encode_opnd_x5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 5, opnd, enc_out);
}
/* x5sp: X register or XSP at position 5 */
static inline bool
decode_opnd_x5sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, true, 5, enc, opnd);
}
static inline bool
encode_opnd_x5sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, true, 5, opnd, enc_out);
}
/*******************************************************************************
* Pairs of functions for decoding and encoding opndsets, as listed in "codec.txt".
* Currently all branch instructions are handled in this way.
*/
/* adr: used for ADR and ADRP */
static inline bool
decode_opnds_adr(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
opnd_t opnd;
if (!decode_opnd_adr_page(opcode == OP_adrp ? 12 : 0, enc, pc, &opnd))
return false;
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
true, false)));
instr_set_src(instr, 0, opnd);
return true;
}
static inline uint
encode_opnds_adr(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
int opcode = instr_get_opcode(instr);
uint rd, adr;
if (instr_num_dsts(instr) == 1 && instr_num_srcs(instr) == 1 &&
encode_opnd_adr_page(opcode == OP_adrp ? 12 : 0,
pc, instr_get_src(instr, 0), &adr, instr, di) &&
encode_opnd_wxn(true, false, 0, instr_get_dst(instr, 0), &rd))
return (enc | adr | rd);
return ENCFAIL;
}
/* b: used for B and BL */
static inline bool
decode_opnds_b(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
if (opcode == OP_bl) {
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0, opnd_create_reg(DR_REG_X30));
} else
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 0, 26) * 4));
return true;
}
static inline uint
encode_opnds_b(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
int opcode = instr_get_opcode(instr);
bool is_bl = (opcode == OP_bl);
uint off, x30;
if (instr_num_dsts(instr) == (is_bl ? 1 : 0) &&
instr_num_srcs(instr) == 1 &&
(!is_bl || encode_opnd_impx30(enc, opcode, pc, instr_get_dst(instr, 0), &x30)) &&
encode_pc_off(&off, 26, pc, instr, instr_get_src(instr, 0), di))
return (enc | off);
return ENCFAIL;
}
/* bcond: used for B.cond */
static inline bool
decode_opnds_bcond(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 19) * 4));
instr_set_predicate(instr, DR_PRED_EQ + (enc & 15));
return true;
}
static inline uint
encode_opnds_bcond(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 1 &&
encode_pc_off(&off, 19, pc, instr, instr_get_src(instr, 0), di) &&
(uint)(instr_get_predicate(instr) - DR_PRED_EQ) < 16)
return (enc | off << 5 | (instr_get_predicate(instr) - DR_PRED_EQ));
return ENCFAIL;
}
/* cbz: used for CBNZ and CBZ */
static inline bool
decode_opnds_cbz(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 2);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 19) * 4));
instr_set_src(instr, 1, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
TEST(1U << 31, enc), false)));
return true;
}
static inline uint
encode_opnds_cbz(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint rt, off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 2 &&
encode_pc_off(&off, 19, pc, instr, instr_get_src(instr, 0), di) &&
encode_opnd_rn(false, 0, instr_get_src(instr, 1), &rt))
return (enc | off << 5 | rt);
return ENCFAIL;
}
/* logic_imm: used for AND, ANDS, EOR and ORR.
* Logical (immediate) instructions are awkward because there are sometimes
* many ways of representing the same immediate value. We add the raw encoding
* as an additional operand when the encoding is not the canonical one.
*/
static inline bool
decode_opnds_logic_imm(uint enc, dcontext_t *dcontext, byte *pc,
instr_t *instr, int opcode)
{
bool is_x = TEST(1U << 31, enc);
uint imm_enc = extract_uint(enc, 10, 13); /* encoding of bitmask */
ptr_uint_t imm_val = decode_bitmask(imm_enc); /* value of bitmask */
bool canonical = encode_bitmask(imm_val) == imm_enc;
if (imm_val == 0 || (!is_x && TEST(1U << 12, imm_enc)))
return false;
if (!is_x)
imm_val &= 0xffffffff;
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 1, 2 + (canonical ? 0 : 1));
instr_set_dst(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
is_x, opcode != OP_ands)));
instr_set_src(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 5, 5),
is_x, false)));
instr_set_src(instr, 1, opnd_create_immed_uint(imm_val, is_x ? OPSZ_8 : OPSZ_4));
if (!canonical)
instr_set_src(instr, 2, opnd_create_immed_uint(imm_enc, OPSZ_2));
return true;
}
static inline uint
encode_opnds_logic_imm(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
int opcode = instr_get_opcode(instr);
int srcs = instr_num_srcs(instr);
opnd_t opnd_val;
ptr_uint_t imm_val;
uint rd, rn;
if (srcs < 2 || srcs > 3 || instr_num_dsts(instr) != 1)
return ENCFAIL;
opnd_val = instr_get_src(instr, 1);
if (!encode_opnd_rn(opcode != OP_ands, 0, instr_get_dst(instr, 0), &rd) ||
!encode_opnd_rn(false, 5, instr_get_src(instr, 0), &rn) ||
TEST(1U << 31, rd ^ rn) ||
!opnd_is_immed_int(opnd_val))
return ENCFAIL;
imm_val = opnd_get_immed_int(opnd_val);
if (!TEST(1U << 31, rd)) {
if ((imm_val >> 32) != 0)
return ENCFAIL;
imm_val |= imm_val << 32;
}
if (srcs == 3) {
opnd_t opnd_enc = instr_get_src(instr, 2);
ptr_int_t imm_enc;
if (!opnd_is_immed_int(opnd_enc))
return ENCFAIL;
imm_enc = opnd_get_immed_int(opnd_enc);
if (imm_enc < 0 || imm_enc > 0x1fff || decode_bitmask(imm_enc) != imm_val)
return ENCFAIL;
return (enc | rd | rn | (uint)imm_enc << 10);
} else {
int imm_enc = encode_bitmask(imm_val);
if (imm_enc < 0)
return ENCFAIL;
return (enc | rd | rn | (uint)imm_enc << 10);
}
}
/* msr: used for MSR.
* With MSR the destination register may or may not be one of the system registers
* that we recognise.
*/
static inline bool
decode_opnds_msr(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
opnd_t opnd = decode_sysreg(extract_uint(enc, 5, 15));
instr_set_opcode(instr, opcode);
if (opnd_is_reg(opnd)) {
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0, opnd);
} else {
instr_set_num_opnds(dcontext, instr, 0, 2);
instr_set_src(instr, 1, opnd);
}
instr_set_src(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
true, false)));
return true;
}
static inline uint
encode_opnds_msr(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint imm15, xt;
if (instr_num_dsts(instr) == 1 && instr_num_srcs(instr) == 1 &&
opnd_is_reg(instr_get_dst(instr, 0)) &&
encode_sysreg(&imm15, instr_get_dst(instr, 0)) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 0), &xt))
return (enc | xt | imm15 << 5);
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 2 &&
opnd_is_immed_int(instr_get_src(instr, 1)) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 0), &xt) &&
encode_sysreg(&imm15, instr_get_src(instr, 1)))
return (enc | xt | imm15 << 5);
return ENCFAIL;
}
/* tbz: used for TBNZ and TBZ */
static inline bool
decode_opnds_tbz(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 3);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 14) * 4));
instr_set_src(instr, 1, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
true, false)));
instr_set_src(instr, 2, opnd_create_immed_int((enc >> 19 & 31) | (enc >> 26 & 32),
OPSZ_5b));
return true;
}
static inline uint
encode_opnds_tbz(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint xt, imm6, off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 3 &&
encode_pc_off(&off, 14, pc, instr, instr_get_src(instr, 0), di) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 1), &xt) &&
encode_opnd_int(0, 6, false, 0, 0, instr_get_src(instr, 2), &imm6))
return (enc | off << 5 | xt | (imm6 & 31) << 19 | (imm6 & 32) << 26);
return ENCFAIL;
}
/* Element size for vector floating point instructions. */
/* fsz: Operand size for single and double precision encoding of floating point
* vector instructions. We need to convert the generic size operand to the right
* encoding bits. It only supports FSZ_SINGLE and FSZ_DOUBLE.
*/
static inline bool
decode_opnd_fsz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (((enc >> 21) & 0x03) == 0x01) {
*opnd = opnd_create_immed_int(FSZ_SINGLE, OPSZ_2b);
return true;
}
if (((enc >> 21) & 0x03) == 0x03) {
*opnd = opnd_create_immed_int(FSZ_DOUBLE, OPSZ_2b);
return true;
}
return false;
}
static inline bool
encode_opnd_fsz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_get_immed_int(opnd) == FSZ_SINGLE) {
*enc_out = 0x01 << 21;
return true;
}
if (opnd_get_immed_int(opnd) == FSZ_DOUBLE) {
*enc_out = 0x03 << 21;
return true;
}
return false;
}
/* fsz16: Operand size for half precision encoding of floating point vector
* instructions. We need to convert the generic size operand to the right
* encoding bits. It only supports FSZ_HALF.
*/
static inline bool
decode_opnd_fsz16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (((enc >> 21) & 0x03) == 0x02) {
*opnd = opnd_create_immed_int(FSZ_HALF, OPSZ_2b);
return true;
}
return false;
}
static inline bool
encode_opnd_fsz16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_get_immed_int(opnd) == FSZ_HALF) {
*enc_out = 0x02 << 21;
return true;
}
return false;
}
/******************************************************************************/
/* Include automatically generated decoder and encoder. */
#include "decode_gen.h"
#include "encode_gen.h"
/******************************************************************************/
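/* Decode the 4-byte instruction word at pc into instr, using orig_pc as the
 * translation address, and return a pointer to the next instruction.
 */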
byte *
decode_common(dcontext_t *dcontext, byte *pc, byte *orig_pc, instr_t *instr)
{
byte *next_pc = pc + 4;
uint enc = *(uint *)pc;
uint eflags = 0;
int opc;
CLIENT_ASSERT(instr->opcode == OP_INVALID || instr->opcode == OP_UNDECODED,
"decode: instr is already decoded, may need to call instr_reset()");
if (!decoder(enc, dcontext, orig_pc, instr)) {
/* We use OP_xx for instructions not yet handled by the decoder.
* If an A64 instruction accesses a general-purpose register
* (except X30) then the number of that register appears in one
* of four possible places in the instruction word, so we can
* pessimistically assume that an unrecognised instruction reads
* and writes all four of those registers, and this is
* sufficient to enable correct (though often excessive) mangling.
*/
instr_set_opcode(instr, OP_xx);
instr_set_num_opnds(dcontext, instr, 4, 5);
instr->src0 = OPND_CREATE_INT32(enc);
instr->srcs[0] = opnd_create_reg(DR_REG_X0 + (enc & 31));
instr->dsts[0] = opnd_create_reg(DR_REG_X0 + (enc & 31));
instr->srcs[1] = opnd_create_reg(DR_REG_X0 + (enc >> 5 & 31));
instr->dsts[1] = opnd_create_reg(DR_REG_X0 + (enc >> 5 & 31));
instr->srcs[2] = opnd_create_reg(DR_REG_X0 + (enc >> 10 & 31));
instr->dsts[2] = opnd_create_reg(DR_REG_X0 + (enc >> 10 & 31));
instr->srcs[3] = opnd_create_reg(DR_REG_X0 + (enc >> 16 & 31));
instr->dsts[3] = opnd_create_reg(DR_REG_X0 + (enc >> 16 & 31));
}
/* XXX i#2374: This determination of flag usage should be separate from the decoding
* of operands. Also, we should perhaps add flag information in codec.txt instead of
* listing all the opcodes, although the list is short and unlikely to change.
*/
opc = instr_get_opcode(instr);
if ((opc == OP_mrs && instr_num_srcs(instr) == 1 &&
opnd_is_reg(instr_get_src(instr, 0)) &&
opnd_get_reg(instr_get_src(instr, 0)) == DR_REG_NZCV) ||
opc == OP_bcond ||
opc == OP_adc || opc == OP_adcs || opc == OP_sbc || opc == OP_sbcs ||
opc == OP_csel || opc == OP_csinc || opc == OP_csinv || opc == OP_csneg ||
opc == OP_ccmn || opc == OP_ccmp) {
/* FIXME i#1569: When handled by decoder, add:
* opc == OP_fcsel
*/
eflags |= EFLAGS_READ_NZCV;
}
if ((opc == OP_msr && instr_num_dsts(instr) == 1 &&
opnd_is_reg(instr_get_dst(instr, 0)) &&
opnd_get_reg(instr_get_dst(instr, 0)) == DR_REG_NZCV) ||
opc == OP_adcs || opc == OP_adds || opc == OP_sbcs || opc == OP_subs ||
opc == OP_ands || opc == OP_bics ||
opc == OP_ccmn || opc == OP_ccmp) {
/* FIXME i#1569: When handled by decoder, add:
* opc == OP_fccmp || opc == OP_fccmpe || opc == OP_fcmp || opc == OP_fcmpe
*/
eflags |= EFLAGS_WRITE_NZCV;
}
instr->eflags = eflags;
instr_set_eflags_valid(instr, true);
instr_set_operands_valid(instr, true);
if (orig_pc != pc) {
/* We do not want to copy when encoding and condone an invalid
* relative target.
*/
instr_set_raw_bits_valid(instr, false);
instr_set_translation(instr, orig_pc);
} else {
/* We set raw bits AFTER setting all srcs and dsts because setting
* a src or dst marks instr as having invalid raw bits.
*/
ASSERT(CHECK_TRUNCATE_TYPE_uint(next_pc - pc));
instr_set_raw_bits(instr, pc, (uint)(next_pc - pc));
}
return next_pc;
}
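/* Encode instr at the 4-byte-aligned address pc using the generated encoder. */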
uint
encode_common(byte *pc, instr_t *i, decode_info_t *di)
{
ASSERT(((ptr_int_t)pc & 3) == 0);
return encoder(pc, i, di);
}
| 1 | 13,354 | Or maybe put into alphabetical order with the other #includes. | DynamoRIO-dynamorio | c |
@@ -235,8 +235,11 @@ func watchConfigFile(filename, adapterName string) {
return false
}
+ ticker := time.NewTicker(1 * time.Second)
+ defer ticker.Stop()
+
// begin poller
- for range time.Tick(1 * time.Second) {
+ for range ticker.C {
// get the file info
info, err := os.Stat(filename)
if err != nil { | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
)
func init() {
// set a fitting User-Agent for ACME requests
goModule := caddy.GoModule()
cleanModVersion := strings.TrimPrefix(goModule.Version, "v")
certmagic.UserAgent = "Caddy/" + cleanModVersion
// by using Caddy, user indicates agreement to CA terms
// (very important, or ACME account creation will fail!)
certmagic.DefaultACME.Agreed = true
}
// Main implements the main function of the caddy command.
// Call this if Caddy is to be the main() of your program.
func Main() {
switch len(os.Args) {
case 0:
fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
os.Exit(caddy.ExitCodeFailedStartup)
case 1:
os.Args = append(os.Args, "help")
}
subcommandName := os.Args[1]
subcommand, ok := commands[subcommandName]
if !ok {
if strings.HasPrefix(os.Args[1], "-") {
// user probably forgot to type the subcommand
fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'")
} else {
fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1])
}
os.Exit(caddy.ExitCodeFailedStartup)
}
fs := subcommand.Flags
if fs == nil {
fs = flag.NewFlagSet(subcommand.Name, flag.ExitOnError)
}
err := fs.Parse(os.Args[2:])
if err != nil {
fmt.Println(err)
os.Exit(caddy.ExitCodeFailedStartup)
}
exitCode, err := subcommand.Func(Flags{fs})
if err != nil {
fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err)
}
os.Exit(exitCode)
}
// handlePingbackConn reads from conn and ensures it matches
// the bytes in expect, or returns an error if it doesn't.
func handlePingbackConn(conn net.Conn, expect []byte) error {
defer conn.Close()
confirmationBytes, err := ioutil.ReadAll(io.LimitReader(conn, 32))
if err != nil {
return err
}
if !bytes.Equal(confirmationBytes, expect) {
return fmt.Errorf("wrong confirmation: %x", confirmationBytes)
}
return nil
}
// loadConfig loads the config from configFile and adapts it
// using adapterName. If adapterName is specified, configFile
// must be also. If no configFile is specified, it tries
// loading a default config file. The lack of a config file is
// not treated as an error, but false will be returned if
// there is no config available. It prints any warnings to stderr,
// and returns the resulting JSON config bytes along with
// whether a config file was loaded or not.
func loadConfig(configFile, adapterName string) ([]byte, string, error) {
// specifying an adapter without a config file is ambiguous
if adapterName != "" && configFile == "" {
return nil, "", fmt.Errorf("cannot adapt config without config file (use --config)")
}
// load initial config and adapter
var config []byte
var cfgAdapter caddyconfig.Adapter
var err error
if configFile != "" {
config, err = ioutil.ReadFile(configFile)
if err != nil {
return nil, "", fmt.Errorf("reading config file: %v", err)
}
caddy.Log().Info("using provided configuration",
zap.String("config_file", configFile),
zap.String("config_adapter", adapterName))
} else if adapterName == "" {
// as a special case when no config file or adapter
// is specified, see if the Caddyfile adapter is
// plugged in, and if so, try using a default Caddyfile
cfgAdapter = caddyconfig.GetAdapter("caddyfile")
if cfgAdapter != nil {
config, err = ioutil.ReadFile("Caddyfile")
if os.IsNotExist(err) {
// okay, no default Caddyfile; pretend like this never happened
cfgAdapter = nil
} else if err != nil {
// default Caddyfile exists, but error reading it
return nil, "", fmt.Errorf("reading default Caddyfile: %v", err)
} else {
// success reading default Caddyfile
configFile = "Caddyfile"
caddy.Log().Info("using adjacent Caddyfile")
}
}
}
// as a special case, if a config file called "Caddyfile" was
// specified, and no adapter is specified, assume caddyfile adapter
// for convenience
if strings.HasPrefix(filepath.Base(configFile), "Caddyfile") &&
filepath.Ext(configFile) != ".json" &&
adapterName == "" {
adapterName = "caddyfile"
}
// load config adapter
if adapterName != "" {
cfgAdapter = caddyconfig.GetAdapter(adapterName)
if cfgAdapter == nil {
return nil, "", fmt.Errorf("unrecognized config adapter: %s", adapterName)
}
}
// adapt config
if cfgAdapter != nil {
adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]interface{}{
"filename": configFile,
})
if err != nil {
return nil, "", fmt.Errorf("adapting config using %s: %v", adapterName, err)
}
for _, warn := range warnings {
msg := warn.Message
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
fmt.Printf("[WARNING][%s] %s:%d: %s\n", adapterName, warn.File, warn.Line, msg)
}
config = adaptedConfig
}
return config, configFile, nil
}
// watchConfigFile watches the config file at filename for changes
// and reloads the config if the file was updated. This function
// blocks indefinitely; it only quits if the poller has errors for
// a long enough time. The filename passed in must be the actual
// config file used, not one to be discovered.
func watchConfigFile(filename, adapterName string) {
defer func() {
if err := recover(); err != nil {
log.Printf("[PANIC] watching config file: %v\n%s", err, debug.Stack())
}
}()
// make our logger; since config reloads can change the
// default logger, we need to get it dynamically each time
logger := func() *zap.Logger {
return caddy.Log().
Named("watcher").
With(zap.String("config_file", filename))
}
// get the initial timestamp on the config file
info, err := os.Stat(filename)
if err != nil {
logger().Error("cannot watch config file", zap.Error(err))
return
}
lastModified := info.ModTime()
logger().Info("watching config file for changes")
// if the file disappears or something, we can
// stop polling if the error lasts long enough
var lastErr time.Time
finalError := func(err error) bool {
if lastErr.IsZero() {
lastErr = time.Now()
return false
}
if time.Since(lastErr) > 30*time.Second {
logger().Error("giving up watching config file; too many errors",
zap.Error(err))
return true
}
return false
}
// begin poller
for range time.Tick(1 * time.Second) {
// get the file info
info, err := os.Stat(filename)
if err != nil {
if finalError(err) {
return
}
continue
}
lastErr = time.Time{} // no error, so clear any memory of one
// if it hasn't changed, nothing to do
if !info.ModTime().After(lastModified) {
continue
}
logger().Info("config file changed; reloading")
// remember this timestamp
lastModified = info.ModTime()
// load the contents of the file
config, _, err := loadConfig(filename, adapterName)
if err != nil {
logger().Error("unable to load latest config", zap.Error(err))
continue
}
// apply the updated config
err = caddy.Load(config, false)
if err != nil {
logger().Error("applying latest config", zap.Error(err))
continue
}
}
}
// Flags wraps a FlagSet so that typed values
// from flags can be easily retrieved.
type Flags struct {
*flag.FlagSet
}
// String returns the string representation of the
// flag given by name. It panics if the flag is not
// in the flag set.
func (f Flags) String(name string) string {
return f.FlagSet.Lookup(name).Value.String()
}
// Bool returns the boolean representation of the
// flag given by name. It returns false if the flag
// is not a boolean type. It panics if the flag is
// not in the flag set.
func (f Flags) Bool(name string) bool {
val, _ := strconv.ParseBool(f.String(name))
return val
}
// Int returns the integer representation of the
// flag given by name. It returns 0 if the flag
// is not an integer type. It panics if the flag is
// not in the flag set.
func (f Flags) Int(name string) int {
val, _ := strconv.ParseInt(f.String(name), 0, strconv.IntSize)
return int(val)
}
// Float64 returns the float64 representation of the
// flag given by name. It returns 0 if the flag
// is not a float64 type. It panics if the flag is
// not in the flag set.
func (f Flags) Float64(name string) float64 {
val, _ := strconv.ParseFloat(f.String(name), 64)
return val
}
// Duration returns the duration representation of the
// flag given by name. It returns 0 if the flag
// is not a duration type. It panics if the flag is
// not in the flag set.
func (f Flags) Duration(name string) time.Duration {
val, _ := caddy.ParseDuration(f.String(name))
return val
}
// flagHelp returns the help text for fs.
func flagHelp(fs *flag.FlagSet) string {
if fs == nil {
return ""
}
// temporarily redirect output
out := fs.Output()
defer fs.SetOutput(out)
buf := new(bytes.Buffer)
fs.SetOutput(buf)
fs.PrintDefaults()
return buf.String()
}
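// loadEnvFromFile reads the environment file at envFile and sets each
// KEY=VALUE pair in the process environment.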
func loadEnvFromFile(envFile string) error {
file, err := os.Open(envFile)
if err != nil {
return fmt.Errorf("reading environment file: %v", err)
}
defer file.Close()
envMap, err := parseEnvFile(file)
if err != nil {
return fmt.Errorf("parsing environment file: %v", err)
}
for k, v := range envMap {
if err := os.Setenv(k, v); err != nil {
return fmt.Errorf("setting environment variables: %v", err)
}
}
return nil
}
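// parseEnvFile parses KEY=VALUE lines from envInput, skipping blank lines and
// lines starting with "#". For example, the (hypothetical) input
//
//	FOO=bar
//	# comment
//	EMPTY=
//
// yields map[string]string{"FOO": "bar", "EMPTY": ""}.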
func parseEnvFile(envInput io.Reader) (map[string]string, error) {
envMap := make(map[string]string)
scanner := bufio.NewScanner(envInput)
var line string
lineNumber := 0
for scanner.Scan() {
line = strings.TrimSpace(scanner.Text())
lineNumber++
// skip lines starting with comment
if strings.HasPrefix(line, "#") {
continue
}
// skip empty line
if len(line) == 0 {
continue
}
fields := strings.SplitN(line, "=", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber)
}
if strings.Contains(fields[0], " ") {
return nil, fmt.Errorf("bad key on line %d: contains whitespace", lineNumber)
}
key := fields[0]
val := fields[1]
if key == "" {
return nil, fmt.Errorf("missing or empty key on line %d", lineNumber)
}
envMap[key] = val
}
if err := scanner.Err(); err != nil {
return nil, err
}
return envMap, nil
}
func printEnvironment() {
fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir())
fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir())
fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir())
fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath)
fmt.Printf("caddy.Version=%s\n", caddy.GoModule().Version)
fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS)
fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH)
fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler)
fmt.Printf("runtime.NumCPU=%d\n", runtime.NumCPU())
fmt.Printf("runtime.GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0))
fmt.Printf("runtime.Version=%s\n", runtime.Version())
cwd, err := os.Getwd()
if err != nil {
cwd = fmt.Sprintf("<error: %v>", err)
}
fmt.Printf("os.Getwd=%s\n\n", cwd)
for _, v := range os.Environ() {
fmt.Println(v)
}
}
// moveStorage moves the old default dataDir to the new default dataDir.
// TODO: This is TEMPORARY until the release candidates.
func moveStorage() {
// get the home directory (the old way)
oldHome := os.Getenv("HOME")
if oldHome == "" && runtime.GOOS == "windows" {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
oldHome = drive + path
if drive == "" || path == "" {
oldHome = os.Getenv("USERPROFILE")
}
}
if oldHome == "" {
oldHome = "."
}
oldDataDir := filepath.Join(oldHome, ".local", "share", "caddy")
// nothing to do if old data dir doesn't exist
_, err := os.Stat(oldDataDir)
if os.IsNotExist(err) {
return
}
// nothing to do if the new data dir is the same as the old one
newDataDir := caddy.AppDataDir()
if oldDataDir == newDataDir {
return
}
logger := caddy.Log().Named("automigrate").With(
zap.String("old_dir", oldDataDir),
zap.String("new_dir", newDataDir))
logger.Info("beginning one-time data directory migration",
zap.String("details", "https://github.com/caddyserver/caddy/issues/2955"))
// if new data directory exists, avoid auto-migration as a conservative safety measure
_, err = os.Stat(newDataDir)
if !os.IsNotExist(err) {
logger.Error("new data directory already exists; skipping auto-migration as conservative safety measure",
zap.Error(err),
zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"))
return
}
// construct the new data directory's parent folder
err = os.MkdirAll(filepath.Dir(newDataDir), 0700)
if err != nil {
logger.Error("unable to make new datadirectory - follow link for instructions",
zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"),
zap.Error(err))
return
}
// folder structure is same, so just try to rename (move) it;
// this fails if the new path is on a separate device
err = os.Rename(oldDataDir, newDataDir)
if err != nil {
logger.Error("new data directory already exists; skipping auto-migration as conservative safety measure - follow link for instructions",
zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"),
zap.Error(err))
}
logger.Info("successfully completed one-time migration of data directory",
zap.String("details", "https://github.com/caddyserver/caddy/issues/2955"))
}
| 1 | 15,737 | This runs the entire duration of the program; this change is not needed. | caddyserver-caddy | go |
@@ -20,6 +20,10 @@ export class TouchScroll extends BasePlugin {
return PLUGIN_PRIORITY;
}
+ static get ALWAYS_UPDATE() {
+ return true;
+ }
+
constructor(hotInstance) {
super(hotInstance);
| 1 | import { addClass, removeClass } from '../../helpers/dom/element';
import { arrayEach } from '../../helpers/array';
import { BasePlugin } from '../base';
import { isTouchSupported } from '../../helpers/feature';
export const PLUGIN_KEY = 'touchScroll';
export const PLUGIN_PRIORITY = 200;
/**
* @private
* @plugin TouchScroll
* @class TouchScroll
*/
export class TouchScroll extends BasePlugin {
static get PLUGIN_KEY() {
return PLUGIN_KEY;
}
static get PLUGIN_PRIORITY() {
return PLUGIN_PRIORITY;
}
constructor(hotInstance) {
super(hotInstance);
/**
* Collection of scrollbars to update.
*
* @type {Array}
*/
this.scrollbars = [];
/**
* Collection of overlays to update.
*
* @type {Array}
*/
this.clones = [];
/**
* Flag which determines if collection of overlays should be refilled on every table render.
*
* @type {boolean}
* @default false
*/
this.lockedCollection = false;
/**
* Flag which determines if walkontable should freeze overlays while scrolling.
*
* @type {boolean}
* @default false
*/
this.freezeOverlays = false;
}
/**
* Check if plugin is enabled.
*
* @returns {boolean}
*/
isEnabled() {
return isTouchSupported();
}
/**
* Enable the plugin.
*/
enablePlugin() {
if (this.enabled) {
return;
}
this.addHook('afterViewRender', () => this.onAfterViewRender());
this.registerEvents();
super.enablePlugin();
}
/**
* Updates the plugin to use the latest options you have specified.
*/
updatePlugin() {
this.lockedCollection = false;
super.updatePlugin();
}
/**
* Disable plugin for this Handsontable instance.
*/
disablePlugin() {
super.disablePlugin();
}
/**
* Register all necessary events.
*
* @private
*/
registerEvents() {
this.addHook('beforeTouchScroll', () => this.onBeforeTouchScroll());
this.addHook('afterMomentumScroll', () => this.onAfterMomentumScroll());
}
/**
* After view render listener.
*
* @private
*/
onAfterViewRender() {
if (this.lockedCollection) {
return;
}
const {
topOverlay,
bottomOverlay,
leftOverlay,
topLeftCornerOverlay,
bottomLeftCornerOverlay
} = this.hot.view.wt.wtOverlays;
this.lockedCollection = true;
this.scrollbars.length = 0;
this.scrollbars.push(topOverlay);
if (bottomOverlay.clone) {
this.scrollbars.push(bottomOverlay);
}
this.scrollbars.push(leftOverlay);
if (topLeftCornerOverlay) {
this.scrollbars.push(topLeftCornerOverlay);
}
if (bottomLeftCornerOverlay && bottomLeftCornerOverlay.clone) {
this.scrollbars.push(bottomLeftCornerOverlay);
}
this.clones.length = 0;
if (topOverlay.needFullRender) {
this.clones.push(topOverlay.clone.wtTable.holder.parentNode);
}
if (bottomOverlay.needFullRender) {
this.clones.push(bottomOverlay.clone.wtTable.holder.parentNode);
}
if (leftOverlay.needFullRender) {
this.clones.push(leftOverlay.clone.wtTable.holder.parentNode);
}
if (topLeftCornerOverlay) {
this.clones.push(topLeftCornerOverlay.clone.wtTable.holder.parentNode);
}
if (bottomLeftCornerOverlay && bottomLeftCornerOverlay.clone) {
this.clones.push(bottomLeftCornerOverlay.clone.wtTable.holder.parentNode);
}
}
/**
* Touch scroll listener.
*
* @private
*/
onBeforeTouchScroll() {
this.freezeOverlays = true;
arrayEach(this.clones, (clone) => {
addClass(clone, 'hide-tween');
});
}
/**
* After momentum scroll listener.
*
* @private
*/
onAfterMomentumScroll() {
this.freezeOverlays = false;
arrayEach(this.clones, (clone) => {
removeClass(clone, 'hide-tween');
addClass(clone, 'show-tween');
});
setTimeout(() => {
arrayEach(this.clones, (clone) => {
removeClass(clone, 'show-tween');
});
}, 400);
arrayEach(this.scrollbars, (scrollbar) => {
scrollbar.refresh();
scrollbar.resetFixedPosition();
});
this.hot.view.wt.wtOverlays.syncScrollWithMaster();
}
}
| 1 | 20,747 | I don't feel well about the `ALWAYS_UPDATE` option sounds like a workaround. I can imagine that some of the plugins want to observe all options. Maybe, in that case, we can reuse the `CONFIG_KEYS` that would return an empty array for observing all settings and `false` for disabling observing at all? I just thinking aloud - Like `CONFIG_KEYS` sounds reasonable. We could support and maintain it in the future, even after implementing the "observability" to the MetaManager then `ALWAYS_UPDATE` ... I don't know :) | handsontable-handsontable | js |
@@ -34,6 +34,11 @@
* by asynch interrupt.
*/
+/* clang-format off */
+/* XXX: clang-format incorrectly detected a tab difference at "clang-format on"
+ * below. This is why "clang-format off" has been moved outside the ifdef until
+ * bug is fixed.
+ */
#ifndef ASM_CODE_ONLY /* C code */
# include "configure.h"
# ifndef UNIX | 1 | /* **********************************************************
* Copyright (c) 2015-2018 Google, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Google, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Test xl8 pc of rip-rel instruction (xref #3307) in mangling epilogue, caused
* by asynch interrupt.
*/
#ifndef ASM_CODE_ONLY /* C code */
# include "configure.h"
# ifndef UNIX
# error UNIX-only
# endif
# include "tools.h"
# include "mangle_suspend-shared.h"
# include <setjmp.h>
# include <signal.h>
# include <pthread.h>
volatile bool test_ready = false;
volatile bool test_done = false;
volatile bool test_suspend = false;
volatile int loop_inc = 1;
void
test_1_asm();
void
test_2_asm();
static void *
suspend_thread_1_routine(void *arg)
{
# ifdef X86_64
/* This thread is executing labels for the client to insert a clean call that
* does the suspend and subsequent check for correctness.
*/
while (!test_ready) {
/* Empty. */
}
while (!test_done) {
asm volatile("mov %0, %%rdx\n\t"
"mov %0, %%rdx\n"
:
: "i"(SUSPEND_VAL_TEST_1_C)
: "rdx");
while (!test_suspend && !test_done) {
/* Empty. */
}
test_suspend = false;
}
# endif
return NULL;
}
static void *
suspend_thread_2_routine(void *arg)
{
# ifdef X86_64
/* This thread is executing labels for the client to insert a clean call that
* does the suspend and subsequent check for correctness.
*/
while (!test_ready) {
/* Empty. */
}
while (!test_done) {
asm volatile("mov %0, %%rdx\n\t"
"mov %0, %%rdx\n"
:
: "i"(SUSPEND_VAL_TEST_2_C)
: "rdx");
while (!test_suspend && !test_done) {
/* Empty. */
}
test_suspend = false;
}
# endif
return NULL;
}
int
main(int argc, const char *argv[])
{
pthread_t suspend_thread;
void *retval;
if (pthread_create(&suspend_thread, NULL, suspend_thread_1_routine, NULL) != 0) {
perror("Failed to create thread");
exit(1);
}
/* Test xl8 pc of rip-rel instruction (xref #3307) caused by
* asynch interrupt.
*/
test_1_asm();
if (pthread_join(suspend_thread, &retval) != 0)
perror("Failed to join thread");
print("Test 1 finished\n");
test_ready = false;
test_done = false;
test_suspend = false;
if (pthread_create(&suspend_thread, NULL, suspend_thread_2_routine, NULL) != 0) {
perror("Failed to create thread");
exit(1);
}
/* Test xl8 pc of rip-rel instruction (xref #3307) caused by
* asynch interrupt.
*/
test_2_asm();
if (pthread_join(suspend_thread, &retval) != 0)
perror("Failed to join thread");
print("Test 2 finished\n");
return 0;
}
#else /* asm code *************************************************************/
# include "asm_defines.asm"
# include "mangle_suspend-shared.h"
/* clang-format off */
START_FILE
#ifdef X64
# define FRAME_PADDING 0
#else
# define FRAME_PADDING 0
#endif
#define FUNCNAME test_1_asm
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
#ifdef X64
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test_1
test_1:
mov PTRSZ [REG_XSP], TEST_1_LOOP_COUNT_REG_ASM
sub REG_XSP, 8
mov SUSPEND_TEST_REG_ASM, TEST_VAL
mov SUSPEND_TEST_REG_ASM, TEST_VAL
nop
mov BYTE SYMREF(test_ready), HEX(1)
mov LOOP_TEST_REG_OUTER_ASM, LOOP_COUNT_OUTER
mov TEST_1_LOOP_COUNT_REG_ASM, 2
/* Code changes here must stay in synch with the loop bounds
* check hardcoded in the dll.
*/
loop_a_outer:
mov LOOP_TEST_REG_INNER_ASM, LOOP_COUNT_INNER
loop_a_inner:
mov TEST_1_LOOP_COUNT_REG_ASM, 1
add TEST_1_LOOP_COUNT_REG_ASM, PTRSZ SYMREF(loop_inc)
mov TEST_1_LOOP_COUNT_REG_ASM, 2
sub LOOP_TEST_REG_INNER_ASM, 1
cmp LOOP_TEST_REG_INNER_ASM, 0
jnz loop_a_inner
mov BYTE SYMREF(test_suspend), HEX(1)
sub LOOP_TEST_REG_OUTER_ASM, 1
cmp LOOP_TEST_REG_OUTER_ASM, 0
jnz loop_a_outer
jmp epilog_a
epilog_a:
mov BYTE SYMREF(test_done), HEX(1)
add REG_XSP, 8
mov TEST_1_LOOP_COUNT_REG_ASM, PTRSZ [REG_XSP]
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
#endif
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
/* Test: not implemented for ARM */
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
/* Test: not implemented for AARCH64 */
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
#define FUNCNAME test_2_asm
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
#ifdef X64
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test_2
test_2:
mov PTRSZ [REG_XSP], TEST_2_LOOP_COUNT_REG_ASM
sub REG_XSP, 8
mov PTRSZ [REG_XSP], TEST_2_CHECK_REG_ASM
sub REG_XSP, 8
mov SUSPEND_TEST_REG_ASM, TEST_VAL
mov SUSPEND_TEST_REG_ASM, TEST_VAL
nop
mov BYTE SYMREF(test_ready), HEX(1)
mov LOOP_TEST_REG_OUTER_ASM, LOOP_COUNT_OUTER
mov TEST_2_LOOP_COUNT_REG_ASM, 2
mov TEST_2_CHECK_REG_ASM, HEX(0)
/* Code changes here must stay in synch with the loop bounds
* check hardcoded in the dll.
*/
loop_b_outer:
mov LOOP_TEST_REG_INNER_ASM, LOOP_COUNT_INNER
loop_b_inner:
mov TEST_2_LOOP_COUNT_REG_ASM, 1
add TEST_2_LOOP_COUNT_REG_ASM, PTRSZ SYMREF(loop_inc)
mov TEST_2_LOOP_COUNT_REG_ASM, 2
sub LOOP_TEST_REG_INNER_ASM, 1
cmp LOOP_TEST_REG_INNER_ASM, 0
jnz loop_b_inner
mov BYTE SYMREF(test_suspend), HEX(1)
sub LOOP_TEST_REG_OUTER_ASM, 1
cmp LOOP_TEST_REG_OUTER_ASM, 0
jnz loop_b_outer
jmp epilog_b
epilog_b:
mov BYTE SYMREF(test_done), HEX(1)
add REG_XSP, 8
mov TEST_2_CHECK_REG_ASM, PTRSZ [REG_XSP]
add REG_XSP, 8
mov TEST_2_LOOP_COUNT_REG_ASM, PTRSZ [REG_XSP]
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
#endif
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
/* Test: not implemented for ARM */
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
/* Test: not implemented for AARCH64 */
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
END_FILE
/* clang-format on */
#endif
| 1 | 15,302 | A "tab difference"? As in, there are tab characters somewhere? This disabling of clang-format for all the code here seems like overkill. Are you sure there's not some other solution? I don't really understand the problem though. Do you mean clang-format-diff? | DynamoRIO-dynamorio | c |
@@ -31,12 +31,7 @@ import org.openqa.selenium.grid.sessionmap.SessionMap;
import org.openqa.selenium.grid.sessionmap.local.LocalSessionMap;
import org.openqa.selenium.netty.server.NettyServer;
import org.openqa.selenium.remote.SessionId;
-import org.openqa.selenium.remote.http.HttpClient;
-import org.openqa.selenium.remote.http.HttpHandler;
-import org.openqa.selenium.remote.http.HttpRequest;
-import org.openqa.selenium.remote.http.HttpResponse;
-import org.openqa.selenium.remote.http.TextMessage;
-import org.openqa.selenium.remote.http.WebSocket;
+import org.openqa.selenium.remote.http.*;
import org.openqa.selenium.remote.tracing.DefaultTestTracer;
import org.openqa.selenium.remote.tracing.Tracer;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.router;
import org.junit.Before;
import org.junit.Test;
import org.openqa.selenium.ImmutableCapabilities;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.events.local.GuavaEventBus;
import org.openqa.selenium.grid.config.Config;
import org.openqa.selenium.grid.config.MapConfig;
import org.openqa.selenium.grid.data.Session;
import org.openqa.selenium.grid.server.BaseServerOptions;
import org.openqa.selenium.grid.server.Server;
import org.openqa.selenium.grid.sessionmap.SessionMap;
import org.openqa.selenium.grid.sessionmap.local.LocalSessionMap;
import org.openqa.selenium.netty.server.NettyServer;
import org.openqa.selenium.remote.SessionId;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.http.TextMessage;
import org.openqa.selenium.remote.http.WebSocket;
import org.openqa.selenium.remote.tracing.DefaultTestTracer;
import org.openqa.selenium.remote.tracing.Tracer;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
public class ProxyCdpTest {
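  // Plain HTTP requests get an empty response; these tests only exercise the WebSocket forwarding path.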
private final HttpHandler nullHandler = req -> new HttpResponse();
private final Config emptyConfig = new MapConfig(Map.of());
private Server<?> proxyServer;
private SessionMap sessions;
@Before
public void setUp() {
Tracer tracer = DefaultTestTracer.createTracer();
EventBus events = new GuavaEventBus();
sessions = new LocalSessionMap(tracer, events);
// Set up the proxy we'll be using
HttpClient.Factory clientFactory = HttpClient.Factory.createDefault();
ProxyCdpIntoGrid proxy = new ProxyCdpIntoGrid(clientFactory, sessions);
proxyServer = new NettyServer(new BaseServerOptions(emptyConfig), nullHandler, proxy).start();
}
@Test
public void shouldForwardTextMessageToServer() throws URISyntaxException, InterruptedException {
HttpClient.Factory clientFactory = HttpClient.Factory.createDefault();
// Create a backend server which will capture any incoming text message
AtomicReference<String> text = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
Server<?> backend = createBackendServer(latch, text, "");
// Push a session that resolves to the backend server into the session map
SessionId id = new SessionId(UUID.randomUUID());
sessions.add(new Session(id, backend.getUrl().toURI(), new ImmutableCapabilities()));
// Now! Send a message. We expect it to eventually show up in the backend
WebSocket socket = clientFactory.createClient(proxyServer.getUrl())
.openSocket(new HttpRequest(GET, String.format("/session/%s/cdp", id)), new WebSocket.Listener(){});
socket.sendText("Cheese!");
assertThat(latch.await(5, SECONDS)).isTrue();
assertThat(text.get()).isEqualTo("Cheese!");
socket.close();
}
@Test
public void shouldForwardTextMessageFromServerToLocalEnd() throws URISyntaxException, InterruptedException {
HttpClient.Factory clientFactory = HttpClient.Factory.createDefault();
Server<?> backend = createBackendServer(new CountDownLatch(1), new AtomicReference<>(), "Asiago");
// Push a session that resolves to the backend server into the session map
SessionId id = new SessionId(UUID.randomUUID());
sessions.add(new Session(id, backend.getUrl().toURI(), new ImmutableCapabilities()));
    // Now! Send a message. We expect the backend's canned response to eventually show up at the local end
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<String> text = new AtomicReference<>();
WebSocket socket = clientFactory.createClient(proxyServer.getUrl())
.openSocket(new HttpRequest(GET, String.format("/session/%s/cdp", id)), new WebSocket.Listener() {
@Override
public void onText(CharSequence data) {
text.set(data.toString());
latch.countDown();
}
});
socket.sendText("Cheese!");
assertThat(latch.await(5, SECONDS)).isTrue();
assertThat(text.get()).isEqualTo("Asiago");
socket.close();
}
private Server<?> createBackendServer(CountDownLatch latch, AtomicReference<String> incomingRef, String response) {
return new NettyServer(
new BaseServerOptions(emptyConfig),
nullHandler,
(uri, sink) -> Optional.of(msg -> {
if (msg instanceof TextMessage) {
incomingRef.set(((TextMessage) msg).text());
sink.accept(new TextMessage(response));
latch.countDown();
}
}))
.start();
}
}
| 1 | 17,921 | Could you please leave the explicit imports? | SeleniumHQ-selenium | js |
@@ -265,6 +265,9 @@ public abstract class GapicInterfaceConfig implements InterfaceConfig {
}
List<T> methodConfigs = new ArrayList<>();
for (MethodConfigProto methodConfigProto : interfaceConfigProto.getMethodsList()) {
+ if (methodConfigMap.get(methodConfigProto.getName()) == null) {
+ continue;
+ }
methodConfigs.add(methodConfigMap.get(methodConfigProto.getName()));
}
return methodConfigs; | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.codegen.CollectionConfigProto;
import com.google.api.codegen.InterfaceConfigProto;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.RetryParamsDefinitionProto;
import com.google.api.codegen.common.TargetLanguage;
import com.google.api.codegen.transformer.RetryDefinitionsTransformer;
import com.google.api.codegen.util.ProtoParser;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.auto.value.AutoValue;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import javax.annotation.Nullable;
/**
* GapicInterfaceConfig represents the client code-gen config for an API interface, and includes the
* configuration for methods and resource names.
*
* <p>In grpc-based Gapic clients, an API interface is defined by a "service" section in a proto
* file.
*/
@AutoValue
public abstract class GapicInterfaceConfig implements InterfaceConfig {
private static final String SERVICE_ADDRESS_PARAM = "service_address";
private static final String SCOPES_PARAM = "scopes";
private static final ImmutableSet<String> CONSTRUCTOR_PARAMS =
ImmutableSet.of(SERVICE_ADDRESS_PARAM, SCOPES_PARAM);
public Interface getInterface() {
return getInterfaceModel().getInterface();
}
@Override
public abstract ProtoInterfaceModel getInterfaceModel();
@Override
public abstract List<GapicMethodConfig> getMethodConfigs();
@Nullable
@Override
public abstract SmokeTestConfig getSmokeTestConfig();
abstract ImmutableMap<String, GapicMethodConfig> getMethodConfigMap();
@Override
public abstract RetryCodesConfig getRetryCodesConfig();
@Override
public abstract ImmutableMap<String, RetryParamsDefinitionProto> getRetrySettingsDefinition();
@Override
public abstract ImmutableList<String> getRequiredConstructorParams();
@Override
public abstract ImmutableList<SingleResourceNameConfig> getSingleResourceNameConfigs();
@Override
public abstract String getManualDoc();
@Override
public String getName() {
return getInterfaceNameOverride() != null
? getInterfaceNameOverride()
: getInterface().getSimpleName();
}
@Override
public String getRawName() {
return getInterface().getSimpleName();
}
/**
* Creates an instance of GapicInterfaceConfig based on ConfigProto, linking up method
* configurations with specified methods in methodConfigMap. On errors, null will be returned, and
* diagnostics are reported to the model.
*/
@Nullable
static GapicInterfaceConfig createInterfaceConfig(
DiagCollector diagCollector,
TargetLanguage language,
String defaultPackageName,
GapicInterfaceInput interfaceInput,
String interfaceNameOverride,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
ProtoParser protoParser) {
Interface apiInterface = interfaceInput.getInterface();
Map<Method, MethodConfigProto> methodsToGenerate = interfaceInput.getMethodsToGenerate();
InterfaceConfigProto interfaceConfigProto = interfaceInput.getInterfaceConfigProto();
RetryCodesConfig retryCodesConfig =
RetryCodesConfig.create(
diagCollector,
interfaceConfigProto,
new ArrayList<>(interfaceInput.getMethodsToGenerate().keySet()),
protoParser);
ImmutableMap<String, RetryParamsDefinitionProto> retrySettingsDefinition =
RetryDefinitionsTransformer.createRetrySettingsDefinition(interfaceConfigProto);
ImmutableMap<String, GapicMethodConfig> methodConfigsMap;
List<GapicMethodConfig> methodConfigs;
if (retryCodesConfig != null && retrySettingsDefinition != null) {
methodConfigsMap =
createMethodConfigMap(
diagCollector,
language,
defaultPackageName,
methodsToGenerate,
messageConfigs,
resourceNameConfigs,
retryCodesConfig,
retrySettingsDefinition.keySet(),
protoParser);
if (methodConfigsMap == null) {
diagCollector.addDiag(
Diag.error(SimpleLocation.TOPLEVEL, "Error constructing methodConfigMap"));
return null;
}
methodConfigs = createMethodConfigs(methodConfigsMap, interfaceConfigProto);
} else {
methodConfigsMap = ImmutableMap.of();
methodConfigs = ImmutableList.of();
}
SmokeTestConfig smokeTestConfig =
createSmokeTestConfig(diagCollector, apiInterface, interfaceConfigProto);
ImmutableList<String> requiredConstructorParams =
ImmutableList.copyOf(interfaceConfigProto.getRequiredConstructorParamsList());
for (String param : interfaceConfigProto.getRequiredConstructorParamsList()) {
if (!CONSTRUCTOR_PARAMS.contains(param)) {
diagCollector.addDiag(
Diag.error(SimpleLocation.TOPLEVEL, "Unsupported constructor param: %s", param));
}
}
ImmutableList.Builder<SingleResourceNameConfig> resourcesBuilder = ImmutableList.builder();
for (CollectionConfigProto collectionConfigProto : interfaceConfigProto.getCollectionsList()) {
String entityName = collectionConfigProto.getEntityName();
ResourceNameConfig resourceName = resourceNameConfigs.get(entityName);
if (!(resourceName instanceof SingleResourceNameConfig)) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Inconsistent configuration - single resource name %s specified for interface, "
+ " but was not found in GapicProductConfig configuration.",
entityName));
return null;
}
resourcesBuilder.add((SingleResourceNameConfig) resourceName);
}
ImmutableList<SingleResourceNameConfig> singleResourceNames = resourcesBuilder.build();
String manualDoc =
Strings.nullToEmpty(
interfaceConfigProto.getLangDoc().get(language.toString().toLowerCase()))
.trim();
if (diagCollector.hasErrors()) {
return null;
} else {
return new AutoValue_GapicInterfaceConfig(
interfaceNameOverride,
new ProtoInterfaceModel(apiInterface),
methodConfigs,
smokeTestConfig,
methodConfigsMap,
retryCodesConfig,
retrySettingsDefinition,
requiredConstructorParams,
singleResourceNames,
manualDoc);
}
}
private static SmokeTestConfig createSmokeTestConfig(
DiagCollector diagCollector,
Interface apiInterface,
InterfaceConfigProto interfaceConfigProto) {
if (interfaceConfigProto.hasSmokeTest()) {
return SmokeTestConfig.createSmokeTestConfig(
new ProtoInterfaceModel(apiInterface),
interfaceConfigProto.getSmokeTest(),
diagCollector);
} else {
return null;
}
}
private static ImmutableMap<String, GapicMethodConfig> createMethodConfigMap(
DiagCollector diagCollector,
TargetLanguage language,
String defaultPackageName,
Map<Method, MethodConfigProto> methodsToGenerate,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
RetryCodesConfig retryCodesConfig,
ImmutableSet<String> retryParamsConfigNames,
ProtoParser protoParser) {
Map<String, GapicMethodConfig> methodConfigMapBuilder = new LinkedHashMap<>();
for (Entry<Method, MethodConfigProto> methodEntry : methodsToGenerate.entrySet()) {
MethodConfigProto methodConfigProto = methodEntry.getValue();
Method method = methodEntry.getKey();
GapicMethodConfig methodConfig =
GapicMethodConfig.createMethodConfig(
diagCollector,
language,
defaultPackageName,
methodConfigProto,
method,
messageConfigs,
resourceNameConfigs,
retryCodesConfig,
retryParamsConfigNames,
protoParser);
if (methodConfig == null) {
continue;
}
methodConfigMapBuilder.put(method.getSimpleName(), methodConfig);
}
if (diagCollector.getErrorCount() > 0) {
return null;
} else {
return ImmutableMap.copyOf(methodConfigMapBuilder);
}
}
/** Return a list of configs for method in the order given by the GAPIC interface config. */
static <T> List<T> createMethodConfigs(
ImmutableMap<String, T> methodConfigMap, InterfaceConfigProto interfaceConfigProto) {
if (interfaceConfigProto.equals(InterfaceConfigProto.getDefaultInstance())) {
// InterfaceConfigProto was not given, so just return the order in methodConfigMap.
return methodConfigMap.values().asList();
}
List<T> methodConfigs = new ArrayList<>();
for (MethodConfigProto methodConfigProto : interfaceConfigProto.getMethodsList()) {
methodConfigs.add(methodConfigMap.get(methodConfigProto.getName()));
}
return methodConfigs;
}
/** Returns the GapicMethodConfig for the given method. */
@Override
public GapicMethodConfig getMethodConfig(MethodModel method) {
return getMethodConfig(method.getSimpleName());
}
/** Returns the GapicMethodConfig for the given method. */
public GapicMethodConfig getMethodConfig(Method method) {
return getMethodConfig(method.getSimpleName());
}
public GapicMethodConfig getMethodConfig(String methodSimpleName) {
return getMethodConfigMap().get(methodSimpleName);
}
@Override
public boolean hasDefaultServiceAddress() {
return !getRequiredConstructorParams().contains(SERVICE_ADDRESS_PARAM);
}
@Override
public boolean hasDefaultServiceScopes() {
return !getRequiredConstructorParams().contains(SCOPES_PARAM);
}
@Override
public boolean hasDefaultInstance() {
return getRequiredConstructorParams().size() == 0;
}
/**
* If rerouteToGrpcInterface is set, then looks up that interface and returns it, otherwise
* returns the value of defaultInterface.
*/
public static Interface getTargetInterface(
Interface defaultInterface, String rerouteToGrpcInterface) {
Interface targetInterface = defaultInterface;
if (!Strings.isNullOrEmpty(rerouteToGrpcInterface)) {
targetInterface =
defaultInterface.getModel().getSymbolTable().lookupInterface(rerouteToGrpcInterface);
if (targetInterface == null) {
throw new IllegalArgumentException(
"reroute_to_grpc_interface not found: " + rerouteToGrpcInterface);
}
}
return targetInterface;
}
@Override
public boolean hasPageStreamingMethods() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isPageStreaming()) {
return true;
}
}
return false;
}
@Override
public boolean hasBatchingMethods() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isBatching()) {
return true;
}
}
return false;
}
@Override
public boolean hasGrpcStreamingMethods() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isGrpcStreaming()) {
return true;
}
}
return false;
}
@Override
public boolean hasGrpcStreamingMethods(GrpcStreamingConfig.GrpcStreamingType streamingType) {
for (GapicMethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isGrpcStreaming() && methodConfig.getGrpcStreamingType() == streamingType) {
return true;
}
}
return false;
}
@Override
public boolean hasLongRunningOperations() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.hasLroConfig()) {
return true;
}
}
return false;
}
@Override
public boolean hasReroutedInterfaceMethods() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (!Strings.isNullOrEmpty(methodConfig.getRerouteToGrpcInterface())) {
return true;
}
}
return false;
}
}
| 1 | 28,835 | Can we check for containsKey here? | googleapis-gapic-generator | java |
@@ -35,9 +35,14 @@ import (
"golang.org/x/tools/go/types/typeutil"
)
+const usage = "usage: wire [gen] [PKG] | wire show [...] | wire check [...]"
+
func main() {
var err error
switch {
+ case len(os.Args) == 2 && (os.Args[1] == "help" || os.Args[1] == "-h" || os.Args[1] == "-help" || os.Args[1] == "--help"):
+ fmt.Fprintln(os.Stderr, usage)
+ os.Exit(2)
case len(os.Args) == 1 || len(os.Args) == 2 && os.Args[1] == "gen":
err = generate(".")
case len(os.Args) == 2 && os.Args[1] == "show": | 1 | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Wire is a compile-time dependency injection tool.
//
// For an overview, see https://github.com/google/go-cloud/blob/master/wire/README.md
package main
import (
"errors"
"fmt"
"go/build"
"go/token"
"go/types"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"github.com/google/go-cloud/wire/internal/wire"
"golang.org/x/tools/go/types/typeutil"
)
func main() {
var err error
switch {
case len(os.Args) == 1 || len(os.Args) == 2 && os.Args[1] == "gen":
err = generate(".")
case len(os.Args) == 2 && os.Args[1] == "show":
err = show(".")
case len(os.Args) > 2 && os.Args[1] == "show":
err = show(os.Args[2:]...)
case len(os.Args) == 2 && os.Args[1] == "check":
err = check(".")
case len(os.Args) > 2 && os.Args[1] == "check":
err = check(os.Args[2:]...)
case len(os.Args) == 2:
err = generate(os.Args[1])
case len(os.Args) == 3 && os.Args[1] == "gen":
err = generate(os.Args[2])
default:
fmt.Fprintln(os.Stderr, "usage: wire [gen] [PKG] | wire show [...] | wire check [...]")
os.Exit(64)
}
if err != nil {
fmt.Fprintln(os.Stderr, "wire:", err)
os.Exit(1)
}
}
// generate runs the gen subcommand. Given a package, gen will create
// the wire_gen.go file.
func generate(pkg string) error {
wd, err := os.Getwd()
if err != nil {
return err
}
pkgInfo, err := build.Default.Import(pkg, wd, build.FindOnly)
if err != nil {
return err
}
out, errs := wire.Generate(&build.Default, wd, pkg)
if len(errs) > 0 {
logErrors(errs)
return errors.New("generate failed")
}
if len(out) == 0 {
// No Wire directives, don't write anything.
fmt.Fprintln(os.Stderr, "wire: no injector found for", pkg)
return nil
}
p := filepath.Join(pkgInfo.Dir, "wire_gen.go")
if err := ioutil.WriteFile(p, out, 0666); err != nil {
return err
}
return nil
}
// show runs the show subcommand.
//
// Given one or more packages, show will find all the provider sets
// declared as top-level variables and print what other provider sets it
// imports and what outputs it can produce, given possible inputs.
// It also lists any injector functions defined in the package.
func show(pkgs ...string) error {
wd, err := os.Getwd()
if err != nil {
return err
}
info, errs := wire.Load(&build.Default, wd, pkgs)
if info != nil {
keys := make([]wire.ProviderSetID, 0, len(info.Sets))
for k := range info.Sets {
keys = append(keys, k)
}
sort.Slice(keys, func(i, j int) bool {
if keys[i].ImportPath == keys[j].ImportPath {
return keys[i].VarName < keys[j].VarName
}
return keys[i].ImportPath < keys[j].ImportPath
})
// ANSI color codes.
// TODO(light): Possibly use github.com/fatih/color?
const (
reset = "\x1b[0m"
redBold = "\x1b[0;1;31m"
blue = "\x1b[0;34m"
green = "\x1b[0;32m"
)
for i, k := range keys {
if i > 0 {
fmt.Println()
}
outGroups, imports := gather(info, k)
fmt.Printf("%s%s%s\n", redBold, k, reset)
for _, imp := range sortSet(imports) {
fmt.Printf("\t%s\n", imp)
}
for i := range outGroups {
fmt.Printf("%sOutputs given %s:%s\n", blue, outGroups[i].name, reset)
out := make(map[string]token.Pos, outGroups[i].outputs.Len())
outGroups[i].outputs.Iterate(func(t types.Type, v interface{}) {
switch v := v.(type) {
case *wire.Provider:
out[types.TypeString(t, nil)] = v.Pos
case *wire.Value:
out[types.TypeString(t, nil)] = v.Pos
default:
panic("unreachable")
}
})
for _, t := range sortSet(out) {
fmt.Printf("\t%s%s%s\n", green, t, reset)
fmt.Printf("\t\tat %v\n", info.Fset.Position(out[t]))
}
}
}
if len(info.Injectors) > 0 {
injectors := append([]*wire.Injector(nil), info.Injectors...)
sort.Slice(injectors, func(i, j int) bool {
if injectors[i].ImportPath == injectors[j].ImportPath {
return injectors[i].FuncName < injectors[j].FuncName
}
return injectors[i].ImportPath < injectors[j].ImportPath
})
fmt.Printf("%sInjectors:%s\n", redBold, reset)
for _, in := range injectors {
fmt.Printf("\t%v\n", in)
}
}
}
if len(errs) > 0 {
logErrors(errs)
return errors.New("error loading packages")
}
return nil
}
// check runs the check subcommand.
//
// Given one or more packages, check will print any type-checking or
// Wire errors found with top-level variable provider sets or injector
// functions.
func check(pkgs ...string) error {
wd, err := os.Getwd()
if err != nil {
return err
}
_, errs := wire.Load(&build.Default, wd, pkgs)
if len(errs) > 0 {
logErrors(errs)
return errors.New("error loading packages")
}
return nil
}
type outGroup struct {
name string
inputs *typeutil.Map // values are not important
outputs *typeutil.Map // values are *wire.Provider or *wire.Value
}
// gather flattens a provider set into outputs grouped by the inputs
// required to create them. As it flattens the provider set, it records
// the visited named provider sets as imports.
func gather(info *wire.Info, key wire.ProviderSetID) (_ []outGroup, imports map[string]struct{}) {
set := info.Sets[key]
hash := typeutil.MakeHasher()
// Find imports.
next := []*wire.ProviderSet{info.Sets[key]}
visited := make(map[*wire.ProviderSet]struct{})
imports = make(map[string]struct{})
for len(next) > 0 {
curr := next[len(next)-1]
next = next[:len(next)-1]
if _, found := visited[curr]; found {
continue
}
visited[curr] = struct{}{}
if curr.Name != "" && !(curr.PkgPath == key.ImportPath && curr.Name == key.VarName) {
imports[formatProviderSetName(curr.PkgPath, curr.Name)] = struct{}{}
}
for _, imp := range curr.Imports {
next = append(next, imp)
}
}
// Depth-first search to build groups.
var groups []outGroup
inputVisited := new(typeutil.Map) // values are int, indices into groups or -1 for input.
inputVisited.SetHasher(hash)
var stk []types.Type
for _, k := range set.Outputs() {
// Start a DFS by picking a random unvisited node.
if inputVisited.At(k) == nil {
stk = append(stk, k)
}
// Run DFS
dfs:
for len(stk) > 0 {
curr := stk[len(stk)-1]
stk = stk[:len(stk)-1]
if inputVisited.At(curr) != nil {
continue
}
switch pv := set.For(curr); {
case pv.IsNil():
// This is an input.
inputVisited.Set(curr, -1)
case pv.IsProvider():
// Try to see if any args haven't been visited.
p := pv.Provider()
allPresent := true
for _, arg := range p.Args {
if inputVisited.At(arg.Type) == nil {
allPresent = false
}
}
if !allPresent {
stk = append(stk, curr)
for _, arg := range p.Args {
if inputVisited.At(arg.Type) == nil {
stk = append(stk, arg.Type)
}
}
continue dfs
}
// Build up set of input types, match to a group.
in := new(typeutil.Map)
in.SetHasher(hash)
for _, arg := range p.Args {
i := inputVisited.At(arg.Type).(int)
if i == -1 {
in.Set(arg.Type, true)
} else {
mergeTypeSets(in, groups[i].inputs)
}
}
for i := range groups {
if sameTypeKeys(groups[i].inputs, in) {
groups[i].outputs.Set(curr, p)
inputVisited.Set(curr, i)
continue dfs
}
}
out := new(typeutil.Map)
out.SetHasher(hash)
out.Set(curr, p)
inputVisited.Set(curr, len(groups))
groups = append(groups, outGroup{
inputs: in,
outputs: out,
})
case pv.IsValue():
v := pv.Value()
for i := range groups {
if groups[i].inputs.Len() == 0 {
groups[i].outputs.Set(curr, v)
inputVisited.Set(curr, i)
continue dfs
}
}
in := new(typeutil.Map)
in.SetHasher(hash)
out := new(typeutil.Map)
out.SetHasher(hash)
out.Set(curr, v)
inputVisited.Set(curr, len(groups))
groups = append(groups, outGroup{
inputs: in,
outputs: out,
})
default:
panic("unreachable")
}
}
}
// Name and sort groups.
for i := range groups {
if groups[i].inputs.Len() == 0 {
groups[i].name = "no inputs"
continue
}
instr := make([]string, 0, groups[i].inputs.Len())
groups[i].inputs.Iterate(func(k types.Type, _ interface{}) {
instr = append(instr, types.TypeString(k, nil))
})
sort.Strings(instr)
groups[i].name = strings.Join(instr, ", ")
}
sort.Slice(groups, func(i, j int) bool {
if groups[i].inputs.Len() == groups[j].inputs.Len() {
return groups[i].name < groups[j].name
}
return groups[i].inputs.Len() < groups[j].inputs.Len()
})
return groups, imports
}
func mergeTypeSets(dst, src *typeutil.Map) {
src.Iterate(func(k types.Type, _ interface{}) {
dst.Set(k, true)
})
}
func sameTypeKeys(a, b *typeutil.Map) bool {
if a.Len() != b.Len() {
return false
}
same := true
a.Iterate(func(k types.Type, _ interface{}) {
if b.At(k) == nil {
same = false
}
})
return same
}
func sortSet(set interface{}) []string {
rv := reflect.ValueOf(set)
a := make([]string, 0, rv.Len())
keys := rv.MapKeys()
for _, k := range keys {
a = append(a, k.String())
}
sort.Strings(a)
return a
}
func formatProviderSetName(importPath, varName string) string {
// Since varName is an identifier, it doesn't make sense to quote.
return strconv.Quote(importPath) + "." + varName
}
func logErrors(errs []error) {
for _, err := range errs {
fmt.Fprintln(os.Stderr, strings.Replace(err.Error(), "\n", "\n\t", -1))
}
}
| 1 | 10,527 | Exit successfully. We served the help the user wanted. | google-go-cloud | go |
@@ -15,13 +15,13 @@ import (
// RepoRoot is the root of the Please repository
var RepoRoot string
-// initialWorkingDir is the directory we began in. Early on we chdir() to the repo root but for
+// InitialWorkingDir is the directory we began in. Early on we chdir() to the repo root but for
// some things we need to remember this.
-var initialWorkingDir string
+var InitialWorkingDir string
-// initialPackage is the initial subdir of the working directory, ie. what package did we start in.
-// This is similar but not identical to initialWorkingDir.
-var initialPackage string
+// InitialPackagePath is the initial subdir of the working directory, ie. what package did we start in.
+// This is similar but not identical to InitialWorkingDir.
+var InitialPackagePath string
// usingBazelWorkspace is true if we detected a Bazel WORKSPACE file to find our repo root.
var usingBazelWorkspace bool | 1 | package core
import (
"bytes"
"crypto/sha1"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"github.com/thought-machine/please/src/fs"
)
// RepoRoot is the root of the Please repository
var RepoRoot string
// initialWorkingDir is the directory we began in. Early on we chdir() to the repo root but for
// some things we need to remember this.
var initialWorkingDir string
// initialPackage is the initial subdir of the working directory, ie. what package did we start in.
// This is similar but not identical to initialWorkingDir.
var initialPackage string
// usingBazelWorkspace is true if we detected a Bazel WORKSPACE file to find our repo root.
var usingBazelWorkspace bool
// DirPermissions are the default permission bits we apply to directories.
const DirPermissions = os.ModeDir | 0775
// FindRepoRoot returns the root directory of the current repo and sets the initial working dir.
// It returns true if the repo root was found.
func FindRepoRoot() bool {
initialWorkingDir, _ = os.Getwd()
RepoRoot, initialPackage = getRepoRoot(ConfigFileName)
return RepoRoot != ""
}
// MustFindRepoRoot returns the root directory of the current repo and sets the initial working dir.
// It dies on failure, although will fall back to looking for a Bazel WORKSPACE file first.
func MustFindRepoRoot() string {
if RepoRoot != "" {
return RepoRoot
} else if FindRepoRoot() {
return RepoRoot
}
RepoRoot, initialPackage = getRepoRoot("WORKSPACE")
if RepoRoot != "" {
log.Warning("No .plzconfig file found to define the repo root.")
log.Warning("Falling back to Bazel WORKSPACE at %s", path.Join(RepoRoot, "WORKSPACE"))
usingBazelWorkspace = true
return RepoRoot
}
// Check the config for a default repo location. Of course, we have to load system-level config
// in order to do that...
config, err := ReadConfigFiles(defaultGlobalConfigFiles(), nil)
if err != nil {
log.Fatalf("Error reading config file: %s", err)
}
if config.Please.DefaultRepo != "" {
log.Warning("Using default repo at %s", config.Please.DefaultRepo)
RepoRoot = fs.ExpandHomePath(config.Please.DefaultRepo)
return RepoRoot
}
log.Fatalf("Couldn't locate the repo root. Are you sure you're inside a plz repo?")
return ""
}
// InitialPackage returns a label corresponding to the initial package we started in.
func InitialPackage() []BuildLabel {
// It's possible to start off in directories that aren't legal package names, because
// our package naming is stricter than directory naming requirements.
// In that case move up until we find somewhere we can run from.
dir := initialPackage
for dir != "." {
if label, err := TryNewBuildLabel(dir, "test"); err == nil {
label.Name = "..."
return []BuildLabel{label}
}
dir = filepath.Dir(dir)
}
return WholeGraph
}
// getRepoRoot returns the root directory of the current repo and the initial package.
func getRepoRoot(filename string) (string, string) {
dir, err := os.Getwd()
if err != nil {
log.Fatalf("Couldn't determine working directory: %s", err)
}
// Walk up directories looking for a .plzconfig file, which we use to identify the root.
initial := dir
for dir != "" {
if PathExists(path.Join(dir, filename)) {
return dir, strings.TrimLeft(initial[len(dir):], "/")
}
dir, _ = path.Split(dir)
dir = strings.TrimRight(dir, "/")
}
return "", ""
}
// StartedAtRepoRoot returns true if the build was initiated from the repo root.
// Used to provide slightly nicer output in some places.
func StartedAtRepoRoot() bool {
return RepoRoot == initialWorkingDir
}
// ReturnToInitialWorkingDir changes directory back to where plz was first started from.
func ReturnToInitialWorkingDir() {
if err := os.Chdir(initialWorkingDir); err != nil {
log.Error("Failed to change directory to %s: %s", initialWorkingDir, err)
}
}
// A SourcePair represents a source file with its source and temporary locations.
// This isn't typically used much by callers; it's just useful to have a single type for channels.
type SourcePair struct{ Src, Tmp string }
// IterSources returns all the sources for a function, allowing for sources that are other rules
// and rules that require transitive dependencies.
// Yielded values are pairs of the original source location and its temporary location for this rule.
// If includeTools is true it yields the target's tools as well.
func IterSources(graph *BuildGraph, target *BuildTarget, includeTools bool) <-chan SourcePair {
ch := make(chan SourcePair)
done := map[string]bool{}
tmpDir := target.TmpDir()
go func() {
for input := range IterInputs(graph, target, includeTools, false) {
fullPaths := input.FullPaths(graph)
for i, sourcePath := range input.Paths(graph) {
if tmpPath := path.Join(tmpDir, sourcePath); !done[tmpPath] {
ch <- SourcePair{fullPaths[i], tmpPath}
done[tmpPath] = true
}
}
}
close(ch)
}()
return ch
}
// IterInputs iterates all the inputs for a target.
func IterInputs(graph *BuildGraph, target *BuildTarget, includeTools, sourcesOnly bool) <-chan BuildInput {
ch := make(chan BuildInput)
done := map[BuildLabel]bool{}
var inner func(dependency *BuildTarget)
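    // inner walks the dependency graph depth-first, emitting each dependency's label at most once.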
inner = func(dependency *BuildTarget) {
if dependency != target {
ch <- dependency.Label
}
// All the sources of this target now count as done
for _, src := range dependency.AllSources() {
if label, ok := src.Label(); ok && dependency.IsSourceOnlyDep(label) {
done[label] = true
}
}
done[dependency.Label] = true
if target == dependency || (target.NeedsTransitiveDependencies && !dependency.OutputIsComplete) {
for _, dep := range dependency.BuildDependencies() {
for _, dep2 := range recursivelyProvideFor(graph, target, dependency, dep.Label) {
if !done[dep2] && !dependency.IsTool(dep2) {
inner(graph.TargetOrDie(dep2))
}
}
}
} else {
for _, dep := range dependency.ExportedDependencies() {
for _, dep2 := range recursivelyProvideFor(graph, target, dependency, dep) {
if !done[dep2] {
inner(graph.TargetOrDie(dep2))
}
}
}
}
}
go func() {
for _, source := range target.AllSources() {
recursivelyProvideSource(graph, target, source, ch)
}
if includeTools {
for _, tool := range target.AllTools() {
recursivelyProvideSource(graph, target, tool, ch)
}
}
if !sourcesOnly {
inner(target)
}
close(ch)
}()
return ch
}
// recursivelyProvideFor recursively applies ProvideFor to a target.
func recursivelyProvideFor(graph *BuildGraph, target, dependency *BuildTarget, dep BuildLabel) []BuildLabel {
depTarget := graph.TargetOrDie(dep)
ret := depTarget.ProvideFor(dependency)
if len(ret) == 1 && ret[0] == dep {
// Dependency doesn't have a require/provide directly on this guy, up to the top-level
// target. We have to check the dep first to keep things consistent with what targets
// have actually been built.
ret = depTarget.ProvideFor(target)
if len(ret) == 1 && ret[0] == dep {
return ret
}
}
ret2 := make([]BuildLabel, 0, len(ret))
for _, r := range ret {
if r == dep {
ret2 = append(ret2, r) // Providing itself, don't recurse
} else {
ret2 = append(ret2, recursivelyProvideFor(graph, target, dependency, r)...)
}
}
return ret2
}
// recursivelyProvideSource is similar to recursivelyProvideFor but operates on a BuildInput.
func recursivelyProvideSource(graph *BuildGraph, target *BuildTarget, src BuildInput, ch chan BuildInput) {
if label, ok := src.nonOutputLabel(); ok {
for _, p := range recursivelyProvideFor(graph, target, target, label) {
ch <- p
}
return
}
ch <- src
}
// IterRuntimeFiles yields all the runtime files for a rule (outputs, tools & data files), similar to above.
func IterRuntimeFiles(graph *BuildGraph, target *BuildTarget, absoluteOuts bool, runtimeDir string) <-chan SourcePair {
done := map[string]bool{}
ch := make(chan SourcePair)
pushOut := func(src, out string) {
if absoluteOuts {
out = path.Join(RepoRoot, runtimeDir, out)
}
if !done[out] {
ch <- SourcePair{src, out}
done[out] = true
}
}
go func() {
outDir := target.OutDir()
for _, out := range target.Outputs() {
pushOut(path.Join(outDir, out), out)
}
for _, data := range target.AllData() {
fullPaths := data.FullPaths(graph)
for i, dataPath := range data.Paths(graph) {
pushOut(fullPaths[i], dataPath)
}
}
if target.Test != nil {
for _, tool := range target.AllTestTools() {
fullPaths := tool.FullPaths(graph)
for i, dataPath := range tool.Paths(graph) {
pushOut(fullPaths[i], dataPath)
}
}
}
close(ch)
}()
return ch
}
// IterInputPaths yields all the transitive input files for a rule (sources & data files), similar to above (again).
func IterInputPaths(graph *BuildGraph, target *BuildTarget) <-chan string {
// Use a couple of maps to protect us from dep-graph loops and to stop parsing the same target
// multiple times. We also only want to push files to the channel that it has not already seen.
donePaths := map[string]bool{}
doneTargets := map[*BuildTarget]bool{}
ch := make(chan string)
var inner func(*BuildTarget)
inner = func(target *BuildTarget) {
if !doneTargets[target] {
// First yield all the sources of the target only ever pushing declared paths to
// the channel to prevent us outputting any intermediate files.
for _, source := range target.AllSources() {
// If the label is nil add any input paths contained here.
if label, ok := source.nonOutputLabel(); !ok {
for _, sourcePath := range source.FullPaths(graph) {
if !donePaths[sourcePath] {
ch <- sourcePath
donePaths[sourcePath] = true
}
}
// Otherwise we should recurse for this build label (and gather its sources)
} else {
inner(graph.TargetOrDie(label))
}
}
// Now yield all the data deps of this rule.
for _, data := range target.AllData() {
// If the label is nil add any input paths contained here.
if label, ok := data.Label(); !ok {
for _, sourcePath := range data.FullPaths(graph) {
if !donePaths[sourcePath] {
ch <- sourcePath
donePaths[sourcePath] = true
}
}
// Otherwise we should recurse for this build label (and gather its sources)
} else {
inner(graph.TargetOrDie(label))
}
}
// Finally recurse for all the deps of this rule.
for _, dep := range target.Dependencies() {
inner(dep)
}
doneTargets[target] = true
}
}
go func() {
inner(target)
close(ch)
}()
return ch
}
// PrepareSource symlinks a single source file for a build rule.
func PrepareSource(sourcePath string, tmpPath string) error {
dir := path.Dir(tmpPath)
if !PathExists(dir) {
if err := os.MkdirAll(dir, DirPermissions); err != nil {
return err
}
}
if !PathExists(sourcePath) {
return fmt.Errorf("Source file %s doesn't exist", sourcePath)
}
return fs.RecursiveLink(sourcePath, tmpPath, 0)
}
// PrepareSourcePair prepares a source file for a build.
func PrepareSourcePair(pair SourcePair) error {
if path.IsAbs(pair.Src) {
return PrepareSource(pair.Src, pair.Tmp)
}
return PrepareSource(path.Join(RepoRoot, pair.Src), pair.Tmp)
}
// CollapseHash combines our usual four-part hash into one by XOR'ing them together.
// This helps keep things short in places where sometimes we get complaints about filenames being
// too long (this is most noticeable on e.g. Ubuntu with an encrypted home directory, but
// not an entire encrypted disk) and where we don't especially care about breaking out the
// individual parts of hashes, which is important for many parts of the system.
func CollapseHash(key []byte) []byte {
short := [sha1.Size]byte{}
// We store the rule hash twice, if it's repeated we must make sure not to xor it
// against itself.
if bytes.Equal(key[0:sha1.Size], key[sha1.Size:2*sha1.Size]) {
for i := 0; i < sha1.Size; i++ {
short[i] = key[i] ^ key[i+2*sha1.Size] ^ key[i+3*sha1.Size]
}
} else {
for i := 0; i < sha1.Size; i++ {
short[i] = key[i] ^ key[i+sha1.Size] ^ key[i+2*sha1.Size] ^ key[i+3*sha1.Size]
}
}
return short[:]
}
// LookPath does roughly the same as exec.LookPath, i.e. looks for the named file on the path.
// The main difference is that it looks based on our config which isn't necessarily the same
// as the external environment variable.
func LookPath(filename string, paths []string) (string, error) {
for _, p := range paths {
for _, p2 := range strings.Split(p, ":") {
p3 := path.Join(p2, filename)
if _, err := os.Stat(p3); err == nil {
return p3, nil
}
}
}
return "", fmt.Errorf("%s not found in path %s", filename, strings.Join(paths, ":"))
}
// LookBuildPath is like LookPath but takes the config's build path into account.
func LookBuildPath(filename string, config *Configuration) (string, error) {
return LookPath(filename, config.Path())
}
// PathExists is an alias to fs.PathExists.
// TODO(peterebden): Remove and migrate everything over.
func PathExists(filename string) bool {
return fs.PathExists(filename)
}
| 1 | 10,123 | Had to rename this because `InitialPackage()` already existed. | thought-machine-please | go |
@@ -19,4 +19,15 @@ module IntegrationSpecHelper
}
)
end
+
+ def with_18f_procurement_env_variables(setup_vars=nil)
+ old_approver_email = ENV['GSA18F_APPROVER_EMAIL']
+ old_purchaser_email = ENV['GSA18F_PURCHASER_EMAIL']
+
+ ENV['GSA18F_APPROVER_EMAIL'] = '[email protected]'
+ ENV['GSA18F_PURCHASER_EMAIL'] = '[email protected]'
+ yield
+ ENV['GSA18F_APPROVER_EMAIL'] = old_approver_email
+ ENV['GSA18F_PURCHASER_EMAIL'] = old_purchaser_email
+ end
end | 1 | module IntegrationSpecHelper
def setup_mock_auth(service_name=:myusa, user=FactoryGirl.create(:user))
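    # Register a mocked OmniAuth response so the stubbed login resolves to the given user's details.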
OmniAuth.config.mock_auth[service_name] = OmniAuth::AuthHash.new(
provider: service_name.to_s,
raw_info: {
'name' => "George Jetson"
},
uid: '12345',
nickname: 'georgejetsonmyusa',
extra: {
'raw_info' => {
'email' => user.email_address,
'first_name' => user.first_name,
'last_name' => user.last_name
}
},
credentials: {
'token' => '1a2b3c4d'
}
)
end
end
| 1 | 13,225 | Can we put this in a different helper? | 18F-C2 | rb |
@@ -603,6 +603,9 @@ insert_push_all_registers(dcontext_t *dcontext, clean_call_info_t *cci,
DR_REG_LIST_LENGTH_ARM, DR_REG_LIST_ARM));
}
dstack_offs += 15 * XSP_SZ;
+
+    /* Make dstack_offs 8-byte aligned, as we only accounted for 31 4-byte slots */
+ dstack_offs += XSP_SZ;
ASSERT(cci->skip_save_flags ||
cci->num_simd_skip != 0 ||
cci->num_regs_skip != 0 || | 1 | /* **********************************************************
* Copyright (c) 2014-2017 Google, Inc. All rights reserved.
* Copyright (c) 2016 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "../globals.h"
#include "arch.h"
#include "instr_create.h"
#include "instrument.h" /* instrlist_meta_preinsert */
#include "../clean_call_opt.h"
#include "disassemble.h"
/* Make code more readable by shortening long lines.
* We mark everything we add as non-app instr.
*/
#define POST instrlist_meta_postinsert
#define PRE instrlist_meta_preinsert
/* For ARM and AArch64, we always use TLS and never use hardcoded
* dcontext (xref USE_SHARED_GENCODE_ALWAYS() and -private_ib_in_tls).
* Thus we use instr_create_{save_to,restore_from}_tls() directly.
*/
#ifdef AARCH64
/* Defined in aarch64.asm. */
void icache_op_ic_ivau_asm(void);
void icache_op_isb_asm(void);
typedef struct ALIGN_VAR(16) _icache_op_struct_t {
/* This flag is set if any icache lines have been invalidated. */
unsigned int flag;
/* The lower half of the address of "lock" must be non-zero as we want to
* acquire the lock using only two free registers and STXR Ws, Wt, [Xn]
* requires s != t and s != n, so we use t == n. With this ordering of the
* members alignment guarantees that bit 2 of the address of "lock" is set.
*/
unsigned int lock;
/* The icache line size. This is discovered using the system register
* ctr_el0 and will be (1 << (2 + n)) with 0 <= n < 16.
*/
size_t linesize;
/* If these are equal then no icache lines have been invalidated. Otherwise
* they are both aligned to the icache line size and describe a set of
* consecutive icache lines (which could wrap around the top of memory).
*/
void *begin, *end;
/* Some space to spill registers. */
ptr_uint_t spill[2];
} icache_op_struct_t;
/* Used in aarch64.asm. */
icache_op_struct_t icache_op_struct;
#endif
void
mangle_arch_init(void)
{
#ifdef AARCH64
/* Check address of "lock" is unaligned. See comment in icache_op_struct_t. */
ASSERT(!ALIGNED(&icache_op_struct.lock, 16));
#endif
}
byte *
remangle_short_rewrite(dcontext_t *dcontext,
instr_t *instr, byte *pc, app_pc target)
{
#ifdef AARCH64
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
#else
uint mangled_sz = CTI_SHORT_REWRITE_LENGTH;
uint raw_jmp;
ASSERT(instr_is_cti_short_rewrite(instr, pc));
if (target == NULL)
target = decode_raw_jmp_target(dcontext, pc + CTI_SHORT_REWRITE_B_OFFS);
instr_set_target(instr, opnd_create_pc(target));
instr_allocate_raw_bits(dcontext, instr, mangled_sz);
instr_set_raw_bytes(instr, pc, mangled_sz);
encode_raw_jmp(dr_get_isa_mode(dcontext), target, (byte *)&raw_jmp,
pc + CTI_SHORT_REWRITE_B_OFFS);
instr_set_raw_word(instr, CTI_SHORT_REWRITE_B_OFFS, raw_jmp);
instr_set_operands_valid(instr, true);
return (pc+mangled_sz);
#endif
}
instr_t *
convert_to_near_rel_arch(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
#ifdef AARCH64
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
#else
int opcode = instr_get_opcode(instr);
if (opcode == OP_b_short) {
instr_set_opcode(instr, OP_b);
return instr;
} else if (opcode == OP_cbz || opcode == OP_cbnz) {
/* While for non-trace-mode we could get by w/o converting,
* as we use local stubs with a far-away link-through-stub
* soln needed even for regular branches and thus these would
* reach the stub, they won't reach for traces.
* Thus we mirror what x86 does for jecxz:
* cbz foo
* =>
* cbnz fall
* jmp foo
* fall:
*
* The fact that we invert the cbr ends up requiring extra logic
* in linkstub_cbr_disambiguate().
*/
app_pc target = NULL;
uint mangled_sz, offs, raw_jmp;
reg_id_t src_reg;
if (ilist != NULL) {
/* PR 266292: for meta instrs, insert separate instrs */
opnd_t tgt = instr_get_target(instr);
instr_t *fall = INSTR_CREATE_label(dcontext);
instr_t *jmp = INSTR_CREATE_b(dcontext, tgt);
ASSERT(instr_is_meta(instr));
/* reverse order */
instrlist_meta_postinsert(ilist, instr, fall);
instrlist_meta_postinsert(ilist, instr, jmp);
instrlist_meta_postinsert(ilist, instr, instr);
instr_set_target(instr, opnd_create_instr(fall));
instr_invert_cbr(instr);
return jmp; /* API specifies we return the long-reach cti */
}
if (opnd_is_near_pc(instr_get_target(instr)))
target = opnd_get_pc(instr_get_target(instr));
else if (opnd_is_near_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
/* XXX: not using get_app_instr_xl8() b/c drdecodelib doesn't link
* mangle_shared.c.
*/
target = instr_get_translation(tgt);
if (target == NULL && instr_raw_bits_valid(tgt))
target = instr_get_raw_bits(tgt);
ASSERT(target != NULL);
} else
ASSERT_NOT_REACHED();
/* PR 251646: cti_short_rewrite: target is in src0, so operands are
* valid, but raw bits must also be valid, since they hide the multiple
* instrs. For x64, it is marked for re-relativization, but it's
* special since the target must be obtained from src0 and not
* from the raw bits (since that might not reach).
*/
/* query IR before we set raw bits */
ASSERT(opnd_is_reg(instr_get_src(instr, 1)));
src_reg = opnd_get_reg(instr_get_src(instr, 1));
/* need 6 bytes */
mangled_sz = CTI_SHORT_REWRITE_LENGTH;
instr_allocate_raw_bits(dcontext, instr, mangled_sz);
offs = 0;
/* first 2 bytes: cbz or cbnz to "cur pc" + 2 which means immed is 1 */
instr_set_raw_byte(instr, offs, 0x08 | (src_reg - DR_REG_R0));
offs++;
instr_set_raw_byte(instr, offs, (opcode == OP_cbz) ? CBNZ_BYTE_A : CBZ_BYTE_A);
offs++;
/* next 4 bytes: b to target */
ASSERT(offs == CTI_SHORT_REWRITE_B_OFFS);
encode_raw_jmp(dr_get_isa_mode(dcontext),
instr->bytes + offs /*not target, b/c may not reach*/,
(byte *)&raw_jmp, instr->bytes + offs);
instr_set_raw_word(instr, offs, raw_jmp);
offs += sizeof(int);
ASSERT(offs == mangled_sz);
LOG(THREAD, LOG_INTERP, 2, "convert_to_near_rel: cbz/cbnz opcode\n");
/* original target operand is still valid */
instr_set_operands_valid(instr, true);
return instr;
}
ASSERT_NOT_REACHED();
return instr;
#endif
}
/***************************************************************************/
#if !defined(STANDALONE_DECODER)
void
insert_clear_eflags(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr)
{
/* On ARM/AArch64 no known calling convention requires any of the
* flags to be zero on entry to a function, so there is nothing to do.
*/
}
# ifdef AARCH64
/* Maximum positive immediate offset for STP/LDP with 64 bit registers. */
# define MAX_STP_OFFSET 504
/* Creates a memory reference for registers saved/restored to memory. */
static opnd_t
create_base_disp_for_save_restore(uint base_reg, bool is_single_reg, bool is_gpr,
uint num_saved, callee_info_t *ci)
{
    /* opsz depends on the kind of register and whether a single register or
* a pair of registers is saved/restored using stp/ldp.
*/
uint opsz;
if (is_gpr) {
if (is_single_reg)
opsz = OPSZ_8;
else
opsz = OPSZ_16;
} else {
if (is_single_reg)
opsz = OPSZ_16;
else
opsz = OPSZ_32;
}
uint offset = num_saved * (is_gpr ? sizeof(reg_t) : sizeof(dr_simd_t));
return opnd_create_base_disp(base_reg, DR_REG_NULL, 0, offset, opsz);
}
static instr_t*
create_load_or_store_instr(dcontext_t *dcontext, reg_id_t reg, opnd_t mem, bool save)
{
if (save) {
return INSTR_CREATE_str(dcontext, mem, opnd_create_reg(reg));
}
return INSTR_CREATE_ldr(dcontext, opnd_create_reg(reg), mem);
}
/* Creates code to save or restore GPR or SIMD registers to memory starting at
* base_reg. Uses stp/ldp to save/restore as many register pairs to memory as possible
* and uses a single str/ldp for the last register in case the number of registers
* is odd. Optionally takes reg_skip into account.
*/
static void
insert_save_or_restore_registers(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool *reg_skip, reg_id_t base_reg, reg_id_t first_reg,
bool save, bool is_gpr,
opnd_t (*get_mem_opnd)(uint base_reg,
bool is_single_reg, bool is_gpr,
uint num_saved, callee_info_t *ci),
callee_info_t *ci)
{
uint i, reg1 = UINT_MAX, num_regs = is_gpr ? 30 : 32;
uint saved_regs = 0;
instr_t *new_instr;
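    /* reg1 records the index of the first register in a pending stp/ldp pair; UINT_MAX means none is pending. */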
/* Use stp/ldp to save/restore as many register pairs to memory, skipping
* registers according to reg_skip.
*/
for (i = 0; i < num_regs; i += 1) {
if (reg_skip != NULL && reg_skip[i])
continue;
if (reg1 == UINT_MAX)
reg1 = i;
else {
opnd_t mem1 = get_mem_opnd(base_reg, false /* is_single_reg */, is_gpr,
/* When creating save/restore instructions
* for inlining, we need the register id
* to compute the address.
*/
ci != NULL ? first_reg + (reg_id_t)reg1
: saved_regs, ci);
uint disp = opnd_get_disp(mem1);
/* We cannot use STP/LDP if the immediate offset is too big. */
if (disp > MAX_STP_OFFSET) {
PRE(ilist, instr, create_load_or_store_instr(dcontext, first_reg + reg1,
mem1, save));
opnd_t mem2 = get_mem_opnd(base_reg, false /* is_single_reg */, is_gpr,
/* When creating save/restore instructions
* for inlining, we need the register id
* to compute the address.
*/
ci != NULL ? first_reg + (reg_id_t)i
: saved_regs, ci);
PRE(ilist, instr, create_load_or_store_instr(dcontext, first_reg + i,
mem2, save));
} else {
if (save) {
new_instr = INSTR_CREATE_stp(dcontext, mem1,
opnd_create_reg(first_reg + reg1),
opnd_create_reg(first_reg + i));
} else {
new_instr = INSTR_CREATE_ldp(dcontext,
opnd_create_reg(first_reg + reg1),
opnd_create_reg(first_reg + i), mem1);
}
PRE(ilist, instr, new_instr);
}
reg1 = UINT_MAX;
saved_regs += 2;
}
}
/* Use str/ldr to save/restore last single register to memory if the number
* of registers to save/restore is odd.
*/
if (reg1 != UINT_MAX) {
opnd_t mem = get_mem_opnd(base_reg, true /* is_single_reg */, is_gpr,
ci != NULL ? first_reg + (reg_id_t)reg1 : saved_regs,
ci);
PRE(ilist, instr, create_load_or_store_instr(dcontext, first_reg + reg1, mem,
save));
}
}
static void
insert_save_registers(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool *reg_skip, reg_id_t base_reg, reg_id_t first_reg,
bool is_gpr)
{
insert_save_or_restore_registers(dcontext, ilist, instr, reg_skip, base_reg,
first_reg, true /* save */, is_gpr,
create_base_disp_for_save_restore, NULL);
}
static void
insert_restore_registers(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool *reg_skip, reg_id_t base_reg, reg_id_t first_reg,
bool is_gpr)
{
insert_save_or_restore_registers(dcontext, ilist, instr, reg_skip, base_reg,
first_reg, false /* restore */, is_gpr,
create_base_disp_for_save_restore, NULL);
}
static opnd_t
inline_get_mem_opnd(uint base_reg, bool is_single_reg, bool is_gpr, uint reg_id,
callee_info_t *ci)
{
return callee_info_slot_opnd(ci, SLOT_REG, reg_id);
}
void
insert_save_inline_registers(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool *reg_skip,reg_id_t first_reg, bool is_gpr, void *ci)
{
insert_save_or_restore_registers(dcontext, ilist, instr, reg_skip, 0,
first_reg, true /* save */, is_gpr,
inline_get_mem_opnd, (callee_info_t *)ci);
}
void
insert_restore_inline_registers(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool *reg_skip, reg_id_t first_reg, bool is_gpr, void *ci)
{
insert_save_or_restore_registers(dcontext, ilist, instr, reg_skip, 0,
first_reg, false /* restore */, is_gpr,
inline_get_mem_opnd, (callee_info_t *)ci);
}
# endif
/* Pushes not only the GPRs but also simd regs, xip, and xflags, in
* priv_mcontext_t order.
* The current stack pointer alignment should be passed. Use 1 if
* unknown (NOT 0).
* Returns the amount of data pushed. Does NOT fix up the xsp value pushed
* to be the value prior to any pushes for x64 as no caller needs that
* currently (they all build a priv_mcontext_t and have to do further xsp
* fixups anyway).
* Does NOT push the app's value of the stolen register.
* If scratch is REG_NULL, spills a register for scratch space.
*/
uint
insert_push_all_registers(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr,
uint alignment, opnd_t push_pc, reg_id_t scratch/*optional*/
_IF_AARCH64(bool out_of_line))
{
uint dstack_offs = 0;
# ifdef AARCH64
uint max_offs;
# endif
if (cci == NULL)
cci = &default_clean_call_info;
if (cci->preserve_mcontext || cci->num_simd_skip != NUM_SIMD_REGS) {
/* FIXME i#1551: once we add skipping of regs, need to keep shape here */
}
/* FIXME i#1551: once we have cci->num_simd_skip, skip this if possible */
# ifdef AARCH64
/* X0 is used to hold the stack pointer. */
cci->reg_skip[DR_REG_X0 - DR_REG_START_GPR] = false;
/* X1 and X2 are used to save and restore the status and control registers. */
cci->reg_skip[DR_REG_X1 - DR_REG_START_GPR] = false;
cci->reg_skip[DR_REG_X2 - DR_REG_START_GPR] = false;
/* X11 is used to calculate the target address of the clean call. */
cci->reg_skip[DR_REG_X11 - DR_REG_START_GPR] = false;
max_offs = get_clean_call_switch_stack_size();
/* For out-of-line clean calls, the stack pointer is adjusted before jumping
* to this code.
*/
if (!out_of_line) {
/* sub sp, sp, #clean_call_switch_stack_size */
PRE(ilist, instr, XINST_CREATE_sub(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT16(max_offs)));
}
/* Push GPRs. */
insert_save_registers(dcontext, ilist, instr, cci->reg_skip, DR_REG_SP, DR_REG_X0,
true /* is_gpr */);
dstack_offs += 32 * XSP_SZ;
/* mov x0, sp */
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_X0),
opnd_create_reg(DR_REG_SP)));
/* For out-of-line clean calls, X30 is saved before jumping to this code,
* because it is used for the return address.
*/
if (!out_of_line) {
/* stp x30, x0, [sp, #x30_offset] */
PRE(ilist, instr,
INSTR_CREATE_stp(dcontext,
opnd_create_base_disp(DR_REG_SP, DR_REG_NULL, 0,
REG_OFFSET(DR_REG_X30), OPSZ_16),
opnd_create_reg(DR_REG_X30),
opnd_create_reg(DR_REG_X0)));
}
/* add x0, x0, #dstack_offs */
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_X0),
OPND_CREATE_INT16(dstack_offs)));
/* save the push_pc operand to the priv_mcontext_t.pc field */
if (!(cci->skip_save_flags)) {
if (opnd_is_immed_int(push_pc)) {
PRE(ilist, instr,
XINST_CREATE_load_int(dcontext, opnd_create_reg(DR_REG_X1), push_pc));
} else {
ASSERT(opnd_is_reg(push_pc));
reg_id_t push_pc_reg = opnd_get_reg(push_pc);
/* push_pc opnd is already pushed on the stack */
/* ldr x1, [sp, #push_pc_offset] */
PRE(ilist, instr,
INSTR_CREATE_ldr(dcontext, opnd_create_reg(DR_REG_X1),
OPND_CREATE_MEM64(DR_REG_SP,
REG_OFFSET(push_pc_reg))));
}
/* str x1, [sp, #dstack_offset] */
PRE(ilist, instr,
INSTR_CREATE_str(dcontext,
OPND_CREATE_MEM64(DR_REG_SP, dstack_offs),
opnd_create_reg(DR_REG_X1)));
}
dstack_offs += XSP_SZ;
/* Save flag values using x1, x2. */
/* mrs x1, nzcv */
PRE(ilist, instr, INSTR_CREATE_mrs(dcontext, opnd_create_reg(DR_REG_X1),
opnd_create_reg(DR_REG_NZCV)));
/* mrs x2, fpcr */
PRE(ilist, instr, INSTR_CREATE_mrs(dcontext, opnd_create_reg(DR_REG_X2),
opnd_create_reg(DR_REG_FPCR)));
/* stp w1, w2, [x0, #8] */
PRE(ilist, instr,
INSTR_CREATE_stp(dcontext, OPND_CREATE_MEM64(DR_REG_X0, 8),
opnd_create_reg(DR_REG_W1), opnd_create_reg(DR_REG_W2)));
/* mrs x1, fpsr */
PRE(ilist, instr, INSTR_CREATE_mrs(dcontext, opnd_create_reg(DR_REG_X1),
opnd_create_reg(DR_REG_FPSR)));
/* str w1, [x0, #16] */
PRE(ilist, instr,
INSTR_CREATE_str(dcontext,
OPND_CREATE_MEM32(DR_REG_X0, 16),
opnd_create_reg(DR_REG_W1)));
/* The three flag registers take 12 bytes. */
dstack_offs += 12;
/* The SIMD register data is 16-byte-aligned. */
dstack_offs = ALIGN_FORWARD(dstack_offs, 16);
/* add x0, x0, #(dstack_offs - prev_dstack_offs) */
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_X0),
OPND_CREATE_INT16(dstack_offs - 32 * XSP_SZ)));
/* Push SIMD registers. */
insert_save_registers(dcontext, ilist, instr, cci->simd_skip, DR_REG_X0, DR_REG_Q0,
false /* is_gpr */);
dstack_offs += (NUM_SIMD_SLOTS * sizeof(dr_simd_t));
/* Restore the registers we used. */
/* ldp x0, x1, [sp] */
PRE(ilist, instr,
INSTR_CREATE_ldp(dcontext,
opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X1),
opnd_create_base_disp(DR_REG_SP, DR_REG_NULL, 0, 0, OPSZ_16)));
/* ldr x2, [sp, #x2_offset] */
PRE(ilist, instr,
INSTR_CREATE_ldr(dcontext,
opnd_create_reg(DR_REG_X2),
opnd_create_base_disp(DR_REG_SP, DR_REG_NULL, 0,
REG_OFFSET(DR_REG_X2), OPSZ_8)));
# else
/* vstmdb always does writeback */
PRE(ilist, instr, INSTR_CREATE_vstmdb(dcontext, OPND_CREATE_MEMLIST(DR_REG_SP),
SIMD_REG_LIST_LEN, SIMD_REG_LIST_16_31));
PRE(ilist, instr, INSTR_CREATE_vstmdb(dcontext, OPND_CREATE_MEMLIST(DR_REG_SP),
SIMD_REG_LIST_LEN, SIMD_REG_LIST_0_15));
    dstack_offs += NUM_SIMD_SLOTS * sizeof(dr_simd_t);
/* pc and aflags */
if (cci->skip_save_flags) {
/* even if we skip flag saves we want to keep mcontext shape */
int offs_beyond_xmm = 2 * XSP_SZ;
dstack_offs += offs_beyond_xmm;
PRE(ilist, instr, XINST_CREATE_sub(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT(offs_beyond_xmm)));
} else {
uint slot = TLS_REG0_SLOT;
bool spill = scratch == REG_NULL;
if (spill) {
scratch = DR_REG_R0;
if (opnd_is_reg(push_pc) && opnd_get_reg(push_pc) == scratch) {
scratch = DR_REG_R1;
slot = TLS_REG1_SLOT;
}
}
/* XXX: actually, r0 was just used as scratch for swapping stack
* via dcontext, so an optimization opportunity exists to avoid
* that restore and the re-spill here.
*/
if (spill)
PRE(ilist, instr, instr_create_save_to_tls(dcontext, scratch, slot));
PRE(ilist, instr, INSTR_CREATE_mrs(dcontext, opnd_create_reg(scratch),
opnd_create_reg(DR_REG_CPSR)));
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(scratch)));
dstack_offs += XSP_SZ;
if (opnd_is_immed_int(push_pc)) {
PRE(ilist, instr, XINST_CREATE_load_int(dcontext, opnd_create_reg(scratch),
push_pc));
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(scratch)));
} else {
ASSERT(opnd_is_reg(push_pc));
PRE(ilist, instr, INSTR_CREATE_push(dcontext, push_pc));
}
if (spill)
PRE(ilist, instr, instr_create_restore_from_tls(dcontext, scratch, slot));
dstack_offs += XSP_SZ;
}
/* We rely on dr_get_mcontext_priv() to fill in the app's stolen reg value
* and sp value.
*/
if (dr_get_isa_mode(dcontext) == DR_ISA_ARM_THUMB) {
/* We can't use sp with stm */
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(DR_REG_LR)));
/* We can't push sp w/ writeback, and in fact dr_get_mcontext() gets
* sp from the stack swap so we can leave this empty.
*/
PRE(ilist, instr, XINST_CREATE_sub(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT(XSP_SZ)));
PRE(ilist, instr, INSTR_CREATE_stmdb_wb(dcontext, OPND_CREATE_MEMLIST(DR_REG_SP),
DR_REG_LIST_LENGTH_T32, DR_REG_LIST_T32));
} else {
PRE(ilist, instr,
INSTR_CREATE_stmdb_wb(dcontext, OPND_CREATE_MEMLIST(DR_REG_SP),
DR_REG_LIST_LENGTH_ARM, DR_REG_LIST_ARM));
}
dstack_offs += 15 * XSP_SZ;
ASSERT(cci->skip_save_flags ||
cci->num_simd_skip != 0 ||
cci->num_regs_skip != 0 ||
dstack_offs == (uint)get_clean_call_switch_stack_size());
# endif
return dstack_offs;
}
/* User should pass the alignment from insert_push_all_registers: i.e., the
* alignment at the end of all the popping, not the alignment prior to
* the popping.
*/
void
insert_pop_all_registers(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr,
uint alignment _IF_AARCH64(bool out_of_line))
{
if (cci == NULL)
cci = &default_clean_call_info;
# ifdef AARCH64
uint current_offs;
/* mov x0, sp */
PRE(ilist, instr, XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_X0),
opnd_create_reg(DR_REG_SP)));
current_offs = get_clean_call_switch_stack_size() -
NUM_SIMD_SLOTS * sizeof(dr_simd_t);
/* add x0, x0, current_offs */
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_X0),
OPND_CREATE_INT32(current_offs)));
/* Pop SIMD registers. */
insert_restore_registers(dcontext, ilist, instr, cci->simd_skip, DR_REG_X0, DR_REG_Q0,
false /* is_gpr */);
/* mov x0, sp */
PRE(ilist, instr, XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_X0),
opnd_create_reg(DR_REG_SP)));
/* point x0 to push_pc field */
current_offs = (32 * XSP_SZ);
/* add x0, x0, #gpr_size */
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_X0),
OPND_CREATE_INT32(current_offs)));
/* load pc and flags */
if (!(cci->skip_save_flags)) {
/* ldp w1, w2, [x0, #8] */
PRE(ilist, instr,
INSTR_CREATE_ldp(dcontext,
opnd_create_reg(DR_REG_W1), opnd_create_reg(DR_REG_W2),
OPND_CREATE_MEM64(DR_REG_X0, 8)));
/* msr nzcv, w1 */
PRE(ilist, instr,
INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_NZCV),
opnd_create_reg(DR_REG_X1)));
/* msr fpcr, w2 */
PRE(ilist, instr,
INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_FPCR),
opnd_create_reg(DR_REG_X2)));
/* ldr w1, [x0, #16] */
PRE(ilist, instr,
INSTR_CREATE_ldr(dcontext, opnd_create_reg(DR_REG_W1),
OPND_CREATE_MEM32(DR_REG_X0,16)));
/* msr fpsr, w1 */
PRE(ilist, instr,
INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_FPSR),
opnd_create_reg(DR_REG_X1)));
}
/* Pop GPRs */
insert_restore_registers(dcontext, ilist, instr, cci->reg_skip, DR_REG_SP, DR_REG_X0,
true /* is_gpr */);
/* For out-of-line clean calls, X30 is restored after jumping back from this
* code, because it is used for the return address.
*/
if (!out_of_line) {
/* Recover x30 */
        /* ldr x30, [sp, #x30_offset] */
PRE(ilist, instr,
INSTR_CREATE_ldr(dcontext, opnd_create_reg(DR_REG_X30),
OPND_CREATE_MEM64(DR_REG_SP, REG_OFFSET(DR_REG_X30))));
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT16(get_clean_call_switch_stack_size())));
}
# else
/* We rely on dr_set_mcontext_priv() to set the app's stolen reg value,
* and the stack swap to set the sp value: we assume the stolen reg on
* the stack still has our TLS base in it.
*/
/* We can't use sp with ldm for Thumb, and we don't want to write sp for ARM. */
PRE(ilist, instr, INSTR_CREATE_ldm_wb(dcontext, OPND_CREATE_MEMLIST(DR_REG_SP),
DR_REG_LIST_LENGTH_T32, DR_REG_LIST_T32));
/* We don't want the sp value */
PRE(ilist, instr, XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT(XSP_SZ)));
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(DR_REG_LR)));
/* pc and aflags */
if (cci->skip_save_flags) {
/* even if we skip flag saves we still keep mcontext shape */
int offs_beyond_xmm = 2 * XSP_SZ;
PRE(ilist, instr, XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT(offs_beyond_xmm)));
} else {
reg_id_t scratch = DR_REG_R0;
uint slot = TLS_REG0_SLOT;
/* just throw pc slot away */
PRE(ilist, instr, XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT(XSP_SZ)));
PRE(ilist, instr, instr_create_save_to_tls(dcontext, scratch, slot));
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(scratch)));
PRE(ilist, instr, INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_CPSR),
OPND_CREATE_INT_MSR_NZCVQG(),
opnd_create_reg(scratch)));
PRE(ilist, instr, instr_create_restore_from_tls(dcontext, scratch, slot));
}
/* FIXME i#1551: once we have cci->num_simd_skip, skip this if possible */
PRE(ilist, instr, INSTR_CREATE_vldm_wb(dcontext, OPND_CREATE_MEMLIST(DR_REG_SP),
SIMD_REG_LIST_LEN, SIMD_REG_LIST_0_15));
PRE(ilist, instr, INSTR_CREATE_vldm_wb(dcontext, OPND_CREATE_MEMLIST(DR_REG_SP),
SIMD_REG_LIST_LEN, SIMD_REG_LIST_16_31));
# endif
}
# ifndef AARCH64
reg_id_t
shrink_reg_for_param(reg_id_t regular, opnd_t arg)
{
return regular;
}
# endif /* !AARCH64 */
/* Return true if opnd is a register, but not XSP, or immediate zero on AArch64. */
static bool
opnd_is_reglike(opnd_t opnd)
{
return ((opnd_is_reg(opnd) && opnd_get_reg(opnd) != DR_REG_XSP)
IF_X64(|| (opnd_is_immed_int(opnd) && opnd_get_immed_int(opnd) == 0)));
}
uint
insert_parameter_preparation(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool clean_call, uint num_args, opnd_t *args)
{
uint num_regs = num_args < NUM_REGPARM ? num_args : NUM_REGPARM;
signed char regs[NUM_REGPARM];
int usecount[NUM_REGPARM];
ptr_int_t stack_inc = 0;
uint i, j;
/* We expect every arg to be an immediate integer, a full-size register,
* or a simple memory reference (NYI).
*/
for (i = 0; i < num_args; i++) {
CLIENT_ASSERT(opnd_is_immed_int((args[i])) ||
(opnd_is_reg(args[i]) &&
reg_get_size(opnd_get_reg(args[i])) == OPSZ_PTR) ||
opnd_is_base_disp(args[i]),
"insert_parameter_preparation: bad argument type");
ASSERT_NOT_IMPLEMENTED(!opnd_is_base_disp(args[i])); /* FIXME i#2210 */
}
/* The strategy here is to first set up the arguments that can be set up
* without using a temporary register: stack arguments that are registers and
* register arguments that are not involved in a cycle. When this has been done,
* the value in the link register (LR) will be dead, so we can use LR as a
* temporary for setting up the remaining arguments.
*/
/* Set up stack arguments that are registers (not SP) or zero (on AArch64). */
if (num_args > NUM_REGPARM) {
uint n = num_args - NUM_REGPARM;
/* On both ARM and AArch64 the stack pointer is kept (2 * XSP_SZ)-aligned. */
stack_inc = ALIGN_FORWARD(n, 2) * XSP_SZ;
# ifdef AARCH64
for (i = 0; i < n; i += 2) {
opnd_t *arg0 = &args[NUM_REGPARM + i];
opnd_t *arg1 = i + 1 < n ? &args[NUM_REGPARM + i + 1] : NULL;
if (i == 0) {
if (i + 1 < n && opnd_is_reglike(*arg1)) {
/* stp x(...), x(...), [sp, #-(stack_inc)]! */
PRE(ilist, instr, instr_create_2dst_4src
(dcontext, OP_stp,
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
-stack_inc, OPSZ_16),
opnd_create_reg(DR_REG_XSP),
opnd_is_reg(*arg0) ? *arg0 : opnd_create_reg(DR_REG_XZR),
opnd_is_reg(*arg1) ? *arg1 : opnd_create_reg(DR_REG_XZR),
opnd_create_reg(DR_REG_XSP),
opnd_create_immed_int(-stack_inc, OPSZ_PTR)));
} else if (opnd_is_reglike(*arg0)) {
/* str x(...), [sp, #-(stack_inc)]! */
PRE(ilist, instr, instr_create_2dst_3src
(dcontext, OP_str,
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
-stack_inc, OPSZ_PTR),
opnd_create_reg(DR_REG_XSP),
opnd_is_reg(*arg0) ? *arg0 : opnd_create_reg(DR_REG_XZR),
opnd_create_reg(DR_REG_XSP),
opnd_create_immed_int(-stack_inc, OPSZ_PTR)));
} else {
/* sub sp, sp, #(stack_inc) */
PRE(ilist, instr, INSTR_CREATE_sub
(dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_reg(DR_REG_XSP), OPND_CREATE_INT32(stack_inc)));
}
} else if (opnd_is_reglike(*arg0)) {
if (i + 1 < n && opnd_is_reglike(*arg1)) {
/* stp x(...), x(...), [sp, #(i * XSP_SZ)] */
PRE(ilist, instr, instr_create_1dst_2src
(dcontext, OP_stp,
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
i * XSP_SZ, OPSZ_16),
opnd_is_reg(*arg0) ? *arg0 : opnd_create_reg(DR_REG_XZR),
opnd_is_reg(*arg1) ? *arg1 : opnd_create_reg(DR_REG_XZR)));
} else {
/* str x(...), [sp, #(i * XSP_SZ)] */
PRE(ilist, instr, instr_create_1dst_1src
(dcontext, OP_str,
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
i * XSP_SZ, OPSZ_PTR),
opnd_is_reg(*arg0) ? *arg0 : opnd_create_reg(DR_REG_XZR)));
}
} else if (i + 1 < n && opnd_is_reglike(*arg1)) {
/* str x(...), [sp, #((i + 1) * XSP_SZ)] */
PRE(ilist, instr, instr_create_1dst_1src
(dcontext, OP_str,
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
(i + 1) * XSP_SZ, OPSZ_PTR),
opnd_is_reg(*arg1) ? *arg1 : opnd_create_reg(DR_REG_XZR)));
}
}
# else /* ARM */
/* XXX: We could use OP_stm here, but with lots of awkward corner cases. */
PRE(ilist, instr, INSTR_CREATE_sub(dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_reg(DR_REG_XSP),
OPND_CREATE_INT32(stack_inc)));
for (i = 0; i < n; i++) {
opnd_t arg = args[NUM_REGPARM + i];
if (opnd_is_reglike(arg)) {
/* str r(...), [sp, #(i * XSP_SZ)] */
PRE(ilist, instr, XINST_CREATE_store
(dcontext, opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
i * XSP_SZ, OPSZ_PTR), arg));
}
}
# endif
}
/* Initialise regs[], which encodes the contents of parameter registers.
* A non-negative value x means regparms[x];
* -1 means an immediate integer;
* -2 means a non-parameter register.
*/
for (i = 0; i < num_regs; i++) {
if (opnd_is_immed_int(args[i]))
regs[i] = -1;
else {
reg_id_t reg = opnd_get_reg(args[i]);
regs[i] = -2;
for (j = 0; j < NUM_REGPARM; j++) {
if (reg == regparms[j]) {
regs[i] = j;
break;
}
}
}
}
/* Initialise usecount[]: how many other registers use the value in a reg. */
for (i = 0; i < num_regs; i++)
usecount[i] = 0;
for (i = 0; i < num_regs; i++) {
if (regs[i] >= 0 && regs[i] != i)
++usecount[regs[i]];
}
/* Set up register arguments that are not part of a cycle. */
{
bool changed;
do {
changed = false;
for (i = 0; i < num_regs; i++) {
if (regs[i] == i || usecount[i] != 0)
continue;
if (regs[i] == -1) {
insert_mov_immed_ptrsz(dcontext, opnd_get_immed_int(args[i]),
opnd_create_reg(regparms[i]),
ilist, instr, NULL, NULL);
} else if (regs[i] == -2 && opnd_get_reg(args[i]) == DR_REG_XSP) {
/* XXX: We could record which register has been set to the SP to
* avoid repeating this load if several arguments are set to SP.
*/
insert_get_mcontext_base(dcontext, ilist, instr, regparms[i]);
PRE(ilist, instr, instr_create_restore_from_dc_via_reg
(dcontext, regparms[i], regparms[i], XSP_OFFSET));
} else {
PRE(ilist, instr, XINST_CREATE_move(dcontext,
opnd_create_reg(regparms[i]),
args[i]));
if (regs[i] != -2)
--usecount[regs[i]];
}
regs[i] = i;
changed = true;
}
} while (changed);
}
/* From now on it is safe to use LR as a temporary. */
/* Set up register arguments that are in cycles. A rotation of n values is
* realised with (n + 1) moves.
*/
for (;;) {
int first, tmp;
for (i = 0; i < num_regs; i++) {
if (regs[i] != i)
break;
}
if (i >= num_regs)
break;
first = i;
PRE(ilist, instr, XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_LR),
opnd_create_reg(regparms[i])));
do {
tmp = regs[i];
ASSERT(0 <= tmp && tmp < num_regs);
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(regparms[i]),
tmp == first ? opnd_create_reg(DR_REG_LR) :
opnd_create_reg(regparms[tmp])));
regs[i] = i;
i = tmp;
} while (tmp != first);
}
/* Set up stack arguments that are (non-zero) constants or SP. */
for (i = NUM_REGPARM; i < num_args; i++) {
uint off = (i - NUM_REGPARM) * XSP_SZ;
opnd_t arg = args[i];
if (!opnd_is_reglike(arg)) {
if (opnd_is_reg(arg)) {
ASSERT(opnd_get_reg(arg) == DR_REG_XSP);
insert_get_mcontext_base(dcontext, ilist, instr, DR_REG_LR);
PRE(ilist, instr, instr_create_restore_from_dc_via_reg
(dcontext, DR_REG_LR, DR_REG_LR, XSP_OFFSET));
} else {
ASSERT(opnd_is_immed_int(arg));
insert_mov_immed_ptrsz(dcontext, opnd_get_immed_int(arg),
opnd_create_reg(DR_REG_LR),
ilist, instr, NULL, NULL);
}
PRE(ilist, instr, XINST_CREATE_store
(dcontext,
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0, off, OPSZ_PTR),
opnd_create_reg(DR_REG_LR)));
}
}
return (uint)stack_inc;
}
bool
insert_reachable_cti(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
byte *encode_pc, byte *target, bool jmp, bool returns, bool precise,
reg_id_t scratch, instr_t **inlined_tgt_instr)
{
ASSERT(scratch != REG_NULL); /* required */
/* load target into scratch register */
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)
PC_AS_JMP_TGT(dr_get_isa_mode(dcontext), target),
opnd_create_reg(scratch), ilist, where, NULL, NULL);
/* even if a call and not a jmp, we can skip this if it doesn't return */
if (!jmp && returns) {
PRE(ilist, where,
IF_AARCH64_ELSE(INSTR_CREATE_blr, INSTR_CREATE_blx_ind)
(dcontext, opnd_create_reg(scratch)));
} else {
PRE(ilist, where,
XINST_CREATE_jump_reg(dcontext, opnd_create_reg(scratch)));
}
return false /* an ind branch */;
}
int
insert_out_of_line_context_switch(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, bool save, byte *encode_pc)
{
# ifdef AARCH64
if (save) {
/* Reserve stack space to push the context. We do it here instead of
* in insert_push_all_registers, so we can save the original value
* of X30 on the stack before it is changed by the BL (branch & link)
* to the clean call save routine in the code cache.
*
* sub sp, sp, #clean_call_switch_stack_size
*/
PRE(ilist, instr,
XINST_CREATE_sub(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT16(get_clean_call_switch_stack_size())));
/* str x30, [sp, #x30_offset]
*
* We have to save the original value of x30 before using BLR to jump
* to the save code, because BLR will modify x30. The original value of
* x30 is restored after the returning from the save/restore functions below.
*/
PRE(ilist, instr,
INSTR_CREATE_str(dcontext,
opnd_create_base_disp(DR_REG_SP, DR_REG_NULL, 0,
REG_OFFSET(DR_REG_X30), OPSZ_8),
opnd_create_reg(DR_REG_X30)));
}
insert_mov_immed_ptrsz(dcontext,
                           (ptr_int_t) (save ? get_clean_call_save(dcontext) :
get_clean_call_restore(dcontext)),
opnd_create_reg(DR_REG_X30), ilist, instr, NULL, NULL);
PRE(ilist, instr,
INSTR_CREATE_blr(dcontext, opnd_create_reg(DR_REG_X30)));
/* Restore original value of X30, which was changed by BLR.
*
* ldr x30, [sp, #x30_offset]
*/
PRE(ilist, instr,
INSTR_CREATE_ldr(dcontext, opnd_create_reg(DR_REG_X30),
OPND_CREATE_MEM64(DR_REG_SP, REG_OFFSET(DR_REG_X30))));
if (!save) {
/* add sp, sp, #clean_call_switch_stack_size */
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(DR_REG_SP),
OPND_CREATE_INT16(get_clean_call_switch_stack_size())));
}
return get_clean_call_switch_stack_size();
# else
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1621: NYI on AArch32. */
return 0;
# endif
}
/*###########################################################################
*###########################################################################
*
* M A N G L I N G R O U T I N E S
*/
/* forward declaration */
static void
mangle_stolen_reg(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr, bool instr_to_be_removed);
# ifndef AARCH64
/* i#1662 optimization: we try to pick the same scratch register during
* mangling to provide more opportunities for optimization,
* xref insert_save_to_tls_if_necessary().
*
* Returns the prev reg restore instruction.
*/
static instr_t *
find_prior_scratch_reg_restore(dcontext_t *dcontext, instr_t *instr, reg_id_t *prior_reg)
{
instr_t *prev = instr_get_prev(instr);
bool tls, spill;
ASSERT(prior_reg != NULL);
*prior_reg = REG_NULL;
if (INTERNAL_OPTION(opt_mangle) == 0)
return NULL;
while (prev != NULL &&
/* We can eliminate the restore/respill pair only if they are executed
* together, so only our own mangling label instruction is allowed in
* between.
*/
instr_is_label(prev) && instr_is_our_mangling(prev))
prev = instr_get_prev(prev);
if (prev != NULL &&
instr_is_DR_reg_spill_or_restore(dcontext, prev, &tls, &spill, prior_reg)) {
if (tls && !spill &&
*prior_reg >= SCRATCH_REG0 && *prior_reg <= SCRATCH_REG_LAST)
return prev;
}
*prior_reg = REG_NULL;
return NULL;
}
# endif /* !AARCH64 */
/* optimized spill: only if not immediately spilled already */
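/* Illustrative example (ARM): if the immediately preceding mangling ended with
 * "ldr r0, [r0_tls_slot]" (a restore of our scratch reg) and we now want to
 * spill r0 to that same slot again, the restore+re-spill pair is redundant, so
 * we delete the prior restore and skip inserting the new spill.
 */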
static void
insert_save_to_tls_if_necessary(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *where, reg_id_t reg, ushort slot)
{
# ifdef AARCH64
/* FIXME i#1569: not yet optimized */
PRE(ilist, where, instr_create_save_to_tls(dcontext, reg, slot));
# else
instr_t *prev;
reg_id_t prior_reg;
DEBUG_DECLARE(bool tls;)
DEBUG_DECLARE(bool spill;)
/* this routine is only called for non-mbr mangling */
STATS_INC(non_mbr_spills);
prev = find_prior_scratch_reg_restore(dcontext, where, &prior_reg);
if (INTERNAL_OPTION(opt_mangle) > 0 && prev != NULL && prior_reg == reg) {
ASSERT(instr_is_DR_reg_spill_or_restore(dcontext, prev, &tls,
&spill, &prior_reg) &&
tls && !spill && prior_reg == reg);
/* remove the redundant restore-spill pair */
instrlist_remove(ilist, prev);
instr_destroy(dcontext, prev);
STATS_INC(non_mbr_respill_avoided);
} else {
PRE(ilist, where, instr_create_save_to_tls(dcontext, reg, slot));
}
# endif
}
# ifndef AARCH64
/* If instr is inside an IT block, removes it from the block and
* leaves it as an isolated (un-encodable) predicated instr, with any
* other instrs from the same block made to be legal on both sides by
* modifying and adding new OP_it instrs as necessary, which are marked
* as app instrs.
* Returns a new next_instr.
*/
static instr_t *
mangle_remove_from_it_block(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
instr_t *prev, *it;
uint prior, count;
if (instr_get_isa_mode(instr) != DR_ISA_ARM_THUMB || !instr_is_predicated(instr))
return instr_get_next(instr); /* nothing to do */
for (prior = 0, prev = instr_get_prev(instr); prev != NULL;
prior++, prev = instr_get_prev(prev)) {
if (instr_get_opcode(prev) == OP_it)
break;
ASSERT(instr_is_predicated(prev));
}
ASSERT(prev != NULL);
it = prev;
count = instr_it_block_get_count(it);
ASSERT(count > prior && count <= IT_BLOCK_MAX_INSTRS);
if (prior > 0) {
instrlist_preinsert
(ilist, it, instr_it_block_create
(dcontext, instr_it_block_get_pred(it, 0),
prior > 1 ? instr_it_block_get_pred(it, 1) : DR_PRED_NONE,
prior > 2 ? instr_it_block_get_pred(it, 2) : DR_PRED_NONE,
DR_PRED_NONE));
count -= prior;
}
count--; /* this instr */
if (count > 0) {
instrlist_postinsert
(ilist, instr, instr_it_block_create
(dcontext, instr_it_block_get_pred(it, prior + 1),
count > 1 ? instr_it_block_get_pred(it, prior + 2) : DR_PRED_NONE,
count > 2 ? instr_it_block_get_pred(it, prior + 3) : DR_PRED_NONE,
DR_PRED_NONE));
}
/* It is now safe to remove the original OP_it instr */
instrlist_remove(ilist, it);
instr_destroy(dcontext, it);
DOLOG(5, LOG_INTERP, {
LOG(THREAD, LOG_INTERP, 4, "bb ilist after removing from IT block:\n");
instrlist_disassemble(dcontext, NULL, ilist, THREAD);
});
return instr_get_next(instr);
}
/* Adds enough OP_it instrs to ensure that each predicated instr in [start, end)
* (open-ended, so pass NULL to go to the final instr in ilist) is inside an IT
* block and is thus legally encodable. Marks the OP_it instrs as app instrs.
*/
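/* For example (illustrative): three consecutive Thumb instrs predicated
 * EQ, EQ, NE that ended up outside any OP_it would get a single preceding
 * "ITTE EQ" (block_pred = {EQ, EQ, NE}), making them legally encodable again.
 */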
int
reinstate_it_blocks(dcontext_t *dcontext, instrlist_t *ilist, instr_t *start,
instr_t *end)
{
instr_t *instr, *block_start = NULL;
app_pc block_xl8 = NULL;
int res = 0;
uint it_count = 0, block_count = 0;
dr_pred_type_t block_pred[IT_BLOCK_MAX_INSTRS];
for (instr = start; instr != NULL && instr != end; instr = instr_get_next(instr)) {
bool instr_predicated = instr_is_predicated(instr) &&
/* A label instruction may be used as a cti target, so we stop
* the IT block on label instructions.
*/
!instr_is_label(instr) &&
/* Do not put OP_b exit cti into block: patch_branch can't handle */
instr_get_opcode(instr) != OP_b &&
instr_get_opcode(instr) != OP_b_short;
if (block_start != NULL) {
bool matches = true;
ASSERT(block_count < IT_BLOCK_MAX_INSTRS);
if (instr_predicated) {
if (instr_get_predicate(instr) != block_pred[0] &&
instr_get_predicate(instr) != instr_invert_predicate(block_pred[0]))
matches = false;
else
block_pred[block_count++] = instr_get_predicate(instr);
}
if (!matches || !instr_predicated || block_count == IT_BLOCK_MAX_INSTRS ||
/* i#1702: a cti must end the IT-block */
instr_is_cti(instr)) {
res++;
instrlist_preinsert
(ilist, block_start, INSTR_XL8(instr_it_block_create
(dcontext, block_pred[0],
block_count > 1 ? block_pred[1] : DR_PRED_NONE,
block_count > 2 ? block_pred[2] : DR_PRED_NONE,
block_count > 3 ? block_pred[3] : DR_PRED_NONE), block_xl8));
block_start = NULL;
if (instr_predicated && matches)
continue;
} else
continue;
}
/* Skip existing IT blocks.
* XXX: merge w/ adjacent blocks.
*/
if (it_count > 0)
it_count--;
else if (instr_get_opcode(instr) == OP_it)
it_count = instr_it_block_get_count(instr);
else if (instr_predicated) {
instr_t *app;
block_start = instr;
block_pred[0] = instr_get_predicate(instr);
block_count = 1;
/* XXX i#1695: we want the xl8 to be the original app IT instr, if
* it existed, as using the first instr inside the block will not
* work on relocation. Should we insert labels to keep that info
* when we remove IT instrs?
*/
for (app = instr; app != NULL && instr_get_app_pc(app) == NULL;
app = instr_get_next(app))
/*nothing*/;
if (app != NULL)
block_xl8 = instr_get_app_pc(app);
else
block_xl8 = NULL;
}
}
if (block_start != NULL) {
res++;
instrlist_preinsert
(ilist, block_start, INSTR_XL8(instr_it_block_create
(dcontext, block_pred[0],
block_count > 1 ? block_pred[1] : DR_PRED_NONE,
block_count > 2 ? block_pred[2] : DR_PRED_NONE,
block_count > 3 ? block_pred[3] : DR_PRED_NONE), block_xl8));
}
return res;
}
static void
mangle_reinstate_it_blocks(dcontext_t *dcontext, instrlist_t *ilist, instr_t *start,
instr_t *end)
{
if (dr_get_isa_mode(dcontext) != DR_ISA_ARM_THUMB)
return; /* nothing to do */
reinstate_it_blocks(dcontext, ilist, start, end);
DOLOG(5, LOG_INTERP, {
LOG(THREAD, LOG_INTERP, 4, "bb ilist after reinstating IT blocks:\n");
instrlist_disassemble(dcontext, NULL, ilist, THREAD);
});
}
# endif /* !AARCH64 */
void
insert_mov_immed_arch(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
ptr_int_t val, opnd_t dst,
instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
# ifdef AARCH64
instr_t *mov;
int i;
CLIENT_ASSERT(opnd_is_reg(dst),
"AArch64 cannot store an immediate direct to memory");
if (opnd_get_reg(dst) == DR_REG_XZR) {
/* Moving a value to the zero register is a no-op. We insert nothing,
* so *first and *last are set to NULL. Caller beware!
*/
if (first != NULL)
*first = NULL;
if (last != NULL)
*last = NULL;
return;
}
ASSERT((uint)(opnd_get_reg(dst) - DR_REG_X0) < 31);
if (src_inst != NULL)
val = (ptr_int_t)encode_estimate;
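    /* Illustrative expansion (not exact encodings): for val = 0x12345678 we
     * emit "movz x(dst), #0x5678" followed by "movk x(dst), #0x1234, lsl #16";
     * 16-bit chunks that are zero beyond the first are skipped entirely.
     */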
/* movz x(dst), #(val & 0xffff) */
mov = INSTR_CREATE_movz(dcontext, dst,
OPND_CREATE_INT16(val & 0xffff), OPND_CREATE_INT8(0));
PRE(ilist, instr, mov);
if (first != NULL)
*first = mov;
for (i = 1; i < 4; i++) {
if ((val >> (16 * i) & 0xffff) != 0) {
/* movk x(dst), #(val >> sh & 0xffff), lsl #(sh) */
mov = INSTR_CREATE_movk(dcontext, dst,
OPND_CREATE_INT16((val >> 16 * i) & 0xffff),
OPND_CREATE_INT8(i * 16));
PRE(ilist, instr, mov);
}
}
if (last != NULL)
*last = mov;
# else
instr_t *mov1, *mov2;
if (src_inst != NULL)
val = (ptr_int_t) encode_estimate;
CLIENT_ASSERT(opnd_is_reg(dst), "ARM cannot store an immediate direct to memory");
/* MVN writes the bitwise inverse of an immediate value to the dst register */
/* XXX: we could check for larger tile/rotate immed patterns */
if (src_inst == NULL && ~val >= 0 && ~val <= 0xff) {
mov1 = INSTR_CREATE_mvn(dcontext, dst, OPND_CREATE_INT(~val));
PRE(ilist, instr, mov1);
mov2 = NULL;
} else {
/* To use INT16 here and pass the size checks in opnd_create_immed_int
* we'd have to add UINT16 (or sign-extend the bottom half again):
* simpler to use INT, and our general ARM philosophy is to use INT and
* ignore immed sizes at instr creation time (only at encode time do we
* check them).
*/
mov1 = INSTR_CREATE_movw(dcontext, dst,
(src_inst == NULL) ?
OPND_CREATE_INT(val & 0xffff) :
opnd_create_instr_ex(src_inst, OPSZ_2, 0));
PRE(ilist, instr, mov1);
val = (val >> 16) & 0xffff;
if (val == 0) {
/* movw zero-extends so we're done */
mov2 = NULL;
} else {
mov2 = INSTR_CREATE_movt(dcontext, dst,
(src_inst == NULL) ?
OPND_CREATE_INT(val) :
opnd_create_instr_ex(src_inst, OPSZ_2, 16));
PRE(ilist, instr, mov2);
}
}
if (first != NULL)
*first = mov1;
if (last != NULL)
*last = mov2;
# endif
}
void
insert_push_immed_arch(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
ptr_int_t val, instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1551, i#1569 */
}
/* Used for fault translation */
bool
instr_check_xsp_mangling(dcontext_t *dcontext, instr_t *inst, int *xsp_adjust)
{
ASSERT(xsp_adjust != NULL);
/* No current ARM/AArch64 mangling splits an atomic push/pop into emulated pieces:
* the OP_ldm/OP_stm splits shouldn't need special translation handling.
*/
return false;
}
void
mangle_syscall_arch(dcontext_t *dcontext, instrlist_t *ilist, uint flags,
instr_t *instr, instr_t *next_instr)
{
/* inlined conditional system call mangling is not supported */
ASSERT(!instr_is_predicated(instr));
/* Shared routine already checked method, handled INSTR_NI_SYSCALL*,
* and inserted the signal barrier and non-auto-restart nop.
* If we get here, we're dealing with an ignorable syscall.
*/
/* We assume that the stolen register will, in effect, be neither
* read nor written by a system call as it is above the highest
* register used for the syscall arguments or number. This assumption
* currently seems to be valid on arm/arm64 Linux, which only writes the
* return value (with system calls that return). When other kernels are
* supported it may be necessary to move the stolen register value to a
* safer register (one that is "callee-saved" and not used by the gateway
* mechanism) before the system call, and restore it afterwards.
*/
ASSERT(DR_REG_STOLEN_MIN > DR_REG_SYSNUM);
}
# ifdef UNIX
/* Inserts code to handle clone into ilist.
* instr is the syscall instr itself.
* Assumes that instructions exist beyond instr in ilist.
*/
void
mangle_insert_clone_code(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr)
{
/* svc 0
* cbnz r0, parent
* jmp new_thread_dynamo_start
* parent:
* <post system call, etc.>
*/
instr_t *in = instr_get_next(instr);
instr_t *parent = INSTR_CREATE_label(dcontext);
ASSERT(in != NULL);
PRE(ilist, in,
INSTR_CREATE_cbnz(dcontext, opnd_create_instr(parent),
opnd_create_reg(DR_REG_R0)));
insert_reachable_cti(dcontext, ilist, in, vmcode_get_start(),
(byte *) get_new_thread_start(dcontext),
true/*jmp*/, false/*!returns*/, false/*!precise*/,
DR_REG_R0/*scratch*/, NULL);
instr_set_meta(instr_get_prev(in));
PRE(ilist, in, parent);
}
# endif /* UNIX */
void
mangle_interrupt(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1551, i#1569 */
}
# ifndef AARCH64
/* Adds a mov of the fall-through address into IBL_TARGET_REG, predicated
* with the inverse of instr's predicate.
* The caller must call mangle_reinstate_it_blocks() in Thumb mode afterward
* in order to make for legal encodings.
*/
static void
mangle_add_predicated_fall_through(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr,
instr_t *mangle_start)
{
/* Our approach is to simply add a move-immediate of the fallthrough
* address under the inverted predicate. This is much simpler to
* implement than adding a new kind of indirect branch ("conditional
* indirect") and plumbing it through all the optimized emit and link
* code (in particular, cbr stub sharing and other complex features).
*/
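    /* Illustrative result: for an app "blxne r3", the movw/movt pair that
     * materializes the fall-through address into IBL_TARGET_REG is marked
     * with the inverted predicate (eq), so the ibl sees the fall-through
     * target whenever the app branch is not taken.
     */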
dr_pred_type_t pred = instr_get_predicate(instr);
ptr_int_t fall_through = get_call_return_address(dcontext, ilist, instr);
instr_t *first, *last;
ASSERT(instr_is_predicated(instr)); /* caller should check */
/* Mark the taken mangling as predicated. We are starting after our r2
* spill. It gets complex w/ interactions with mangle_stolen_reg() (b/c
* we aren't starting far enough back) so we bail for that.
* For mangle_pc_read(), we simply don't predicate the restore (b/c
* we aren't predicating the save).
*/
if (!instr_uses_reg(instr, dr_reg_stolen)) {
instr_t *prev = instr_get_next(mangle_start);
for (; prev != next_instr; prev = instr_get_next(prev)) {
if (instr_is_app(prev) ||
!instr_is_DR_reg_spill_or_restore(dcontext, prev, NULL, NULL, NULL))
instr_set_predicate(prev, pred);
}
}
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)
PC_AS_JMP_TGT(instr_get_isa_mode(instr),
(app_pc)fall_through),
opnd_create_reg(IBL_TARGET_REG), ilist, next_instr,
&first, &last);
for (;; first = instr_get_next(first)) {
instr_set_predicate(first, instr_invert_predicate(pred));
if (last == NULL || first == last)
break;
}
}
static inline bool
app_instr_is_in_it_block(dcontext_t *dcontext, instr_t *instr)
{
ASSERT(instr_is_app(instr));
return (instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB &&
instr_is_predicated(instr));
}
# endif /* !AARCH64 */
instr_t *
mangle_direct_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, bool mangle_calls, uint flags)
{
# ifdef AARCH64
ptr_int_t target, retaddr;
ASSERT(instr_get_opcode(instr) == OP_bl);
ASSERT(opnd_is_pc(instr_get_target(instr)));
target = (ptr_int_t)opnd_get_pc(instr_get_target(instr));
retaddr = get_call_return_address(dcontext, ilist, instr);
insert_mov_immed_ptrsz(dcontext, retaddr,
opnd_create_reg(DR_REG_X30), ilist, instr, NULL, NULL);
instrlist_remove(ilist, instr); /* remove OP_bl */
instr_destroy(dcontext, instr);
return next_instr;
# else
/* Strategy: replace OP_bl with 2-step mov immed into lr + OP_b */
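    /* Illustrative transformation (A32, not exact encodings):
     *   bl <callee>
     * becomes
     *   movw lr, #lo16(retaddr); movt lr, #hi16(retaddr)
     * with the transfer to <callee> left to the block's exit cti (a predicated
     * jump is added explicitly below, and OP_blx instead goes through the ibl).
     */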
ptr_uint_t retaddr;
uint opc = instr_get_opcode(instr);
ptr_int_t target;
instr_t *first, *last;
bool in_it = app_instr_is_in_it_block(dcontext, instr);
instr_t *bound_start = INSTR_CREATE_label(dcontext);
if (in_it) {
/* split instr off from its IT block for easier mangling (we reinstate later) */
next_instr = mangle_remove_from_it_block(dcontext, ilist, instr);
}
PRE(ilist, instr, bound_start);
ASSERT(opc == OP_bl || opc == OP_blx);
ASSERT(opnd_is_pc(instr_get_target(instr)));
target = (ptr_int_t) opnd_get_pc(instr_get_target(instr));
retaddr = get_call_return_address(dcontext, ilist, instr);
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)
PC_AS_JMP_TGT(instr_get_isa_mode(instr), (app_pc)retaddr),
opnd_create_reg(DR_REG_LR), ilist, instr, &first, &last);
if (opc == OP_bl) {
/* OP_blx predication is handled below */
if (instr_is_predicated(instr)) {
for (;; first = instr_get_next(first)) {
instr_set_predicate(first, instr_get_predicate(instr));
if (last == NULL || first == last)
break;
}
/* Add exit cti for taken direction b/c we're removing the OP_bl */
instrlist_preinsert
(ilist, instr, INSTR_PRED
(XINST_CREATE_jump(dcontext, opnd_create_pc((app_pc)target)),
instr_get_predicate(instr)));
}
} else {
/* Unfortunately while there is OP_blx with an immed, OP_bx requires
* indirection through a register. We thus need to swap modes separately,
* but our ISA doesn't support mixing modes in one fragment, making
* a local "blx next_instr" not easy. We have two potential solutions:
* A) Implement far linking through stub's "ldr pc, [pc + 8]" and use
* it for blx. We need to implement that anyway for reachability,
* but as it's not implemented yet, I'm going w/ B) for now.
* B) Pretend this is an indirect branch and use the ibl.
* This is slower so XXX i#1612: switch to A once we have far links.
*/
if (instr_get_isa_mode(instr) == DR_ISA_ARM_A32)
target = (ptr_int_t) PC_AS_JMP_TGT(DR_ISA_ARM_THUMB, (app_pc)target);
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, IBL_TARGET_REG, IBL_TARGET_SLOT));
insert_mov_immed_ptrsz(dcontext, target, opnd_create_reg(IBL_TARGET_REG),
ilist, instr, NULL, NULL);
if (instr_is_predicated(instr)) {
mangle_add_predicated_fall_through(dcontext, ilist, instr, next_instr,
bound_start);
ASSERT(in_it || instr_get_isa_mode(instr) != DR_ISA_ARM_THUMB);
}
}
/* remove OP_bl (final added jmp already targets the callee) or OP_blx */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
if (in_it)
mangle_reinstate_it_blocks(dcontext, ilist, bound_start, next_instr);
return next_instr;
# endif
}
instr_t *
mangle_indirect_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, bool mangle_calls, uint flags)
{
# ifdef AARCH64
ASSERT(instr_get_opcode(instr) == OP_blr);
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, IBL_TARGET_REG, IBL_TARGET_SLOT));
ASSERT(opnd_is_reg(instr_get_target(instr)));
if (opnd_same(instr_get_target(instr), opnd_create_reg(dr_reg_stolen))) {
/* if the target reg is dr_reg_stolen, the app value is in TLS */
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext,
IBL_TARGET_REG,
TLS_REG_STOLEN_SLOT));
} else {
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(IBL_TARGET_REG),
instr_get_target(instr)));
}
insert_mov_immed_ptrsz(dcontext,
get_call_return_address(dcontext, ilist, instr),
opnd_create_reg(DR_REG_X30),
ilist, next_instr, NULL, NULL);
instrlist_remove(ilist, instr); /* remove OP_blr */
instr_destroy(dcontext, instr);
return next_instr;
# else
ptr_uint_t retaddr;
bool in_it = app_instr_is_in_it_block(dcontext, instr);
instr_t *bound_start = INSTR_CREATE_label(dcontext);
if (in_it) {
/* split instr off from its IT block for easier mangling (we reinstate later) */
next_instr = mangle_remove_from_it_block(dcontext, ilist, instr);
}
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, IBL_TARGET_REG, IBL_TARGET_SLOT));
/* We need the spill to be unconditional so start pred processing here */
PRE(ilist, instr, bound_start);
if (!opnd_same(instr_get_target(instr), opnd_create_reg(IBL_TARGET_REG))) {
if (opnd_same(instr_get_target(instr), opnd_create_reg(dr_reg_stolen))) {
/* if the target reg is dr_reg_stolen, the app value is in TLS */
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext,
IBL_TARGET_REG,
TLS_REG_STOLEN_SLOT));
} else {
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(IBL_TARGET_REG),
instr_get_target(instr)));
}
}
retaddr = get_call_return_address(dcontext, ilist, instr);
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)
PC_AS_JMP_TGT(instr_get_isa_mode(instr), (app_pc)retaddr),
opnd_create_reg(DR_REG_LR), ilist, instr, NULL, NULL);
if (instr_is_predicated(instr)) {
mangle_add_predicated_fall_through(dcontext, ilist, instr, next_instr,
bound_start);
ASSERT(in_it || instr_get_isa_mode(instr) != DR_ISA_ARM_THUMB);
}
/* remove OP_blx_ind (final added jmp already targets the callee) */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
if (in_it)
mangle_reinstate_it_blocks(dcontext, ilist, bound_start, next_instr);
return next_instr;
# endif
}
void
mangle_return(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
/* The mangling is identical */
mangle_indirect_jump(dcontext, ilist, instr, next_instr, flags);
}
instr_t *
mangle_indirect_jump(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
# ifdef AARCH64
ASSERT(instr_get_opcode(instr) == OP_br ||
instr_get_opcode(instr) == OP_ret);
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, IBL_TARGET_REG, IBL_TARGET_SLOT));
ASSERT(opnd_is_reg(instr_get_target(instr)));
if (opnd_same(instr_get_target(instr), opnd_create_reg(dr_reg_stolen))) {
/* if the target reg is dr_reg_stolen, the app value is in TLS */
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext,
IBL_TARGET_REG,
TLS_REG_STOLEN_SLOT));
} else {
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(IBL_TARGET_REG),
instr_get_target(instr)));
}
instrlist_remove(ilist, instr); /* remove OP_br or OP_ret */
instr_destroy(dcontext, instr);
return next_instr;
# else
bool remove_instr = false;
int opc = instr_get_opcode(instr);
dr_isa_mode_t isa_mode = instr_get_isa_mode(instr);
bool in_it = app_instr_is_in_it_block(dcontext, instr);
instr_t *bound_start = INSTR_CREATE_label(dcontext);
if (in_it) {
/* split instr off from its IT block for easier mangling (we reinstate later) */
next_instr = mangle_remove_from_it_block(dcontext, ilist, instr);
}
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, IBL_TARGET_REG, IBL_TARGET_SLOT));
/* We need the spill to be unconditional so start pred processing here */
PRE(ilist, instr, bound_start);
/* Most gpr_list writes are handled by mangle_gpr_list_write() by extracting
* a single "ldr pc" instr out for mangling here, except simple instructions
* like "pop pc". Xref mangle_gpr_list_write() for details.
*/
if (instr_writes_gpr_list(instr)) {
opnd_t memop = instr_get_src(instr, 0);
/* must be simple cases like "pop pc" */
ASSERT(opnd_is_base_disp(memop));
ASSERT(opnd_get_reg(instr_get_dst(instr, 0)) == DR_REG_PC);
/* FIXME i#1551: on A32, ldm* can have only one reg in the reglist,
* i.e., "ldm r10, {pc}" is valid, so we should check dr_reg_stolen usage.
*/
ASSERT_NOT_IMPLEMENTED(!opnd_uses_reg(memop, dr_reg_stolen));
opnd_set_size(&memop, OPSZ_VAR_REGLIST);
instr_set_src(instr, 0, memop);
instr_set_dst(instr, 0, opnd_create_reg(IBL_TARGET_REG));
# ifdef CLIENT_INTERFACE
/* We target only the typical return instructions: multi-pop here */
if (TEST(INSTR_CLOBBER_RETADDR, instr->flags) && opc == OP_ldmia) {
bool writeback = instr_num_srcs(instr) > 1;
if (writeback) {
opnd_set_disp(&memop, -sizeof(void*));
opnd_set_size(&memop, OPSZ_PTR);
/* We do not support writing a passed-in value as it would require
* spilling another reg. We write the only non-retaddr-guaranteed
* reg we have, our stolen reg.
*/
POST(ilist, instr,
XINST_CREATE_store(dcontext, memop, opnd_create_reg(dr_reg_stolen)));
} /* else not a pop */
}
# endif
} else if (opc == OP_bx || opc == OP_bxj) {
ASSERT(opnd_is_reg(instr_get_target(instr)));
if (opnd_same(instr_get_target(instr), opnd_create_reg(dr_reg_stolen))) {
/* if the target reg is dr_reg_stolen, the app value is in TLS */
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext,
IBL_TARGET_REG,
TLS_REG_STOLEN_SLOT));
} else {
PRE(ilist, instr,
XINST_CREATE_move(dcontext, opnd_create_reg(IBL_TARGET_REG),
instr_get_target(instr)));
}
/* remove the bx */
remove_instr = true;
} else if (opc == OP_tbb || opc == OP_tbh) {
        /* XXX: should we add dr_insert_get_mbr_branch_target() for use
* internally and by clients? OP_tb{b,h} break our assumptions of the target
* simply being stored as an absolute address at the memory operand location.
* Instead, these are pc-relative: pc += memval*2. However, it's non-trivial
* to add that, as it requires duplicating all this mangling code. Really
* clients should use dr_insert_mbr_instrumentation(), and instr_get_target()
* isn't that useful for mbrs.
*/
ptr_int_t cur_pc = (ptr_int_t)
decode_cur_pc(instr_get_raw_bits(instr), instr_get_isa_mode(instr),
opc, instr);
/* for case like tbh [pc, r10, lsl, #1] */
if (instr_uses_reg(instr, dr_reg_stolen))
mangle_stolen_reg(dcontext, ilist, instr, instr_get_next(instr), false);
if (opc == OP_tbb) {
PRE(ilist, instr,
INSTR_CREATE_ldrb(dcontext, opnd_create_reg(IBL_TARGET_REG),
instr_get_src(instr, 0)));
} else {
PRE(ilist, instr,
INSTR_CREATE_ldrh(dcontext, opnd_create_reg(IBL_TARGET_REG),
instr_get_src(instr, 0)));
}
PRE(ilist, instr,
INSTR_CREATE_lsl(dcontext, opnd_create_reg(IBL_TARGET_REG),
opnd_create_reg(IBL_TARGET_REG), OPND_CREATE_INT(1)));
/* Rather than steal another register and using movw,movt to put the pc
* into it, we split the add up into 4 pieces.
* Even if the memref is pc-relative, this is still faster than sharing
* the pc from mangle_rel_addr() if we have mangle_rel_addr() use r2
* as the scratch reg.
* XXX: arrange for that to happen, when we refactor the ind br vs PC
* and stolen reg mangling, if memref doesn't already use r2.
*/
if (opc == OP_tbb) {
/* One byte x2 won't touch the top half, so we use a movt to add: */
PRE(ilist, instr,
INSTR_CREATE_movt(dcontext, opnd_create_reg(IBL_TARGET_REG),
OPND_CREATE_INT((cur_pc & 0xffff0000) >> 16)));
} else {
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(IBL_TARGET_REG),
OPND_CREATE_INT(cur_pc & 0xff000000)));
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(IBL_TARGET_REG),
OPND_CREATE_INT(cur_pc & 0x00ff0000)));
}
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(IBL_TARGET_REG),
OPND_CREATE_INT(cur_pc & 0x0000ff00)));
PRE(ilist, instr,
XINST_CREATE_add(dcontext, opnd_create_reg(IBL_TARGET_REG),
/* These do not switch modes so we set LSB */
OPND_CREATE_INT((cur_pc & 0x000000ff) | 0x1)));
/* remove the instr */
remove_instr = true;
} else if (opc == OP_rfe || opc == OP_rfedb || opc == OP_rfeda || opc == OP_rfeib ||
opc == OP_eret) {
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
} else {
/* Explicitly writes just the pc */
uint i;
        bool found_pc = false;
instr_t *immed_next = instr_get_next(instr);
/* XXX: can anything (non-OP_ldm) have r2 as an additional dst? */
ASSERT_NOT_IMPLEMENTED(!instr_writes_to_reg(instr, IBL_TARGET_REG,
DR_QUERY_INCLUDE_ALL));
for (i = 0; i < instr_num_dsts(instr); i++) {
if (opnd_is_reg(instr_get_dst(instr, i)) &&
opnd_get_reg(instr_get_dst(instr, i)) == DR_REG_PC) {
found_pc = true;
instr_set_dst(instr, i, opnd_create_reg(IBL_TARGET_REG));
break;
}
}
ASSERT(found_pc);
if (isa_mode == DR_ISA_ARM_THUMB &&
(instr_get_opcode(instr) == OP_mov || instr_get_opcode(instr) == OP_add)) {
/* Some Thumb write-to-PC instructions (OP_add and OP_mov) are simple
* non-mode-changing branches, so we set LSB to 1.
*/
opnd_t src = opnd_create_reg(IBL_TARGET_REG);
if (instr_get_opcode(instr) == OP_mov && !instr_is_predicated(instr)) {
/* Optimization: we can replace the mov */
src = instr_get_src(instr, 0);
remove_instr = true;
}
if (instr_get_opcode(instr) == OP_add) {
/* We need to add shift immeds: easiest to create a new add (i#1919) */
PRE(ilist, instr,
INSTR_CREATE_add(dcontext, instr_get_dst(instr, 0),
instr_get_src(instr, 0), instr_get_src(instr, 1)));
remove_instr = true;
}
/* We want this before any mangle_rel_addr mangling */
POST(ilist, instr,
INSTR_CREATE_orr(dcontext, opnd_create_reg(IBL_TARGET_REG), src,
OPND_CREATE_INT(1)));
}
if (instr_uses_reg(instr, dr_reg_stolen)) {
/* Stolen register mangling must happen after orr instr
* inserted above but before any mangle_rel_addr mangling.
*/
mangle_stolen_reg(dcontext, ilist, instr, immed_next, remove_instr);
}
# ifdef CLIENT_INTERFACE
/* We target only the typical return instructions: single pop here */
if (TEST(INSTR_CLOBBER_RETADDR, instr->flags) && opc == OP_ldr) {
bool writeback = instr_num_srcs(instr) > 1;
if (writeback && opnd_is_immed_int(instr_get_src(instr, 1))) {
opnd_t memop = instr_get_src(instr, 0);
opnd_set_disp(&memop, -opnd_get_immed_int(instr_get_src(instr, 1)));
/* See above: we just write our stolen reg value */
POST(ilist, instr,
XINST_CREATE_store(dcontext, memop, opnd_create_reg(dr_reg_stolen)));
} /* else not a pop */
}
# endif
}
if (instr_is_predicated(instr)) {
mangle_add_predicated_fall_through(dcontext, ilist, instr, next_instr,
bound_start);
ASSERT(in_it || isa_mode != DR_ISA_ARM_THUMB);
}
if (remove_instr) {
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
if (in_it)
mangle_reinstate_it_blocks(dcontext, ilist, bound_start, next_instr);
return next_instr;
# endif
}
/* Local single-instr-window scratch reg picker. Only considers r0-r3, so the
* caller must split up any GPR reg list first. Assumes we only care about instrs
* that read or write regs outside of r0-r3, so we'll only fail on instrs that
* can access 5 GPR's, and again caller should split those up.
*
* For some use case (e.g., mangle stolen reg), the scratch reg will be
* used across the app instr, so we cannot pick a dead reg.
*
* Returns REG_NULL if fail to find a scratch reg.
*/
static reg_id_t
pick_scratch_reg(dcontext_t *dcontext, instr_t *instr, bool dead_reg_ok,
ushort *scratch_slot OUT, bool *should_restore OUT)
{
reg_id_t reg;
ushort slot;
if (should_restore != NULL)
*should_restore = true;
# ifndef AARCH64 /* FIXME i#1569: not yet optimized */
if (find_prior_scratch_reg_restore(dcontext, instr, ®) != NULL &&
reg != REG_NULL && !instr_uses_reg(instr, reg) &&
/* Ensure no conflict in scratch regs for PC or stolen reg
* mangling vs ind br mangling. We can't just check for mbr b/c
* of OP_blx.
*/
(!instr_is_cti(instr) || reg != IBL_TARGET_REG)) {
ASSERT(reg >= SCRATCH_REG0 && reg <= SCRATCH_REG_LAST);
slot = TLS_REG0_SLOT + sizeof(reg_t)*(reg - SCRATCH_REG0);
DOLOG(4, LOG_INTERP, {
dcontext_t *dcontext = get_thread_private_dcontext();
LOG(THREAD, LOG_INTERP, 4, "use last scratch reg %s\n", reg_names[reg]);
});
} else
# endif
reg = REG_NULL;
if (reg == REG_NULL) {
for (reg = SCRATCH_REG0, slot = TLS_REG0_SLOT;
reg <= SCRATCH_REG_LAST; reg++, slot+=sizeof(reg_t)) {
if (!instr_uses_reg(instr, reg) &&
/* not pick IBL_TARGET_REG if instr is a cti */
(!instr_is_cti(instr) || reg != IBL_TARGET_REG))
break;
}
}
/* We can only try to pick a dead register if the scratch reg usage
* allows so (e.g., not across the app instr).
*/
if (reg > SCRATCH_REG_LAST && dead_reg_ok) {
/* Likely OP_ldm. We'll have to pick a dead reg (non-ideal b/c a fault
* could come in: i#400).
*/
for (reg = SCRATCH_REG0, slot = TLS_REG0_SLOT;
reg <= SCRATCH_REG_LAST; reg++, slot+=sizeof(reg_t)) {
if (!instr_reads_from_reg(instr, reg, DR_QUERY_INCLUDE_ALL) &&
/* Ensure no conflict vs ind br mangling */
(!instr_is_cti(instr) || reg != IBL_TARGET_REG))
break;
}
if (should_restore != NULL)
*should_restore = false;
}
/* Only OP_stm could read all 4 of our scratch regs and also read or write
     * the PC or stolen reg (OP_smlal{b,t}{b,t} can read 4 GPR's but not a 5th),
* and it's not allowed to have PC as a base reg (it's "unpredictable" at
* least). For stolen reg as base, we should split it up before calling here.
*/
if (reg > SCRATCH_REG_LAST)
reg = REG_NULL;
if (scratch_slot != NULL)
*scratch_slot = slot;
return reg;
}
/* Should return NULL if it destroys "instr". */
instr_t *
mangle_rel_addr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
# ifdef AARCH64
uint opc = instr_get_opcode(instr);
opnd_t dst = instr_get_dst(instr, 0);
opnd_t src = instr_get_src(instr, 0);
app_pc tgt;
ASSERT(opc == OP_adr || opc == OP_adrp || opc == OP_ldr || opc == OP_ldrsw);
ASSERT(instr_has_rel_addr_reference(instr));
instr_get_rel_addr_target(instr, &tgt);
ASSERT(opnd_is_reg(dst));
ASSERT(opnd_is_rel_addr(src));
ASSERT(opnd_get_addr(src) == tgt);
if (instr_uses_reg(instr, dr_reg_stolen)) {
dst = opnd_create_reg(reg_resize_to_opsz(DR_REG_X0, opnd_get_size(dst)));
PRE(ilist, next_instr,
instr_create_save_to_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
}
if ((opc == OP_ldr || opc == OP_ldrsw) && reg_is_gpr(opnd_get_reg(dst))) {
reg_id_t xreg = reg_to_pointer_sized(opnd_get_reg(dst));
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)tgt, opnd_create_reg(xreg),
ilist, next_instr, NULL, NULL);
PRE(ilist, next_instr,
instr_create_1dst_1src(dcontext, opc, dst,
opnd_create_base_disp(xreg, REG_NULL,
0, 0, opnd_get_size(src))));
} else if (opc == OP_ldr) {
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)tgt,
opnd_create_reg(DR_REG_X0),
ilist, next_instr, NULL, NULL);
PRE(ilist, next_instr,
XINST_CREATE_load(dcontext, dst,
opnd_create_base_disp(DR_REG_X0, REG_NULL,
0, 0, opnd_get_size(dst))));
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
} else {
/* OP_adr, OP_adrp */
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)tgt, dst,
ilist, next_instr, NULL, NULL);
}
if (instr_uses_reg(instr, dr_reg_stolen)) {
PRE(ilist, next_instr,
instr_create_save_to_tls(dcontext,
DR_REG_X0, TLS_REG_STOLEN_SLOT));
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
}
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return NULL;
# else
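    /* Illustrative transformation (A32, scratch reg assumed to be r0):
     *   ldr r1, [pc, #0x20]
     * becomes
     *   movw/movt r0 <- (app pc value); ldr r1, [r0, #0x20]
     * i.e., the PC base is replaced by a scratch reg holding the app's r15.
     */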
/* Compute the value of r15==pc for orig app instr */
ptr_int_t r15 = (ptr_int_t)
decode_cur_pc(instr_get_raw_bits(instr), instr_get_isa_mode(instr),
instr_get_opcode(instr), instr);
opnd_t mem_op;
ushort slot;
bool should_restore;
reg_id_t reg = pick_scratch_reg(dcontext, instr, true, &slot, &should_restore);
opnd_t new_op;
dr_shift_type_t shift_type;
uint shift_amt, disp;
bool store = instr_writes_memory(instr);
bool in_it = app_instr_is_in_it_block(dcontext, instr);
instr_t *bound_start = INSTR_CREATE_label(dcontext);
if (in_it) {
/* split instr off from its IT block for easier mangling (we reinstate later) */
next_instr = mangle_remove_from_it_block(dcontext, ilist, instr);
}
PRE(ilist, instr, bound_start);
ASSERT(instr_has_rel_addr_reference(instr));
    /* Manual says "unpredictable" if PC is base of ldm/stm */
ASSERT(!instr_reads_gpr_list(instr) && !instr_writes_gpr_list(instr));
ASSERT(reg != REG_NULL);
if (store) {
mem_op = instr_get_dst(instr, 0);
} else {
mem_op = instr_get_src(instr, 0);
}
ASSERT(opnd_is_base_disp(mem_op));
ASSERT(opnd_get_base(mem_op) == DR_REG_PC);
disp = opnd_get_disp(mem_op);
/* For Thumb, there is a special-cased subtract from PC with a 12-bit immed that
* has no analogue with a non-PC base.
*/
if (instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB &&
TEST(DR_OPND_NEGATED, opnd_get_flags(mem_op)) &&
disp >= 256) {
/* Apply the disp now */
r15 -= disp;
disp = 0;
}
insert_save_to_tls_if_necessary(dcontext, ilist, instr, reg, slot);
insert_mov_immed_ptrsz(dcontext, r15, opnd_create_reg(reg),
ilist, instr, NULL, NULL);
shift_type = opnd_get_index_shift(mem_op, &shift_amt);
new_op = opnd_create_base_disp_arm
(reg, opnd_get_index(mem_op), shift_type, shift_amt, disp,
opnd_get_flags(mem_op), opnd_get_size(mem_op));
if (store) {
instr_set_dst(instr, 0, new_op);
} else {
instr_set_src(instr, 0, new_op);
}
if (should_restore)
PRE(ilist, next_instr, instr_create_restore_from_tls(dcontext, reg, slot));
if (in_it) {
/* XXX: we could mark our mangling as predicated in some cases,
* like mangle_add_predicated_fall_through() does.
*/
mangle_reinstate_it_blocks(dcontext, ilist, bound_start, next_instr);
}
return next_instr;
# endif
}
# ifndef AARCH64
/* mangle simple pc read, pc read in gpr_list is handled in mangle_gpr_list_read */
static void
mangle_pc_read(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
ushort slot;
bool should_restore;
reg_id_t reg = pick_scratch_reg(dcontext, instr, true, &slot, &should_restore);
ptr_int_t app_r15 = (ptr_int_t)
decode_cur_pc(instr_get_raw_bits(instr), instr_get_isa_mode(instr),
instr_get_opcode(instr), instr);
int i;
ASSERT(reg != REG_NULL);
ASSERT(!instr_is_meta(instr) &&
instr_reads_from_reg(instr, DR_REG_PC, DR_QUERY_INCLUDE_ALL));
insert_save_to_tls_if_necessary(dcontext, ilist, instr, reg, slot);
insert_mov_immed_ptrsz(dcontext, app_r15, opnd_create_reg(reg),
ilist, instr, NULL, NULL);
for (i = 0; i < instr_num_srcs(instr); i++) {
if (opnd_uses_reg(instr_get_src(instr, i), DR_REG_PC)) {
/* A memref should have been mangled already in mangle_rel_addr */
opnd_t orig = instr_get_src(instr, i);
ASSERT(opnd_is_reg(orig));
instr_set_src(instr, i, opnd_create_reg_ex(reg, opnd_get_size(orig),
opnd_get_flags(orig)));
}
}
if (should_restore)
PRE(ilist, next_instr, instr_create_restore_from_tls(dcontext, reg, slot));
}
# endif /* !AARCH64 */
/* save tls_base from dr_reg_stolen to reg and load app value to dr_reg_stolen */
static void
restore_app_value_to_stolen_reg(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, reg_id_t reg, ushort slot)
{
insert_save_to_tls_if_necessary(dcontext, ilist, instr, reg, slot);
PRE(ilist, instr, XINST_CREATE_move(dcontext,
opnd_create_reg(reg),
opnd_create_reg(dr_reg_stolen)));
    /* Load the app value into dr_reg_stolen if the instr might read it or does
     * not always write it: under predicated execution we must end up writing
     * back the correct (app) value even when the instr's write does not happen.
     */
if (instr_reads_from_reg(instr, dr_reg_stolen, DR_QUERY_DEFAULT) ||
!instr_writes_to_exact_reg(instr, dr_reg_stolen, DR_QUERY_DEFAULT)) {
PRE(ilist, instr, instr_create_restore_from_tls(dcontext, dr_reg_stolen,
TLS_REG_STOLEN_SLOT));
} else {
DOLOG(4, LOG_INTERP, {
LOG(THREAD, LOG_INTERP, 4, "skip restore stolen reg app value for: ");
instr_disassemble(dcontext, instr, THREAD);
LOG(THREAD, LOG_INTERP, 4, "\n");
});
}
}
/* Store the app value from dr_reg_stolen back to its TLS slot if the app instr
 * may have written it, and restore the TLS base from reg back to dr_reg_stolen.
 */
static void
restore_tls_base_to_stolen_reg(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr,
reg_id_t reg, ushort slot)
{
/* store app val back if it might be written */
if (instr_writes_to_reg(instr, dr_reg_stolen, DR_QUERY_INCLUDE_COND_DSTS)) {
PRE(ilist, next_instr, XINST_CREATE_store
(dcontext, opnd_create_base_disp(reg, REG_NULL, 0,
os_tls_offset(TLS_REG_STOLEN_SLOT),
OPSZ_PTR),
opnd_create_reg(dr_reg_stolen)));
} else {
DOLOG(4, LOG_INTERP, {
LOG(THREAD, LOG_INTERP, 4, "skip save stolen reg app value for: ");
instr_disassemble(dcontext, instr, THREAD);
LOG(THREAD, LOG_INTERP, 4, "\n");
});
}
/* restore stolen reg from spill reg */
PRE(ilist, next_instr, XINST_CREATE_move(dcontext,
opnd_create_reg(dr_reg_stolen),
opnd_create_reg(reg)));
}
/* XXX: merge with or refactor out old STEAL_REGISTER x86 code? */
/* Mangle simple dr_reg_stolen access.
* dr_reg_stolen in gpr_list is handled in mangle_gpr_list_{read/write}.
*
* Because this routine switches the register that hold DR's TLS base,
* it should be called after all other mangling routines that perform
* reg save/restore.
*/
static void
mangle_stolen_reg(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr, bool instr_to_be_removed)
{
ushort slot;
bool should_restore;
reg_id_t tmp;
    /* Our stolen reg model is to expose it to the client. We assume that any
* meta instrs using it are using it as TLS.
*/
ASSERT(!instr_is_meta(instr) && instr_uses_reg(instr, dr_reg_stolen));
# ifndef AARCH64 /* FIXME i#1569: recognise "move" on AArch64 */
/* optimization, convert simple mov to ldr/str:
* - "mov r0 -> r10" ==> "str r0 -> [r10_slot]"
* - "mov r10 -> r0" ==> "ldr [r10_slot] -> r0"
*/
if (instr_get_opcode(instr) == OP_mov && opnd_is_reg(instr_get_src(instr, 0))) {
opnd_t opnd;
ASSERT(instr_num_srcs(instr) == 1 && instr_num_dsts(instr) == 1);
ASSERT(opnd_is_reg(instr_get_dst(instr, 0)));
/* mov rx -> rx, do nothing */
if (opnd_same(instr_get_src(instr, 0), instr_get_dst(instr, 0)))
return;
/* this optimization changes the original instr, so it is only applied
* if instr_to_be_removed is false
*/
if (!instr_to_be_removed) {
opnd = opnd_create_tls_slot(os_tls_offset(TLS_REG_STOLEN_SLOT));
if (opnd_get_reg(instr_get_src(instr, 0)) == dr_reg_stolen) {
/* mov r10 -> rx, convert to a ldr */
instr_set_opcode(instr, OP_ldr);
instr_set_src(instr, 0, opnd);
return;
} else {
ASSERT(opnd_get_reg(instr_get_dst(instr, 0)) == dr_reg_stolen);
/* mov rx -> r10, convert to a str */
instr_set_opcode(instr, OP_str);
instr_set_dst(instr, 0, opnd);
return;
}
ASSERT_NOT_REACHED();
}
}
# endif
/* move stolen reg value into tmp reg for app instr execution */
tmp = pick_scratch_reg(dcontext, instr, false, &slot, &should_restore);
ASSERT(tmp != REG_NULL);
restore_app_value_to_stolen_reg(dcontext, ilist, instr, tmp, slot);
/* -- app instr executes here -- */
/* restore tls_base back to dr_reg_stolen */
restore_tls_base_to_stolen_reg(dcontext, ilist, instr, next_instr, tmp, slot);
/* restore tmp if necessary */
if (should_restore)
PRE(ilist, next_instr, instr_create_restore_from_tls(dcontext, tmp, slot));
}
/* replace thread register read instruction with a TLS load instr */
instr_t *
mangle_reads_thread_register(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr)
{
# ifdef AARCH64
reg_id_t reg = opnd_get_reg(instr_get_dst(instr, 0));
ASSERT(instr->opcode == OP_mrs);
if (reg != dr_reg_stolen) {
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext, reg,
os_get_app_tls_base_offset(TLS_REG_LIB)));
} else {
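        /* The dst is the stolen reg itself, so instead of writing the register
         * we write the app TLS base into the stolen reg's spill slot, using x0
         * as a scratch register.
         */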
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext, DR_REG_X0,
os_get_app_tls_base_offset(TLS_REG_LIB)));
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, DR_REG_X0, TLS_REG_STOLEN_SLOT));
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
}
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return next_instr;
# else
opnd_t opnd;
reg_id_t reg;
bool in_it = app_instr_is_in_it_block(dcontext, instr);
instr_t *bound_start = INSTR_CREATE_label(dcontext);
if (in_it) {
/* split instr off from its IT block for easier mangling (we reinstate later) */
next_instr = mangle_remove_from_it_block(dcontext, ilist, instr);
}
PRE(ilist, instr, bound_start);
ASSERT(!instr_is_meta(instr) && instr_reads_thread_register(instr));
reg = opnd_get_reg(instr_get_dst(instr, 0));
ASSERT(reg_is_gpr(reg) && opnd_get_size(instr_get_dst(instr, 0)) == OPSZ_PTR);
/* convert mrc to load */
opnd = opnd_create_sized_tls_slot
(os_tls_offset(os_get_app_tls_base_offset(TLS_REG_LIB)), OPSZ_PTR);
instr_remove_srcs(dcontext, instr, 1, instr_num_srcs(instr));
instr_set_src(instr, 0, opnd);
instr_set_opcode(instr, OP_ldr);
ASSERT(reg != DR_REG_PC);
/* special case: dst reg is dr_reg_stolen */
if (reg == dr_reg_stolen) {
instr_t *immed_nexti;
        /* we do not mangle r10 in [r10, disp], but need to save r10 after execution,
* so we cannot use mangle_stolen_reg.
*/
insert_save_to_tls_if_necessary(dcontext, ilist, instr, SCRATCH_REG0,
TLS_REG0_SLOT);
PRE(ilist, instr, INSTR_CREATE_mov(dcontext,
opnd_create_reg(SCRATCH_REG0),
opnd_create_reg(dr_reg_stolen)));
/* -- "ldr r10, [r10, disp]" executes here -- */
immed_nexti = instr_get_next(instr);
restore_tls_base_to_stolen_reg(dcontext, ilist, instr, immed_nexti,
SCRATCH_REG0, TLS_REG0_SLOT);
PRE(ilist, immed_nexti, instr_create_restore_from_tls(dcontext,
SCRATCH_REG0,
TLS_REG0_SLOT));
}
if (in_it)
mangle_reinstate_it_blocks(dcontext, ilist, bound_start, next_instr);
return next_instr;
# endif
}
# ifdef AARCH64
instr_t *
mangle_writes_thread_register(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr)
{
reg_id_t reg = opnd_get_reg(instr_get_src(instr, 0));
ASSERT(instr->opcode == OP_msr);
if (reg != dr_reg_stolen) {
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, reg,
os_get_app_tls_base_offset(TLS_REG_LIB)));
} else {
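        /* The src is the stolen reg, whose app value lives in its spill slot,
         * so copy that value into the app TLS base slot via x0.
         */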
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext, DR_REG_X0, TLS_REG_STOLEN_SLOT));
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, DR_REG_X0,
os_get_app_tls_base_offset(TLS_REG_LIB)));
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
}
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return next_instr;
}
# endif
# ifndef AARCH64
static void
store_reg_to_memlist(dcontext_t *dcontext,
instrlist_t *ilist,
instr_t *instr,
instr_t *next_instr,
reg_id_t base_reg, /* reg holding memlist base */
ushort app_val_slot, /* slot holding app value */
reg_id_t tmp_reg, /* scratch reg */
reg_id_t fix_reg, /* reg to be fixed up */
uint fix_reg_idx)
{
bool writeback = instr_num_dsts(instr) > 1;
uint num_srcs = instr_num_srcs(instr);
int offs;
instr_t *store;
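    /* Compute the offset of fix_reg's slot relative to the base reg's value
     * after the stm executes: with writeback the base has already been updated,
     * so offsets are relative to the new base; without writeback they are
     * relative to the original base.
     */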
switch (instr_get_opcode(instr)) {
case OP_stmia:
if (writeback)
offs = -((num_srcs - 1/*writeback*/ - fix_reg_idx) * sizeof(reg_t));
else
offs = fix_reg_idx * sizeof(reg_t);
break;
case OP_stmda:
if (writeback)
offs = (fix_reg_idx + 1) * sizeof(reg_t);
else
offs = -((num_srcs - fix_reg_idx - 1) * sizeof(reg_t));
break;
case OP_stmdb:
if (writeback)
offs = fix_reg_idx * sizeof(reg_t);
else
offs = -((num_srcs - fix_reg_idx) * sizeof(reg_t));
break;
case OP_stmib:
if (writeback)
offs = -((num_srcs - 1/*writeback*/ - fix_reg_idx - 1) * sizeof(reg_t));
else
offs = (fix_reg_idx + 1) * sizeof(reg_t);
break;
default:
offs = 0;
ASSERT_NOT_REACHED();
}
/* load proper value into spill reg */
if (fix_reg == DR_REG_PC) {
ptr_int_t app_r15 = (ptr_int_t)
decode_cur_pc(instr_get_raw_bits(instr), instr_get_isa_mode(instr),
instr_get_opcode(instr), instr);
insert_mov_immed_ptrsz(dcontext, app_r15, opnd_create_reg(tmp_reg),
ilist, next_instr, NULL, NULL);
} else {
/* load from app_val_slot */
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, tmp_reg, app_val_slot));
}
/* store to proper location */
store = XINST_CREATE_store
(dcontext, opnd_create_base_disp(base_reg, REG_NULL, 0, offs, OPSZ_PTR),
opnd_create_reg(tmp_reg));
/* we must use the same predicate to avoid crashing here when original didn't run */
instr_set_predicate(store, instr_get_predicate(instr));
/* app instr, not meta */
instr_set_translation(store, instr_get_translation(instr));
instrlist_preinsert(ilist, next_instr, store);
}
/* mangle dr_reg_stolen or pc read in a reglist store (i.e., stm).
* Approach: fix up memory slot w/ app value after the store.
*/
static void
mangle_gpr_list_read(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
reg_id_t spill_regs[2] = {DR_REG_R0, DR_REG_R1};
reg_id_t spill_slots[2] = {TLS_REG0_SLOT, TLS_REG1_SLOT};
/* regs that need fix up in the memory slots */
reg_id_t fix_regs[2] = { DR_REG_PC, dr_reg_stolen};
bool reg_found[2] = { false, false };
uint reg_pos[2]; /* position of those fix_regs in reglist */
uint i, j, num_srcs = instr_num_srcs(instr);
bool writeback = instr_num_dsts(instr) > 1;
bool stolen_reg_is_base = false;
opnd_t memop = instr_get_dst(instr, 0);
ASSERT(dr_reg_stolen != spill_regs[0] && dr_reg_stolen != spill_regs[1]);
/* check base reg */
/* base reg cannot be PC, so could only be dr_reg_stolen */
if (opnd_uses_reg(memop, dr_reg_stolen)) {
stolen_reg_is_base = true;
restore_app_value_to_stolen_reg(dcontext, ilist, instr,
spill_regs[0], spill_slots[0]);
        /* We do not need to fix up the memory slot for dr_reg_stolen since it holds
         * the app value now, but we may need to fix up the slot for spill_regs[0].
*/
fix_regs[1] = spill_regs[0];
}
/* -- app instr executes here -- */
/* restore dr_reg_stolen if used as base */
if (stolen_reg_is_base) {
ASSERT(fix_regs[1] == spill_regs[0]);
ASSERT(opnd_uses_reg(memop, dr_reg_stolen));
/* restore dr_reg_stolen from spill_regs[0] */
restore_tls_base_to_stolen_reg(dcontext, ilist,
instr,
/* XXX: we must restore tls base right after instr
* for other TLS usage, so we use instr_get_next
* instead of next_instr.
*/
instr_get_next(instr),
spill_regs[0], spill_slots[0]);
        /* do not restore spill_regs[0] as we may use it as scratch reg later */
}
/* fix up memory slot w/ app value after the store */
for (i = 0; i < (writeback ? (num_srcs - 1) : num_srcs); i++) {
reg_id_t reg;
ASSERT(opnd_is_reg(instr_get_src(instr, i)));
reg = opnd_get_reg(instr_get_src(instr, i));
for (j = 0; j < 2; j++) {
if (reg == fix_regs[j]) {
reg_found[j] = true;
reg_pos[j] = i;
}
}
}
if (reg_found[0] || reg_found[1]) {
ushort app_val_slot; /* slot holding app value */
reg_id_t base_reg;
reg_id_t scratch = spill_regs[1];
if (stolen_reg_is_base) {
            /* dr_reg_stolen is used as the base in the app, but it is holding
             * the TLS base, so we now put dr_reg_stolen's app value into spill_regs[0]
* to use it as the base instead.
*/
ASSERT(fix_regs[1] == spill_regs[0]);
app_val_slot = spill_slots[0];
base_reg = spill_regs[0];
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, spill_regs[0],
TLS_REG_STOLEN_SLOT));
} else {
ASSERT(fix_regs[1] == dr_reg_stolen);
app_val_slot = TLS_REG_STOLEN_SLOT;
base_reg = opnd_get_base(memop);
if (opnd_uses_reg(memop, scratch)) {
/* We know !stolen_reg_is_base so we can use r0 as scratch instead
* and not have any conflicts. We keep same TLS slot.
*/
scratch = spill_regs[0];
}
}
ASSERT(!opnd_uses_reg(memop, scratch));
/* save spill reg */
insert_save_to_tls_if_necessary(dcontext, ilist, next_instr,
scratch, spill_slots[1]);
/* fixup the slot in memlist */
for (i = 0; i < 2; i++) {
if (reg_found[i]) {
store_reg_to_memlist(dcontext, ilist, instr, next_instr,
base_reg, app_val_slot,
scratch, fix_regs[i], reg_pos[i]);
}
}
/* restore spill reg */
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, scratch, spill_slots[1]));
}
if (stolen_reg_is_base) {
ASSERT(fix_regs[1] == spill_regs[0]);
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, spill_regs[0], spill_slots[0]));
}
}
/* We normalize a ldm{ia,ib,da,db} instruction to a sequence of instructions:
* 1. adjust base
* 2. ldr r0 [base] # optional split for getting a scratch reg
* 3. ldmia
* 4. adjust base
* 5. ldr pc [base, disp]
*/
static void
normalize_ldm_instr(dcontext_t *dcontext,
instr_t *instr, /* ldm */
instr_t **pre_ldm_adjust,
instr_t **pre_ldm_ldr,
instr_t **post_ldm_adjust,
instr_t **ldr_pc)
{
int opcode = instr_get_opcode(instr);
reg_id_t base = opnd_get_base(instr_get_src(instr, 0));
bool writeback = instr_num_srcs(instr) > 1;
bool write_pc = instr_writes_to_reg(instr, DR_REG_PC, DR_QUERY_INCLUDE_ALL);
bool use_pop_pc = false;
uint num_dsts = instr_num_dsts(instr);
int memsz = sizeof(reg_t) * (writeback ? (num_dsts - 1) : num_dsts);
int adjust_pre = 0, adjust_post = 0, ldr_pc_disp = 0;
dr_pred_type_t pred = instr_get_predicate(instr);
app_pc pc = get_app_instr_xl8(instr);
/* FIXME i#1551: NYI on case like "ldm r10, {r10, pc}": if base reg
     * is clobbered, "ldr pc [base, disp]" will use the wrong base value.
     * It seems the only solution is to load the target value first and store
* it into some TLS slot for later "ldr pc".
*/
ASSERT_NOT_IMPLEMENTED(!(write_pc && !writeback &&
/* base reg is in the reglist */
instr_writes_to_reg(instr, base, DR_QUERY_INCLUDE_ALL)));
ASSERT(pre_ldm_adjust != NULL && pre_ldm_ldr != NULL &&
post_ldm_adjust != NULL && ldr_pc != NULL);
*pre_ldm_adjust = NULL;
*pre_ldm_ldr = NULL;
*post_ldm_adjust = NULL;
*ldr_pc = NULL;
if (opnd_get_reg(instr_get_dst(instr, 0)) == DR_REG_PC) {
/* special case like "pop pc" in T32.16, do nothing */
ASSERT(write_pc && memsz == sizeof(reg_t));
return;
}
/* using an example to better understand the code below:
* - ldm{*} r0{!}, {r1-r4} ==> ldmia r0{!}, {r1-r4}
* - ldm{*} r0{!}, {r1-r3,pc} ==> ldmia r0{!}, {r1-r3,pc}
*/
switch (opcode) {
case OP_ldmia:
/* ldmia r0, {r1-r4}: r0: X->X, read [X, X+0x10)
* ldmia r0!, {r1-r4}: r0: X->X+0x10, read [X, X+0x10)
* ldmia r0, {r1-r3,pc}: r0: X->X, read [X, X+0xc), [X+0xc, X+0x10)
* ldmia r0!, {r1-r3,pc}: r0: X->X+0x10, read [X, X+0xc), [X+0xc, X+0x10)
*/
adjust_pre = 0;
if (write_pc) {
/* we take pc out of reglist, so need post ldm adjust if w/ writeback */
if (writeback) {
/* use "pop pc" instead of "ldr pc" to avoid beyond TOS access */
if (base == DR_REG_SP) {
use_pop_pc = true;
adjust_post = 0;
ldr_pc_disp = 0;
} else {
adjust_post = sizeof(reg_t);
ldr_pc_disp = -sizeof(reg_t);
}
} else {
adjust_post = 0;
ldr_pc_disp = memsz - sizeof(reg_t);
}
} else {
adjust_post = 0;
}
break;
case OP_ldmda:
/* ldmda r0, {r1-r4}: r0: X->X, read [X-0xc, X+0x4)
* ldmda r0!, {r1-r4}: r0: X->X-0x10, read [X-0xc, X+0x4)
* ldmda r0, {r1-r3,pc}: r0: X->X, read [X-0xc, X), [X, X+0x4)
* ldmda r0!, {r1-r3,pc}: r0: X->X-0x10, read [X-0xc, X), [X, X+0x4)
*/
adjust_pre = -memsz + sizeof(reg_t);
if (write_pc) {
if (writeback) {
adjust_post = -memsz;
ldr_pc_disp = memsz + sizeof(reg_t);
} else {
/* XXX: optimize, add writeback to skip post ldm adjust */
adjust_post = -adjust_pre;
ldr_pc_disp = 0;
}
} else {
if (writeback) {
adjust_post = -memsz - sizeof(reg_t);
} else {
adjust_post = -adjust_pre;
}
}
break;
case OP_ldmdb:
/* ldmdb r0, {r1-r4}: r0: X->X, read [X-0x10, X)
* ldmdb r0!, {r1-r4}: r0: X->X-0x10, read [X-0x10, X)
* ldmdb r0, {r1-r3,pc}: r0: X->X, read [X-0x10, X-0x4), [X-0x4, X)
* ldmdb r0!, {r1-r3,pc}: r0: X->X-0x10, read [X-0x10, X-0x4), [X-0x4, X)
*/
adjust_pre = -memsz;
if (write_pc) {
if (writeback) {
adjust_post = -(memsz - sizeof(reg_t));
ldr_pc_disp = memsz - sizeof(reg_t);
} else {
adjust_post = -adjust_pre;
ldr_pc_disp = -sizeof(reg_t);
}
} else {
if (writeback) {
/* XXX: optimize, remove writeback to avoid post ldm adjust */
adjust_post = adjust_pre;
} else {
/* XXX: optimize, add writeback to avoid post ldm adjust */
adjust_post = -adjust_pre;
}
}
break;
case OP_ldmib:
/* ldmib r0, {r1-r4}: r0: X->X, read [X+4, X+0x14)
* ldmib r0!, {r1-r4}: r0: X->X+0x10, read [X+4, X+0x14)
* ldmib r0, {r1-r3,pc}: r0: X->X, read [X+4, X+0x10), [X+0x10, X+0x14)
* ldmib r0!, {r1-r3,pc}: r0: X->X+0x10, read [X+4, X+0x10), [X+0x10, X+0x14)
*/
adjust_pre = sizeof(reg_t);
if (write_pc) {
if (writeback) {
adjust_post = 0;
ldr_pc_disp = 0;
} else {
adjust_post = -adjust_pre;
ldr_pc_disp = memsz;
}
} else {
if (writeback)
adjust_post = -sizeof(reg_t);
else
adjust_post = -adjust_pre;
}
break;
default:
ASSERT_NOT_REACHED();
}
if (instr_uses_reg(instr, dr_reg_stolen) &&
pick_scratch_reg(dcontext, instr, false, NULL, NULL) == REG_NULL) {
        /* We need to split the ldm.
* We need a scratch reg from r0-r3, so by splitting the bottom reg we're
* guaranteed to get one. And since cti uses r2 it works out there.
*/
adjust_pre += sizeof(reg_t);
        /* adjust base back if base won't be over-written, e.g.:
* ldm (%r10)[16byte] -> %r0 %r1 %r2 %r3
*/
if (!instr_writes_to_reg(instr, base, DR_QUERY_INCLUDE_ALL))
adjust_post -= sizeof(reg_t);
/* pre_ldm_adjust makes sure that the base reg points to the start address of
         * the ldmia memory, so we know the slot to be loaded is at [base, -4].
*/
*pre_ldm_ldr = XINST_CREATE_load(dcontext,
instr_get_dst(instr, 0),
OPND_CREATE_MEMPTR(base, -sizeof(reg_t)));
/* We remove the reg from reglist later after removing pc from reglist,
* so it won't mess up the index when removing pc.
*/
instr_set_predicate(*pre_ldm_ldr, pred);
instr_set_translation(*pre_ldm_ldr, pc);
}
if (adjust_pre != 0) {
*pre_ldm_adjust = adjust_pre > 0 ?
XINST_CREATE_add(dcontext,
opnd_create_reg(base),
OPND_CREATE_INT(adjust_pre)) :
XINST_CREATE_sub(dcontext,
opnd_create_reg(base),
OPND_CREATE_INT(-adjust_pre));
instr_set_predicate(*pre_ldm_adjust, pred);
instr_set_translation(*pre_ldm_adjust, pc);
}
if (write_pc) {
instr_remove_dsts(dcontext, instr,
writeback ? num_dsts - 2 : num_dsts - 1,
writeback ? num_dsts - 1: num_dsts);
}
if (*pre_ldm_ldr != NULL)
instr_remove_dsts(dcontext, instr, 0, 1);
/* check how many registers left in the reglist */
ASSERT(instr_num_dsts(instr) != (writeback ? 1 : 0));
if (instr_num_dsts(instr) == (writeback ? 2 : 1)) {
/* only one reg is left in the reglist, convert it to ldr */
instr_set_opcode(instr, OP_ldr);
instr_set_src(instr, 0, OPND_CREATE_MEMPTR(base, 0));
if (writeback) {
adjust_post += sizeof(reg_t);
instr_remove_srcs(dcontext, instr, 1, 2);
instr_remove_dsts(dcontext, instr, 1, 2);
}
} else {
instr_set_opcode(instr, OP_ldmia);
instr_set_src(instr, 0, OPND_CREATE_MEMLIST(base));
}
/* post ldm base register adjustment */
if (!writeback && instr_writes_to_reg(instr, base, DR_QUERY_INCLUDE_ALL)) {
/* if the base reg is in the reglist, we do not need to post adjust */
adjust_post = 0;
}
if (adjust_post != 0) {
*post_ldm_adjust = adjust_post > 0 ?
XINST_CREATE_add(dcontext,
opnd_create_reg(base),
OPND_CREATE_INT(adjust_post)) :
XINST_CREATE_sub(dcontext,
opnd_create_reg(base),
OPND_CREATE_INT(-adjust_post));
instr_set_predicate(*post_ldm_adjust, pred);
instr_set_translation(*post_ldm_adjust, pc);
}
/* post ldm load-pc */
if (write_pc) {
if (use_pop_pc) {
ASSERT(ldr_pc_disp == 0 && base == DR_REG_SP && writeback);
/* we use pop_list to generate A32.T16 (2-byte) code in Thumb mode */
*ldr_pc = INSTR_CREATE_pop_list(dcontext, 1, opnd_create_reg(DR_REG_PC));
} else {
*ldr_pc = XINST_CREATE_load(dcontext,
opnd_create_reg(DR_REG_PC),
OPND_CREATE_MEMPTR(base, ldr_pc_disp));
}
instr_set_predicate(*ldr_pc, pred);
instr_set_translation(*ldr_pc, pc);
if (TEST(INSTR_CLOBBER_RETADDR, instr->flags))
(*ldr_pc)->flags |= INSTR_CLOBBER_RETADDR;
}
}
/* Mangling reglist write is complex: ldm{ia,ib,da,db} w/ and w/o writeback.
* One possible solution is to split the ldm into multiple ldm instructions.
 * However, it has several challenges, for example:
 * - we need an additional base reg adjust instr for ldm w/o writeback
* as ldm does not have disp for the memlist,
* - we need different execution order of split-ldms for ldmia and ldmdb,
* - ldmib/ldmda add additional complexity,
* - we still need a "ldr pc" if it writes to pc
* - etc.
*
 * Another solution is to convert them into a sequence of ldr with base reg
* adjustments, which may cause large runtime overhead.
*
 * Our approach is to convert any gpr_list write instruction into five parts:
* 1. base reg adjustment
* 2. ldr r0 [base] # optional split for getting a scratch reg
* 3. ldmia base, {reglist}
* 4. base reg adjustment
* 5. ldr pc, [base, offset]
* and mangle each separately.
*/
static instr_t *
mangle_gpr_list_write(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
instr_t *pre_ldm_adjust, *pre_ldm_ldr, *post_ldm_adjust, *ldr_pc;
ASSERT(!instr_is_meta(instr) && instr_writes_gpr_list(instr));
/* convert ldm{*} instr to a sequence of instructions */
normalize_ldm_instr(dcontext, instr,
&pre_ldm_adjust, &pre_ldm_ldr, &post_ldm_adjust, &ldr_pc);
    /* pc cannot be used as the base in ldm, so now we only care about dr_reg_stolen */
if (pre_ldm_adjust != NULL) {
instrlist_preinsert(ilist, instr, pre_ldm_adjust); /* non-meta */
if (instr_uses_reg(pre_ldm_adjust, dr_reg_stolen)) {
mangle_stolen_reg(dcontext, ilist, pre_ldm_adjust,
/* dr_reg_stolen must be restored right after */
instr_get_next(pre_ldm_adjust), false);
}
}
if (pre_ldm_ldr != NULL) {
/* special case: ldm r0, {r0-rx}, separate ldr r0, [r0] clobbers base r0 */
if (opnd_get_reg(instr_get_dst(pre_ldm_ldr, 0)) == SCRATCH_REG0 &&
opnd_get_base(instr_get_src(pre_ldm_ldr, 0)) == SCRATCH_REG0) {
instr_t *mov;
/* save the r1 for possible context restore on signal */
insert_save_to_tls_if_necessary(dcontext, ilist, instr, SCRATCH_REG1,
TLS_REG1_SLOT);
/* mov r0 => r1, */
mov = XINST_CREATE_move(dcontext,
opnd_create_reg(SCRATCH_REG1),
opnd_create_reg(SCRATCH_REG0));
instr_set_predicate(mov, instr_get_predicate(instr));
PRE(ilist, instr, mov);
            /* We will only get here iff instr is "ldm r0, {r0-rx}",
             * otherwise we will be able to pick a scratch reg without a split.
* Thus the first dst reg must be r1 after split and the base is r0.
* Now we change "ldm r0, {r1-rx}" to "ldm r1, {r1-rx}".
*/
ASSERT(opnd_get_reg(instr_get_dst(instr, 0)) == SCRATCH_REG1 &&
opnd_get_base(instr_get_src(instr, 0)) == SCRATCH_REG0);
instr_set_src(instr, 0, OPND_CREATE_MEMLIST(SCRATCH_REG1));
}
instrlist_preinsert(ilist, instr, pre_ldm_ldr); /* non-meta */
if (instr_uses_reg(pre_ldm_ldr, dr_reg_stolen)) {
mangle_stolen_reg(dcontext, ilist, pre_ldm_ldr,
/* dr_reg_stolen must be restored right after */
instr_get_next(pre_ldm_ldr), false);
}
}
if (instr_uses_reg(instr, dr_reg_stolen)) {
/* dr_reg_stolen must be restored right after instr */
mangle_stolen_reg(dcontext, ilist, instr, instr_get_next(instr), false);
}
if (post_ldm_adjust != NULL) {
instrlist_preinsert(ilist, next_instr, post_ldm_adjust);
if (instr_uses_reg(post_ldm_adjust, dr_reg_stolen)) {
mangle_stolen_reg(dcontext, ilist, post_ldm_adjust,
/* dr_reg_stolen must be restored right after */
instr_get_next(post_ldm_adjust), false);
}
}
if (ldr_pc != NULL) {
/* we leave ldr_pc to mangle_indirect_jump */
instrlist_preinsert(ilist, next_instr, ldr_pc);
next_instr = ldr_pc;
}
return next_instr;
}
# endif /* !AARCH64 */
# ifdef AARCH64
/* We mangle a conditional branch that uses the stolen register like this:
*
* cbz x28, target # x28 is stolen register
* =>
* str x0, [x28] # spill x0
* ldr x0, [x28, #32] # x28 in memory loaded to x0
* cbnz x0, fall
* ldr x0, [x28] # restore x0 (original branch taken)
* b target
* fall:
* ldr x0, [x28] # restore x0 (original branch not taken)
*
* The CBNZ will need special handling when we decode from the cache for
* traces (i#1668).
*/
static void
mangle_cbr_stolen_reg(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr)
{
instr_t *fall = INSTR_CREATE_label(dcontext);
int opcode = instr_get_opcode(instr);
reg_id_t reg = DR_REG_X0;
ushort slot = TLS_REG0_SLOT;
opnd_t opnd;
PRE(ilist, instr, instr_create_save_to_tls(dcontext, reg, slot));
PRE(ilist, instr, instr_create_restore_from_tls(dcontext, reg,
TLS_REG_STOLEN_SLOT));
switch (opcode) {
case OP_cbnz:
case OP_cbz:
opnd = instr_get_src(instr, 1);
opnd = opnd_create_reg(reg_resize_to_opsz(reg, opnd_get_size(opnd)));
PRE(ilist, instr,
instr_create_0dst_2src(dcontext,
(opcode == OP_cbz ? OP_cbnz : OP_cbz),
opnd_create_instr(fall), opnd));
break;
case OP_tbnz:
case OP_tbz:
PRE(ilist, instr,
instr_create_0dst_3src(dcontext,
(opcode == OP_tbz ? OP_tbnz : OP_tbz),
opnd_create_instr(fall),
opnd_create_reg(reg),
instr_get_src(instr, 2)));
break;
default:
ASSERT_NOT_REACHED();
}
PRE(ilist, instr, instr_create_restore_from_tls(dcontext, reg, slot));
/* Replace original instruction with unconditional branch. */
opnd = instr_get_src(instr, 0);
instr_reset(dcontext, instr);
instr_set_opcode(instr, OP_b);
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_src(instr, 0, opnd);
PRE(ilist, next_instr, fall);
PRE(ilist, next_instr, instr_create_restore_from_tls(dcontext, reg, slot));
}
# endif /* AARCH64 */
/* On ARM, we need to mangle app instrs accessing registers pc and dr_reg_stolen.
 * We use this centralized mangling routine to handle these complex cases with
 * more efficient mangling code.
*/
instr_t *
mangle_special_registers(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
# ifdef AARCH64
if (!instr_uses_reg(instr, dr_reg_stolen))
return next_instr;
if (instr_is_cbr(instr))
mangle_cbr_stolen_reg(dcontext, ilist, instr, instr_get_next(instr));
else if (!instr_is_mbr(instr))
mangle_stolen_reg(dcontext, ilist, instr, instr_get_next(instr), false);
return next_instr;
# else
bool finished = false;
bool in_it = instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB &&
instr_is_predicated(instr);
instr_t *bound_start = NULL, *bound_end = next_instr;
if (in_it) {
/* split instr off from its IT block for easier mangling (we reinstate later) */
next_instr = mangle_remove_from_it_block(dcontext, ilist, instr);
/* We do NOT want the next_instr from mangle_gpr_list_write(), which can
* point at the split-off OP_ldr of pc: but we need to go past that.
*/
bound_end = next_instr;
bound_start = INSTR_CREATE_label(dcontext);
PRE(ilist, instr, bound_start);
}
/* FIXME i#1551: for indirect branch mangling, we first mangle the instr here
* for possible pc read and dr_reg_stolen read/write,
* and leave pc write mangling later in mangle_indirect_jump, which is
* error-prone and inefficient.
* We should split the mangling and only mangle non-ind-branch instructions
* here and leave mbr instruction mangling to mangle_indirect_jump.
*/
/* special handling reglist read */
if (instr_reads_gpr_list(instr)) {
mangle_gpr_list_read(dcontext, ilist, instr, next_instr);
finished = true;
}
/* special handling reglist write */
if (!finished && instr_writes_gpr_list(instr)) {
next_instr = mangle_gpr_list_write(dcontext, ilist, instr, next_instr);
finished = true;
}
if (!finished && instr_reads_from_reg(instr, DR_REG_PC, DR_QUERY_INCLUDE_ALL))
mangle_pc_read(dcontext, ilist, instr, next_instr);
/* mangle_stolen_reg must happen after mangle_pc_read to avoid reg conflict */
if (!finished && instr_uses_reg(instr, dr_reg_stolen) && !instr_is_mbr(instr))
mangle_stolen_reg(dcontext, ilist, instr, instr_get_next(instr), false);
if (in_it) {
mangle_reinstate_it_blocks(dcontext, ilist, bound_start, bound_end);
}
return next_instr;
# endif
}
void
float_pc_update(dcontext_t *dcontext)
{
/* FIXME i#1551, i#1569: NYI on ARM */
ASSERT_NOT_REACHED();
}
# ifdef AARCH64
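/* Mangle AArch64 instruction-cache synchronization: "ic ivau, xT" (OP_sys) is
 * replaced by a call to icache_op_ic_ivau_asm, while OP_isb is kept but followed
 * by code that branches to icache_op_isb_asm when icache_op_struct.flag is set;
 * both helpers receive the stolen reg (DR's TLS base) in x0.
 */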
instr_t *
mangle_icache_op(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr, app_pc pc)
{
int opc = instr_get_opcode(instr);
if (opc == OP_sys) {
reg_id_t xt = opnd_get_reg(instr_get_src(instr, 1));
/* ic ivau, xT is replaced with: */
PRE(ilist, instr, /* stp x0, x30, [x28] */
INSTR_CREATE_stp(dcontext,
opnd_create_base_disp(dr_reg_stolen,
DR_REG_NULL, 0, 0, OPSZ_16),
opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X30)));
insert_mov_immed_arch(dcontext, NULL, NULL, (ptr_int_t)pc,
opnd_create_reg(DR_REG_X30), ilist, instr, NULL, NULL);
if (xt == dr_reg_stolen) {
PRE(ilist, instr, /* ldr x0, [x28, #32] */
instr_create_restore_from_tls(dcontext, DR_REG_X0, TLS_REG_STOLEN_SLOT));
}
PRE(ilist, instr, /* stp xT, x30, [x28, #16] */
INSTR_CREATE_stp(dcontext,
opnd_create_base_disp(dr_reg_stolen,
DR_REG_NULL, 0, 16, OPSZ_16),
opnd_create_reg(xt == dr_reg_stolen ? DR_REG_X0 : xt),
opnd_create_reg(DR_REG_X30)));
insert_mov_immed_arch(dcontext, NULL, NULL, (ptr_int_t)icache_op_ic_ivau_asm,
opnd_create_reg(DR_REG_X30), ilist, instr, NULL, NULL);
PRE(ilist, instr, /* mov x0, x28 */
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_X0),
opnd_create_reg(dr_reg_stolen)));
PRE(ilist, instr, /* blr x30 */
INSTR_CREATE_blr(dcontext, opnd_create_reg(DR_REG_X30)));
PRE(ilist, instr, /* ldp x0, x30, [x28] */
INSTR_CREATE_ldp(dcontext,
opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X30),
opnd_create_base_disp(dr_reg_stolen,
DR_REG_NULL, 0, 0, OPSZ_16)));
/* Remove original instruction. */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
} else if (opc == OP_isb) {
instr_t *label = INSTR_CREATE_label(dcontext);
instr = next_instr;
/* isb is followed by: */
PRE(ilist, instr, /* str x0, [x28] */
instr_create_save_to_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
insert_mov_immed_arch(dcontext, NULL, NULL, (ptr_int_t)&icache_op_struct.flag,
opnd_create_reg(DR_REG_X0), ilist, instr, NULL, NULL);
PRE(ilist, instr, /* ldr w0, [x0] */
XINST_CREATE_load(dcontext, opnd_create_reg(DR_REG_W0),
opnd_create_base_disp(DR_REG_X0, DR_REG_NULL,
0, 0, OPSZ_4)));
PRE(ilist, instr, /* cbz ... */
INSTR_CREATE_cbz(dcontext, opnd_create_instr(label),
opnd_create_reg(DR_REG_W0)));
PRE(ilist, instr, /* stp x1, x2, [x28, #8] */
INSTR_CREATE_stp(dcontext,
opnd_create_base_disp(dr_reg_stolen,
DR_REG_NULL, 0, 8, OPSZ_16),
opnd_create_reg(DR_REG_X1), opnd_create_reg(DR_REG_X2)));
insert_mov_immed_arch(dcontext, NULL, NULL, (ptr_int_t)icache_op_isb_asm,
opnd_create_reg(DR_REG_X2), ilist, instr, NULL, NULL);
insert_mov_immed_arch(dcontext, NULL, NULL, (ptr_int_t)pc,
opnd_create_reg(DR_REG_X1), ilist, instr, NULL, NULL);
PRE(ilist, instr, /* mov x0, x28 */
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_X0),
opnd_create_reg(dr_reg_stolen)));
PRE(ilist, instr, /* br x2 */
INSTR_CREATE_br(dcontext, opnd_create_reg(DR_REG_X2)));
PRE(ilist, instr, label);
PRE(ilist, instr, /* ldr x0, [x28] */
instr_create_restore_from_tls(dcontext, DR_REG_X0, TLS_REG0_SLOT));
/* Leave original instruction. */
} else
ASSERT_NOT_REACHED();
return next_instr;
}
# endif
/* END OF CONTROL-FLOW MANGLING ROUTINES
*###########################################################################
*###########################################################################
*/
#endif /* !STANDALONE_DECODER */
/***************************************************************************/
| 1 | 12,783 | I'm not sure where your 31 comes from? I see 15 here and 2 above for pc and flags, for 17 total beyond the SIMD. | DynamoRIO-dynamorio | c |
@@ -46,4 +46,10 @@ public class Info extends AbstractApiBean {
public Response getServer() {
return response( req -> ok(systemConfig.getDataverseServer()));
}
+
+ @GET
+ @Path("apiTermsOfUse")
+ public Response getTermsOfUse() {
+ return response( req -> ok(systemConfig.getApiTermsOfUse()));
+ }
} | 1 | package edu.harvard.iq.dataverse.api;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.SystemConfig;
import javax.ejb.EJB;
import javax.json.Json;
import javax.json.JsonValue;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;
@Path("info")
public class Info extends AbstractApiBean {
@EJB
SettingsServiceBean settingsService;
@EJB
SystemConfig systemConfig;
@GET
@Path("settings/:DatasetPublishPopupCustomText")
public Response getDatasetPublishPopupCustomText() {
String setting = settingsService.getValueForKey(SettingsServiceBean.Key.DatasetPublishPopupCustomText);
if (setting != null) {
return ok(Json.createObjectBuilder().add("message", setting));
} else {
return notFound("Setting " + SettingsServiceBean.Key.DatasetPublishPopupCustomText + " not found");
}
}
@GET
@Path("version")
public Response getInfo() {
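        // The version string may carry a build identifier after the word "build";
        // split it out so version and build are reported separately.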
String versionStr = systemConfig.getVersion(true);
String[] comps = versionStr.split("build",2);
String version = comps[0].trim();
JsonValue build = comps.length > 1 ? Json.createArrayBuilder().add(comps[1].trim()).build().get(0) : JsonValue.NULL;
return allowCors(response( req -> ok( Json.createObjectBuilder().add("version", version)
.add("build", build))));
}
@GET
@Path("server")
public Response getServer() {
return response( req -> ok(systemConfig.getDataverseServer()));
}
}
| 1 | 37,493 | If you want, you could change this to `allowCors(response` (like in "version", above) to allow Cross-Origin Resource Sharing. I'm sort of wondering what's returned if no API terms of use are set. | IQSS-dataverse | java |
@@ -1616,4 +1616,9 @@ public class MessageList extends K9Activity implements MessageListFragmentListen
}
}
}
+
+ @Override
+ public void onConfigurationChanged(Configuration newConfig) {
+ super.onConfigurationChanged(newConfig);
+ }
} | 1 | package com.fsck.k9.activity;
import java.util.Collection;
import java.util.List;
import android.annotation.SuppressLint;
import android.app.ActionBar;
import android.app.FragmentManager;
import android.app.FragmentManager.OnBackStackChangedListener;
import android.app.FragmentTransaction;
import android.app.SearchManager;
import android.content.Context;
import android.content.Intent;
import android.content.IntentSender;
import android.content.IntentSender.SendIntentException;
import android.content.res.Configuration;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.os.Parcelable;
import timber.log.Timber;
import android.view.KeyEvent;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import android.view.animation.AnimationUtils;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import com.fsck.k9.Account;
import com.fsck.k9.Account.SortType;
import com.fsck.k9.K9;
import com.fsck.k9.K9.SplitViewMode;
import com.fsck.k9.Preferences;
import com.fsck.k9.R;
import com.fsck.k9.activity.compose.MessageActions;
import com.fsck.k9.activity.misc.SwipeGestureDetector.OnSwipeGestureListener;
import com.fsck.k9.activity.setup.AccountSettings;
import com.fsck.k9.activity.setup.FolderSettings;
import com.fsck.k9.activity.setup.Prefs;
import com.fsck.k9.fragment.MessageListFragment;
import com.fsck.k9.fragment.MessageListFragment.MessageListFragmentListener;
import com.fsck.k9.helper.ParcelableUtil;
import com.fsck.k9.mailstore.StorageManager;
import com.fsck.k9.preferences.StorageEditor;
import com.fsck.k9.search.LocalSearch;
import com.fsck.k9.search.SearchAccount;
import com.fsck.k9.search.SearchSpecification;
import com.fsck.k9.search.SearchSpecification.Attribute;
import com.fsck.k9.search.SearchSpecification.SearchCondition;
import com.fsck.k9.search.SearchSpecification.SearchField;
import com.fsck.k9.ui.messageview.MessageViewFragment;
import com.fsck.k9.ui.messageview.MessageViewFragment.MessageViewFragmentListener;
import com.fsck.k9.view.MessageHeader;
import com.fsck.k9.view.MessageTitleView;
import com.fsck.k9.view.ViewSwitcher;
import com.fsck.k9.view.ViewSwitcher.OnSwitchCompleteListener;
import de.cketti.library.changelog.ChangeLog;
/**
* MessageList is the primary user interface for the program. This Activity
* shows a list of messages.
* From this Activity the user can perform all standard message operations.
*/
public class MessageList extends K9Activity implements MessageListFragmentListener,
MessageViewFragmentListener, OnBackStackChangedListener, OnSwipeGestureListener,
OnSwitchCompleteListener {
@Deprecated
//TODO: Remove after 2017-09-11
private static final String EXTRA_SEARCH_OLD = "search";
private static final String EXTRA_SEARCH = "search_bytes";
private static final String EXTRA_NO_THREADING = "no_threading";
private static final String ACTION_SHORTCUT = "shortcut";
private static final String EXTRA_SPECIAL_FOLDER = "special_folder";
private static final String EXTRA_MESSAGE_REFERENCE = "message_reference";
// used for remote search
public static final String EXTRA_SEARCH_ACCOUNT = "com.fsck.k9.search_account";
private static final String EXTRA_SEARCH_FOLDER = "com.fsck.k9.search_folder";
private static final String STATE_DISPLAY_MODE = "displayMode";
private static final String STATE_MESSAGE_LIST_WAS_DISPLAYED = "messageListWasDisplayed";
private static final String STATE_FIRST_BACK_STACK_ID = "firstBackstackId";
// Used for navigating to next/previous message
private static final int PREVIOUS = 1;
private static final int NEXT = 2;
public static final int REQUEST_MASK_PENDING_INTENT = 1 << 16;
public static void actionDisplaySearch(Context context, SearchSpecification search,
boolean noThreading, boolean newTask) {
actionDisplaySearch(context, search, noThreading, newTask, true);
}
public static void actionDisplaySearch(Context context, SearchSpecification search,
boolean noThreading, boolean newTask, boolean clearTop) {
context.startActivity(
intentDisplaySearch(context, search, noThreading, newTask, clearTop));
}
public static Intent intentDisplaySearch(Context context, SearchSpecification search,
boolean noThreading, boolean newTask, boolean clearTop) {
Intent intent = new Intent(context, MessageList.class);
intent.putExtra(EXTRA_SEARCH, ParcelableUtil.marshall(search));
intent.putExtra(EXTRA_NO_THREADING, noThreading);
if (clearTop) {
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
}
if (newTask) {
intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
}
return intent;
}
public static Intent shortcutIntent(Context context, String specialFolder) {
Intent intent = new Intent(context, MessageList.class);
intent.setAction(ACTION_SHORTCUT);
intent.putExtra(EXTRA_SPECIAL_FOLDER, specialFolder);
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
return intent;
}
public static Intent actionDisplayMessageIntent(Context context,
MessageReference messageReference) {
Intent intent = new Intent(context, MessageList.class);
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
intent.putExtra(EXTRA_MESSAGE_REFERENCE, messageReference.toIdentityString());
return intent;
}
private enum DisplayMode {
MESSAGE_LIST,
MESSAGE_VIEW,
SPLIT_VIEW
}
private StorageManager.StorageListener mStorageListener = new StorageListenerImplementation();
private ActionBar mActionBar;
private View mActionBarMessageList;
private View mActionBarMessageView;
private MessageTitleView mActionBarSubject;
private TextView mActionBarTitle;
private TextView mActionBarSubTitle;
private TextView mActionBarUnread;
private Menu mMenu;
private ViewGroup mMessageViewContainer;
private View mMessageViewPlaceHolder;
private MessageListFragment mMessageListFragment;
private MessageViewFragment mMessageViewFragment;
private int mFirstBackStackId = -1;
private Account mAccount;
private String mFolderName;
private LocalSearch mSearch;
private boolean mSingleFolderMode;
private boolean mSingleAccountMode;
private ProgressBar mActionBarProgress;
private MenuItem mMenuButtonCheckMail;
private View mActionButtonIndeterminateProgress;
private int mLastDirection = (K9.messageViewShowNext()) ? NEXT : PREVIOUS;
/**
* {@code true} if the message list should be displayed as flat list (i.e. no threading)
* regardless whether or not message threading was enabled in the settings. This is used for
* filtered views, e.g. when only displaying the unread messages in a folder.
*/
private boolean mNoThreading;
private DisplayMode mDisplayMode;
private MessageReference mMessageReference;
/**
* {@code true} when the message list was displayed once. This is used in
* {@link #onBackPressed()} to decide whether to go from the message view to the message list or
* finish the activity.
*/
private boolean mMessageListWasDisplayed = false;
private ViewSwitcher mViewSwitcher;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (UpgradeDatabases.actionUpgradeDatabases(this, getIntent())) {
finish();
return;
}
if (useSplitView()) {
setContentView(R.layout.split_message_list);
} else {
setContentView(R.layout.message_list);
mViewSwitcher = (ViewSwitcher) findViewById(R.id.container);
mViewSwitcher.setFirstInAnimation(AnimationUtils.loadAnimation(this, R.anim.slide_in_left));
mViewSwitcher.setFirstOutAnimation(AnimationUtils.loadAnimation(this, R.anim.slide_out_right));
mViewSwitcher.setSecondInAnimation(AnimationUtils.loadAnimation(this, R.anim.slide_in_right));
mViewSwitcher.setSecondOutAnimation(AnimationUtils.loadAnimation(this, R.anim.slide_out_left));
mViewSwitcher.setOnSwitchCompleteListener(this);
}
initializeActionBar();
// Enable gesture detection for MessageLists
setupGestureDetector(this);
if (!decodeExtras(getIntent())) {
return;
}
findFragments();
initializeDisplayMode(savedInstanceState);
initializeLayout();
initializeFragments();
displayViews();
ChangeLog cl = new ChangeLog(this);
if (cl.isFirstRun()) {
cl.getLogDialog().show();
}
}
@Override
public void onNewIntent(Intent intent) {
super.onNewIntent(intent);
if (isFinishing()) {
return;
}
setIntent(intent);
if (mFirstBackStackId >= 0) {
getFragmentManager().popBackStackImmediate(mFirstBackStackId,
FragmentManager.POP_BACK_STACK_INCLUSIVE);
mFirstBackStackId = -1;
}
removeMessageListFragment();
removeMessageViewFragment();
mMessageReference = null;
mSearch = null;
mFolderName = null;
if (!decodeExtras(intent)) {
return;
}
initializeDisplayMode(null);
initializeFragments();
displayViews();
}
/**
* Get references to existing fragments if the activity was restarted.
*/
private void findFragments() {
FragmentManager fragmentManager = getFragmentManager();
mMessageListFragment = (MessageListFragment) fragmentManager.findFragmentById(
R.id.message_list_container);
mMessageViewFragment = (MessageViewFragment) fragmentManager.findFragmentById(
R.id.message_view_container);
}
/**
* Create fragment instances if necessary.
*
* @see #findFragments()
*/
private void initializeFragments() {
FragmentManager fragmentManager = getFragmentManager();
fragmentManager.addOnBackStackChangedListener(this);
boolean hasMessageListFragment = (mMessageListFragment != null);
if (!hasMessageListFragment) {
FragmentTransaction ft = fragmentManager.beginTransaction();
mMessageListFragment = MessageListFragment.newInstance(mSearch, false,
(K9.isThreadedViewEnabled() && !mNoThreading));
ft.add(R.id.message_list_container, mMessageListFragment);
ft.commit();
}
// Check if the fragment wasn't restarted and has a MessageReference in the arguments. If
// so, open the referenced message.
if (!hasMessageListFragment && mMessageViewFragment == null &&
mMessageReference != null) {
openMessage(mMessageReference);
}
}
/**
* Set the initial display mode (message list, message view, or split view).
*
* <p><strong>Note:</strong>
* This method has to be called after {@link #findFragments()} because the result depends on
* the availability of a {@link MessageViewFragment} instance.
* </p>
*
* @param savedInstanceState
* The saved instance state that was passed to the activity as argument to
* {@link #onCreate(Bundle)}. May be {@code null}.
*/
private void initializeDisplayMode(Bundle savedInstanceState) {
if (useSplitView()) {
mDisplayMode = DisplayMode.SPLIT_VIEW;
return;
}
if (savedInstanceState != null) {
DisplayMode savedDisplayMode =
(DisplayMode) savedInstanceState.getSerializable(STATE_DISPLAY_MODE);
if (savedDisplayMode != DisplayMode.SPLIT_VIEW) {
mDisplayMode = savedDisplayMode;
return;
}
}
if (mMessageViewFragment != null || mMessageReference != null) {
mDisplayMode = DisplayMode.MESSAGE_VIEW;
} else {
mDisplayMode = DisplayMode.MESSAGE_LIST;
}
}
private boolean useSplitView() {
SplitViewMode splitViewMode = K9.getSplitViewMode();
int orientation = getResources().getConfiguration().orientation;
return (splitViewMode == SplitViewMode.ALWAYS ||
(splitViewMode == SplitViewMode.WHEN_IN_LANDSCAPE &&
orientation == Configuration.ORIENTATION_LANDSCAPE));
}
private void initializeLayout() {
mMessageViewContainer = (ViewGroup) findViewById(R.id.message_view_container);
LayoutInflater layoutInflater = getLayoutInflater();
mMessageViewPlaceHolder = layoutInflater.inflate(R.layout.empty_message_view, mMessageViewContainer, false);
}
private void displayViews() {
switch (mDisplayMode) {
case MESSAGE_LIST: {
showMessageList();
break;
}
case MESSAGE_VIEW: {
showMessageView();
break;
}
case SPLIT_VIEW: {
mMessageListWasDisplayed = true;
if (mMessageViewFragment == null) {
showMessageViewPlaceHolder();
} else {
MessageReference activeMessage = mMessageViewFragment.getMessageReference();
if (activeMessage != null) {
mMessageListFragment.setActiveMessage(activeMessage);
}
}
break;
}
}
}
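    /**
     * Extract the search specification or message reference from the intent that started this
     * activity.
     *
     * @return {@code false} if the referenced account is unavailable and the caller should abort,
     *         {@code true} otherwise.
     */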
private boolean decodeExtras(Intent intent) {
String action = intent.getAction();
if (Intent.ACTION_VIEW.equals(action) && intent.getData() != null) {
Uri uri = intent.getData();
List<String> segmentList = uri.getPathSegments();
String accountId = segmentList.get(0);
Collection<Account> accounts = Preferences.getPreferences(this).getAvailableAccounts();
for (Account account : accounts) {
if (String.valueOf(account.getAccountNumber()).equals(accountId)) {
String folderName = segmentList.get(1);
String messageUid = segmentList.get(2);
mMessageReference = new MessageReference(account.getUuid(), folderName, messageUid, null);
break;
}
}
} else if (ACTION_SHORTCUT.equals(action)) {
// Handle shortcut intents
String specialFolder = intent.getStringExtra(EXTRA_SPECIAL_FOLDER);
if (SearchAccount.UNIFIED_INBOX.equals(specialFolder)) {
mSearch = SearchAccount.createUnifiedInboxAccount(this).getRelatedSearch();
} else if (SearchAccount.ALL_MESSAGES.equals(specialFolder)) {
mSearch = SearchAccount.createAllMessagesAccount(this).getRelatedSearch();
}
} else if (intent.getStringExtra(SearchManager.QUERY) != null) {
            // check if this intent comes from the system search (remote)
if (Intent.ACTION_SEARCH.equals(intent.getAction())) {
//Query was received from Search Dialog
String query = intent.getStringExtra(SearchManager.QUERY).trim();
mSearch = new LocalSearch(getString(R.string.search_results));
mSearch.setManualSearch(true);
mNoThreading = true;
mSearch.or(new SearchCondition(SearchField.SENDER, Attribute.CONTAINS, query));
mSearch.or(new SearchCondition(SearchField.SUBJECT, Attribute.CONTAINS, query));
mSearch.or(new SearchCondition(SearchField.MESSAGE_CONTENTS, Attribute.CONTAINS, query));
Bundle appData = intent.getBundleExtra(SearchManager.APP_DATA);
if (appData != null) {
mSearch.addAccountUuid(appData.getString(EXTRA_SEARCH_ACCOUNT));
// searches started from a folder list activity will provide an account, but no folder
if (appData.getString(EXTRA_SEARCH_FOLDER) != null) {
mSearch.addAllowedFolder(appData.getString(EXTRA_SEARCH_FOLDER));
}
} else {
mSearch.addAccountUuid(LocalSearch.ALL_ACCOUNTS);
}
}
} else if (intent.hasExtra(EXTRA_SEARCH_OLD)) {
mSearch = intent.getParcelableExtra(EXTRA_SEARCH_OLD);
mNoThreading = intent.getBooleanExtra(EXTRA_NO_THREADING, false);
} else {
// regular LocalSearch object was passed
mSearch = intent.hasExtra(EXTRA_SEARCH) ?
ParcelableUtil.unmarshall(intent.getByteArrayExtra(EXTRA_SEARCH), LocalSearch.CREATOR) : null;
mNoThreading = intent.getBooleanExtra(EXTRA_NO_THREADING, false);
}
if (mMessageReference == null) {
String messageReferenceString = intent.getStringExtra(EXTRA_MESSAGE_REFERENCE);
mMessageReference = MessageReference.parse(messageReferenceString);
}
if (mMessageReference != null) {
mSearch = new LocalSearch();
mSearch.addAccountUuid(mMessageReference.getAccountUuid());
mSearch.addAllowedFolder(mMessageReference.getFolderName());
}
if (mSearch == null) {
// We've most likely been started by an old unread widget
String accountUuid = intent.getStringExtra("account");
String folderName = intent.getStringExtra("folder");
mSearch = new LocalSearch(folderName);
mSearch.addAccountUuid((accountUuid == null) ? "invalid" : accountUuid);
if (folderName != null) {
mSearch.addAllowedFolder(folderName);
}
}
Preferences prefs = Preferences.getPreferences(getApplicationContext());
String[] accountUuids = mSearch.getAccountUuids();
if (mSearch.searchAllAccounts()) {
List<Account> accounts = prefs.getAccounts();
mSingleAccountMode = (accounts.size() == 1);
if (mSingleAccountMode) {
mAccount = accounts.get(0);
}
} else {
mSingleAccountMode = (accountUuids.length == 1);
if (mSingleAccountMode) {
mAccount = prefs.getAccount(accountUuids[0]);
}
}
mSingleFolderMode = mSingleAccountMode && (mSearch.getFolderNames().size() == 1);
if (mSingleAccountMode && (mAccount == null || !mAccount.isAvailable(this))) {
Timber.i("not opening MessageList of unavailable account");
onAccountUnavailable();
return false;
}
if (mSingleFolderMode) {
mFolderName = mSearch.getFolderNames().get(0);
}
// now we know if we are in single account mode and need a subtitle
mActionBarSubTitle.setVisibility((!mSingleFolderMode) ? View.GONE : View.VISIBLE);
return true;
}
@Override
public void onPause() {
super.onPause();
StorageManager.getInstance(getApplication()).removeListener(mStorageListener);
}
@Override
public void onResume() {
super.onResume();
if (!(this instanceof Search)) {
//necessary b/c no guarantee Search.onStop will be called before MessageList.onResume
//when returning from search results
Search.setActive(false);
}
if (mAccount != null && !mAccount.isAvailable(this)) {
onAccountUnavailable();
return;
}
StorageManager.getInstance(getApplication()).addListener(mStorageListener);
}
@Override
public void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
outState.putSerializable(STATE_DISPLAY_MODE, mDisplayMode);
outState.putBoolean(STATE_MESSAGE_LIST_WAS_DISPLAYED, mMessageListWasDisplayed);
outState.putInt(STATE_FIRST_BACK_STACK_ID, mFirstBackStackId);
}
@Override
public void onRestoreInstanceState(Bundle savedInstanceState) {
mMessageListWasDisplayed = savedInstanceState.getBoolean(STATE_MESSAGE_LIST_WAS_DISPLAYED);
mFirstBackStackId = savedInstanceState.getInt(STATE_FIRST_BACK_STACK_ID);
}
private void initializeActionBar() {
mActionBar = getActionBar();
mActionBar.setDisplayShowCustomEnabled(true);
mActionBar.setCustomView(R.layout.actionbar_custom);
View customView = mActionBar.getCustomView();
mActionBarMessageList = customView.findViewById(R.id.actionbar_message_list);
mActionBarMessageView = customView.findViewById(R.id.actionbar_message_view);
mActionBarSubject = (MessageTitleView) customView.findViewById(R.id.message_title_view);
mActionBarTitle = (TextView) customView.findViewById(R.id.actionbar_title_first);
mActionBarSubTitle = (TextView) customView.findViewById(R.id.actionbar_title_sub);
mActionBarUnread = (TextView) customView.findViewById(R.id.actionbar_unread_count);
mActionBarProgress = (ProgressBar) customView.findViewById(R.id.actionbar_progress);
mActionButtonIndeterminateProgress = getActionButtonIndeterminateProgress();
mActionBar.setDisplayHomeAsUpEnabled(true);
}
@SuppressLint("InflateParams")
private View getActionButtonIndeterminateProgress() {
return getLayoutInflater().inflate(R.layout.actionbar_indeterminate_progress_actionview, null);
}
@Override
public boolean dispatchKeyEvent(KeyEvent event) {
boolean ret = false;
if (KeyEvent.ACTION_DOWN == event.getAction()) {
ret = onCustomKeyDown(event.getKeyCode(), event);
}
if (!ret) {
ret = super.dispatchKeyEvent(event);
}
return ret;
}
@Override
public void onBackPressed() {
if (mDisplayMode == DisplayMode.MESSAGE_VIEW && mMessageListWasDisplayed) {
showMessageList();
} else {
super.onBackPressed();
}
}
/**
* Handle hotkeys
*
* <p>
* This method is called by {@link #dispatchKeyEvent(KeyEvent)} before any view had the chance
* to consume this key event.
* </p>
*
* @param keyCode
* The value in {@code event.getKeyCode()}.
* @param event
* Description of the key event.
*
* @return {@code true} if this event was consumed.
*/
public boolean onCustomKeyDown(final int keyCode, final KeyEvent event) {
switch (keyCode) {
case KeyEvent.KEYCODE_VOLUME_UP: {
if (mMessageViewFragment != null && mDisplayMode != DisplayMode.MESSAGE_LIST &&
K9.useVolumeKeysForNavigationEnabled()) {
showPreviousMessage();
return true;
} else if (mDisplayMode != DisplayMode.MESSAGE_VIEW &&
K9.useVolumeKeysForListNavigationEnabled()) {
mMessageListFragment.onMoveUp();
return true;
}
break;
}
case KeyEvent.KEYCODE_VOLUME_DOWN: {
if (mMessageViewFragment != null && mDisplayMode != DisplayMode.MESSAGE_LIST &&
K9.useVolumeKeysForNavigationEnabled()) {
showNextMessage();
return true;
} else if (mDisplayMode != DisplayMode.MESSAGE_VIEW &&
K9.useVolumeKeysForListNavigationEnabled()) {
mMessageListFragment.onMoveDown();
return true;
}
break;
}
case KeyEvent.KEYCODE_C: {
mMessageListFragment.onCompose();
return true;
}
case KeyEvent.KEYCODE_Q: {
if (mMessageListFragment != null && mMessageListFragment.isSingleAccountMode()) {
onShowFolderList();
}
return true;
}
case KeyEvent.KEYCODE_O: {
mMessageListFragment.onCycleSort();
return true;
}
case KeyEvent.KEYCODE_I: {
mMessageListFragment.onReverseSort();
return true;
}
case KeyEvent.KEYCODE_DEL:
case KeyEvent.KEYCODE_D: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onDelete();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onDelete();
}
return true;
}
case KeyEvent.KEYCODE_S: {
mMessageListFragment.toggleMessageSelect();
return true;
}
case KeyEvent.KEYCODE_G: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onToggleFlagged();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onToggleFlagged();
}
return true;
}
case KeyEvent.KEYCODE_M: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onMove();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onMove();
}
return true;
}
case KeyEvent.KEYCODE_V: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onArchive();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onArchive();
}
return true;
}
case KeyEvent.KEYCODE_Y: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onCopy();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onCopy();
}
return true;
}
case KeyEvent.KEYCODE_Z: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onToggleRead();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onToggleRead();
}
return true;
}
case KeyEvent.KEYCODE_F: {
if (mMessageViewFragment != null) {
mMessageViewFragment.onForward();
}
return true;
}
case KeyEvent.KEYCODE_A: {
if (mMessageViewFragment != null) {
mMessageViewFragment.onReplyAll();
}
return true;
}
case KeyEvent.KEYCODE_R: {
if (mMessageViewFragment != null) {
mMessageViewFragment.onReply();
}
return true;
}
case KeyEvent.KEYCODE_J:
case KeyEvent.KEYCODE_P: {
if (mMessageViewFragment != null) {
showPreviousMessage();
}
return true;
}
case KeyEvent.KEYCODE_N:
case KeyEvent.KEYCODE_K: {
if (mMessageViewFragment != null) {
showNextMessage();
}
return true;
}
/* FIXME
case KeyEvent.KEYCODE_Z: {
mMessageViewFragment.zoom(event);
return true;
}*/
case KeyEvent.KEYCODE_H: {
Toast toast = Toast.makeText(this, R.string.message_list_help_key, Toast.LENGTH_LONG);
toast.show();
return true;
}
case KeyEvent.KEYCODE_DPAD_LEFT: {
if (mMessageViewFragment != null && mDisplayMode == DisplayMode.MESSAGE_VIEW) {
return showPreviousMessage();
}
return false;
}
case KeyEvent.KEYCODE_DPAD_RIGHT: {
if (mMessageViewFragment != null && mDisplayMode == DisplayMode.MESSAGE_VIEW) {
return showNextMessage();
}
return false;
}
}
return false;
}
@Override
public boolean onKeyUp(int keyCode, KeyEvent event) {
// Swallow these events too to avoid the audible notification of a volume change
if (K9.useVolumeKeysForListNavigationEnabled()) {
if ((keyCode == KeyEvent.KEYCODE_VOLUME_UP) || (keyCode == KeyEvent.KEYCODE_VOLUME_DOWN)) {
Timber.v("Swallowed key up.");
return true;
}
}
return super.onKeyUp(keyCode, event);
}
private void onAccounts() {
Accounts.listAccounts(this);
finish();
}
private void onShowFolderList() {
FolderList.actionHandleAccount(this, mAccount);
finish();
}
private void onEditPrefs() {
Prefs.actionPrefs(this);
}
private void onEditAccount() {
AccountSettings.actionSettings(this, mAccount);
}
@Override
public boolean onSearchRequested() {
return mMessageListFragment.onSearchRequested();
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
int itemId = item.getItemId();
switch (itemId) {
case android.R.id.home: {
goBack();
return true;
}
case R.id.compose: {
mMessageListFragment.onCompose();
return true;
}
case R.id.toggle_message_view_theme: {
onToggleTheme();
return true;
}
// MessageList
case R.id.check_mail: {
mMessageListFragment.checkMail();
return true;
}
case R.id.set_sort_date: {
mMessageListFragment.changeSort(SortType.SORT_DATE);
return true;
}
case R.id.set_sort_arrival: {
mMessageListFragment.changeSort(SortType.SORT_ARRIVAL);
return true;
}
case R.id.set_sort_subject: {
mMessageListFragment.changeSort(SortType.SORT_SUBJECT);
return true;
}
case R.id.set_sort_sender: {
mMessageListFragment.changeSort(SortType.SORT_SENDER);
return true;
}
case R.id.set_sort_flag: {
mMessageListFragment.changeSort(SortType.SORT_FLAGGED);
return true;
}
case R.id.set_sort_unread: {
mMessageListFragment.changeSort(SortType.SORT_UNREAD);
return true;
}
case R.id.set_sort_attach: {
mMessageListFragment.changeSort(SortType.SORT_ATTACHMENT);
return true;
}
case R.id.select_all: {
mMessageListFragment.selectAll();
return true;
}
case R.id.app_settings: {
onEditPrefs();
return true;
}
case R.id.account_settings: {
onEditAccount();
return true;
}
case R.id.search: {
mMessageListFragment.onSearchRequested();
return true;
}
case R.id.search_remote: {
mMessageListFragment.onRemoteSearch();
return true;
}
case R.id.mark_all_as_read: {
mMessageListFragment.confirmMarkAllAsRead();
return true;
}
case R.id.show_folder_list: {
onShowFolderList();
return true;
}
// MessageView
case R.id.next_message: {
showNextMessage();
return true;
}
case R.id.previous_message: {
showPreviousMessage();
return true;
}
case R.id.delete: {
mMessageViewFragment.onDelete();
return true;
}
case R.id.reply: {
mMessageViewFragment.onReply();
return true;
}
case R.id.reply_all: {
mMessageViewFragment.onReplyAll();
return true;
}
case R.id.forward: {
mMessageViewFragment.onForward();
return true;
}
case R.id.share: {
mMessageViewFragment.onSendAlternate();
return true;
}
case R.id.toggle_unread: {
mMessageViewFragment.onToggleRead();
return true;
}
case R.id.archive:
case R.id.refile_archive: {
mMessageViewFragment.onArchive();
return true;
}
case R.id.spam:
case R.id.refile_spam: {
mMessageViewFragment.onSpam();
return true;
}
case R.id.move:
case R.id.refile_move: {
mMessageViewFragment.onMove();
return true;
}
case R.id.copy:
case R.id.refile_copy: {
mMessageViewFragment.onCopy();
return true;
}
case R.id.select_text: {
mMessageViewFragment.onSelectText();
return true;
}
case R.id.show_headers:
case R.id.hide_headers: {
mMessageViewFragment.onToggleAllHeadersView();
updateMenu();
return true;
}
}
if (!mSingleFolderMode) {
// None of the options after this point are "safe" for search results
//TODO: This is not true for "unread" and "starred" searches in regular folders
return false;
}
switch (itemId) {
case R.id.send_messages: {
mMessageListFragment.onSendPendingMessages();
return true;
}
case R.id.folder_settings: {
if (mFolderName != null) {
FolderSettings.actionSettings(this, mAccount, mFolderName);
}
return true;
}
case R.id.expunge: {
mMessageListFragment.onExpunge();
return true;
}
default: {
return super.onOptionsItemSelected(item);
}
}
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.message_list_option, menu);
mMenu = menu;
mMenuButtonCheckMail = menu.findItem(R.id.check_mail);
return true;
}
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
super.onPrepareOptionsMenu(menu);
configureMenu(menu);
return true;
}
/**
* Hide menu items not appropriate for the current context.
*
* <p><strong>Note:</strong>
* Please adjust the comments in {@code res/menu/message_list_option.xml} if you change the
* visibility of a menu item in this method.
* </p>
*
* @param menu
* The {@link Menu} instance that should be modified. May be {@code null}; in that case
* the method does nothing and immediately returns.
*/
private void configureMenu(Menu menu) {
if (menu == null) {
return;
}
// Set visibility of account/folder settings menu items
if (mMessageListFragment == null) {
menu.findItem(R.id.account_settings).setVisible(false);
menu.findItem(R.id.folder_settings).setVisible(false);
} else {
menu.findItem(R.id.account_settings).setVisible(
mMessageListFragment.isSingleAccountMode());
menu.findItem(R.id.folder_settings).setVisible(
mMessageListFragment.isSingleFolderMode());
}
/*
* Set visibility of menu items related to the message view
*/
if (mDisplayMode == DisplayMode.MESSAGE_LIST
|| mMessageViewFragment == null
|| !mMessageViewFragment.isInitialized()) {
menu.findItem(R.id.next_message).setVisible(false);
menu.findItem(R.id.previous_message).setVisible(false);
menu.findItem(R.id.single_message_options).setVisible(false);
menu.findItem(R.id.delete).setVisible(false);
menu.findItem(R.id.compose).setVisible(false);
menu.findItem(R.id.archive).setVisible(false);
menu.findItem(R.id.move).setVisible(false);
menu.findItem(R.id.copy).setVisible(false);
menu.findItem(R.id.spam).setVisible(false);
menu.findItem(R.id.refile).setVisible(false);
menu.findItem(R.id.toggle_unread).setVisible(false);
menu.findItem(R.id.select_text).setVisible(false);
menu.findItem(R.id.toggle_message_view_theme).setVisible(false);
menu.findItem(R.id.show_headers).setVisible(false);
menu.findItem(R.id.hide_headers).setVisible(false);
} else {
// hide prev/next buttons in split mode
if (mDisplayMode != DisplayMode.MESSAGE_VIEW) {
menu.findItem(R.id.next_message).setVisible(false);
menu.findItem(R.id.previous_message).setVisible(false);
} else {
MessageReference ref = mMessageViewFragment.getMessageReference();
boolean initialized = (mMessageListFragment != null &&
mMessageListFragment.isLoadFinished());
boolean canDoPrev = (initialized && !mMessageListFragment.isFirst(ref));
boolean canDoNext = (initialized && !mMessageListFragment.isLast(ref));
MenuItem prev = menu.findItem(R.id.previous_message);
prev.setEnabled(canDoPrev);
prev.getIcon().setAlpha(canDoPrev ? 255 : 127);
MenuItem next = menu.findItem(R.id.next_message);
next.setEnabled(canDoNext);
next.getIcon().setAlpha(canDoNext ? 255 : 127);
}
MenuItem toggleTheme = menu.findItem(R.id.toggle_message_view_theme);
if (K9.useFixedMessageViewTheme()) {
toggleTheme.setVisible(false);
} else {
// Set title of menu item to switch to dark/light theme
if (K9.getK9MessageViewTheme() == K9.Theme.DARK) {
toggleTheme.setTitle(R.string.message_view_theme_action_light);
} else {
toggleTheme.setTitle(R.string.message_view_theme_action_dark);
}
toggleTheme.setVisible(true);
}
// Set title of menu item to toggle the read state of the currently displayed message
if (mMessageViewFragment.isMessageRead()) {
menu.findItem(R.id.toggle_unread).setTitle(R.string.mark_as_unread_action);
} else {
menu.findItem(R.id.toggle_unread).setTitle(R.string.mark_as_read_action);
}
// Jellybean has built-in long press selection support
menu.findItem(R.id.select_text).setVisible(Build.VERSION.SDK_INT < 16);
menu.findItem(R.id.delete).setVisible(K9.isMessageViewDeleteActionVisible());
/*
* Set visibility of copy, move, archive, spam in action bar and refile submenu
*/
if (mMessageViewFragment.isCopyCapable()) {
menu.findItem(R.id.copy).setVisible(K9.isMessageViewCopyActionVisible());
menu.findItem(R.id.refile_copy).setVisible(true);
} else {
menu.findItem(R.id.copy).setVisible(false);
menu.findItem(R.id.refile_copy).setVisible(false);
}
if (mMessageViewFragment.isMoveCapable()) {
boolean canMessageBeArchived = mMessageViewFragment.canMessageBeArchived();
boolean canMessageBeMovedToSpam = mMessageViewFragment.canMessageBeMovedToSpam();
menu.findItem(R.id.move).setVisible(K9.isMessageViewMoveActionVisible());
menu.findItem(R.id.archive).setVisible(canMessageBeArchived &&
K9.isMessageViewArchiveActionVisible());
menu.findItem(R.id.spam).setVisible(canMessageBeMovedToSpam &&
K9.isMessageViewSpamActionVisible());
menu.findItem(R.id.refile_move).setVisible(true);
menu.findItem(R.id.refile_archive).setVisible(canMessageBeArchived);
menu.findItem(R.id.refile_spam).setVisible(canMessageBeMovedToSpam);
} else {
menu.findItem(R.id.move).setVisible(false);
menu.findItem(R.id.archive).setVisible(false);
menu.findItem(R.id.spam).setVisible(false);
menu.findItem(R.id.refile).setVisible(false);
}
if (mMessageViewFragment.allHeadersVisible()) {
menu.findItem(R.id.show_headers).setVisible(false);
} else {
menu.findItem(R.id.hide_headers).setVisible(false);
}
}
/*
* Set visibility of menu items related to the message list
*/
// Hide both search menu items by default and enable one when appropriate
menu.findItem(R.id.search).setVisible(false);
menu.findItem(R.id.search_remote).setVisible(false);
if (mDisplayMode == DisplayMode.MESSAGE_VIEW || mMessageListFragment == null ||
!mMessageListFragment.isInitialized()) {
menu.findItem(R.id.check_mail).setVisible(false);
menu.findItem(R.id.set_sort).setVisible(false);
menu.findItem(R.id.select_all).setVisible(false);
menu.findItem(R.id.send_messages).setVisible(false);
menu.findItem(R.id.expunge).setVisible(false);
menu.findItem(R.id.mark_all_as_read).setVisible(false);
menu.findItem(R.id.show_folder_list).setVisible(false);
} else {
menu.findItem(R.id.set_sort).setVisible(true);
menu.findItem(R.id.select_all).setVisible(true);
menu.findItem(R.id.compose).setVisible(true);
menu.findItem(R.id.mark_all_as_read).setVisible(
mMessageListFragment.isMarkAllAsReadSupported());
if (!mMessageListFragment.isSingleAccountMode()) {
menu.findItem(R.id.expunge).setVisible(false);
menu.findItem(R.id.send_messages).setVisible(false);
menu.findItem(R.id.show_folder_list).setVisible(false);
} else {
menu.findItem(R.id.send_messages).setVisible(mMessageListFragment.isOutbox());
menu.findItem(R.id.expunge).setVisible(mMessageListFragment.isRemoteFolder() &&
mMessageListFragment.isAccountExpungeCapable());
menu.findItem(R.id.show_folder_list).setVisible(true);
}
menu.findItem(R.id.check_mail).setVisible(mMessageListFragment.isCheckMailSupported());
// If this is an explicit local search, show the option to search on the server
if (!mMessageListFragment.isRemoteSearch() &&
mMessageListFragment.isRemoteSearchAllowed()) {
menu.findItem(R.id.search_remote).setVisible(true);
} else if (!mMessageListFragment.isManualSearch()) {
menu.findItem(R.id.search).setVisible(true);
}
}
}
protected void onAccountUnavailable() {
finish();
// TODO inform user about account unavailability using Toast
Accounts.listAccounts(this);
}
public void setActionBarTitle(String title) {
mActionBarTitle.setText(title);
}
public void setActionBarSubTitle(String subTitle) {
mActionBarSubTitle.setText(subTitle);
}
public void setActionBarUnread(int unread) {
if (unread == 0) {
mActionBarUnread.setVisibility(View.GONE);
} else {
mActionBarUnread.setVisibility(View.VISIBLE);
mActionBarUnread.setText(String.format("%d", unread));
}
}
@Override
public void setMessageListTitle(String title) {
setActionBarTitle(title);
}
@Override
public void setMessageListSubTitle(String subTitle) {
setActionBarSubTitle(subTitle);
}
@Override
public void setUnreadCount(int unread) {
setActionBarUnread(unread);
}
@Override
public void setMessageListProgress(int progress) {
setProgress(progress);
}
@Override
public void openMessage(MessageReference messageReference) {
Preferences prefs = Preferences.getPreferences(getApplicationContext());
Account account = prefs.getAccount(messageReference.getAccountUuid());
String folderName = messageReference.getFolderName();
if (folderName.equals(account.getDraftsFolderName())) {
MessageActions.actionEditDraft(this, messageReference);
} else {
mMessageViewContainer.removeView(mMessageViewPlaceHolder);
if (mMessageListFragment != null) {
mMessageListFragment.setActiveMessage(messageReference);
}
MessageViewFragment fragment = MessageViewFragment.newInstance(messageReference);
FragmentTransaction ft = getFragmentManager().beginTransaction();
ft.replace(R.id.message_view_container, fragment);
mMessageViewFragment = fragment;
ft.commit();
if (mDisplayMode != DisplayMode.SPLIT_VIEW) {
showMessageView();
}
}
}
@Override
public void onResendMessage(MessageReference messageReference) {
MessageActions.actionEditDraft(this, messageReference);
}
@Override
public void onForward(MessageReference messageReference) {
onForward(messageReference, null);
}
@Override
public void onForward(MessageReference messageReference, Parcelable decryptionResultForReply) {
MessageActions.actionForward(this, messageReference, decryptionResultForReply);
}
@Override
public void onReply(MessageReference messageReference) {
onReply(messageReference, null);
}
@Override
public void onReply(MessageReference messageReference, Parcelable decryptionResultForReply) {
MessageActions.actionReply(this, messageReference, false, decryptionResultForReply);
}
@Override
public void onReplyAll(MessageReference messageReference) {
onReplyAll(messageReference, null);
}
@Override
public void onReplyAll(MessageReference messageReference, Parcelable decryptionResultForReply) {
MessageActions.actionReply(this, messageReference, true, decryptionResultForReply);
}
@Override
public void onCompose(Account account) {
MessageActions.actionCompose(this, account);
}
@Override
public void showMoreFromSameSender(String senderAddress) {
LocalSearch tmpSearch = new LocalSearch("From " + senderAddress);
tmpSearch.addAccountUuids(mSearch.getAccountUuids());
tmpSearch.and(SearchField.SENDER, senderAddress, Attribute.CONTAINS);
MessageListFragment fragment = MessageListFragment.newInstance(tmpSearch, false, false);
addMessageListFragment(fragment, true);
}
@Override
public void onBackStackChanged() {
findFragments();
if (mDisplayMode == DisplayMode.SPLIT_VIEW) {
showMessageViewPlaceHolder();
}
configureMenu(mMenu);
}
@Override
public void onSwipeRightToLeft(MotionEvent e1, MotionEvent e2) {
if (mMessageListFragment != null && mDisplayMode != DisplayMode.MESSAGE_VIEW) {
mMessageListFragment.onSwipeRightToLeft(e1, e2);
}
}
@Override
public void onSwipeLeftToRight(MotionEvent e1, MotionEvent e2) {
if (mMessageListFragment != null && mDisplayMode != DisplayMode.MESSAGE_VIEW) {
mMessageListFragment.onSwipeLeftToRight(e1, e2);
}
}
private final class StorageListenerImplementation implements StorageManager.StorageListener {
@Override
public void onUnmount(String providerId) {
if (mAccount != null && providerId.equals(mAccount.getLocalStorageProviderId())) {
runOnUiThread(new Runnable() {
@Override
public void run() {
onAccountUnavailable();
}
});
}
}
@Override
public void onMount(String providerId) {
// no-op
}
}
private void addMessageListFragment(MessageListFragment fragment, boolean addToBackStack) {
FragmentTransaction ft = getFragmentManager().beginTransaction();
ft.replace(R.id.message_list_container, fragment);
if (addToBackStack)
ft.addToBackStack(null);
mMessageListFragment = fragment;
int transactionId = ft.commit();
if (transactionId >= 0 && mFirstBackStackId < 0) {
mFirstBackStackId = transactionId;
}
}
@Override
public boolean startSearch(Account account, String folderName) {
// If this search was started from a MessageList of a single folder, pass along that folder info
// so that we can enable remote search.
if (account != null && folderName != null) {
final Bundle appData = new Bundle();
appData.putString(EXTRA_SEARCH_ACCOUNT, account.getUuid());
appData.putString(EXTRA_SEARCH_FOLDER, folderName);
startSearch(null, false, appData, false);
} else {
// TODO Handle the case where we're searching from within a search result.
startSearch(null, false, null, false);
}
return true;
}
@Override
public void showThread(Account account, String folderName, long threadRootId) {
showMessageViewPlaceHolder();
LocalSearch tmpSearch = new LocalSearch();
tmpSearch.addAccountUuid(account.getUuid());
tmpSearch.and(SearchField.THREAD_ID, String.valueOf(threadRootId), Attribute.EQUALS);
MessageListFragment fragment = MessageListFragment.newInstance(tmpSearch, true, false);
addMessageListFragment(fragment, true);
}
private void showMessageViewPlaceHolder() {
removeMessageViewFragment();
// Add placeholder view if necessary
if (mMessageViewPlaceHolder.getParent() == null) {
mMessageViewContainer.addView(mMessageViewPlaceHolder);
}
mMessageListFragment.setActiveMessage(null);
}
/**
* Remove MessageViewFragment if necessary.
*/
private void removeMessageViewFragment() {
if (mMessageViewFragment != null) {
FragmentTransaction ft = getFragmentManager().beginTransaction();
ft.remove(mMessageViewFragment);
mMessageViewFragment = null;
ft.commit();
showDefaultTitleView();
}
}
private void removeMessageListFragment() {
FragmentTransaction ft = getFragmentManager().beginTransaction();
ft.remove(mMessageListFragment);
mMessageListFragment = null;
ft.commit();
}
@Override
public void remoteSearchStarted() {
// Remove action button for remote search
configureMenu(mMenu);
}
@Override
public void goBack() {
FragmentManager fragmentManager = getFragmentManager();
if (mDisplayMode == DisplayMode.MESSAGE_VIEW) {
showMessageList();
} else if (fragmentManager.getBackStackEntryCount() > 0) {
fragmentManager.popBackStack();
} else if (mMessageListFragment.isManualSearch()) {
finish();
} else if (!mSingleFolderMode) {
onAccounts();
} else {
onShowFolderList();
}
}
@Override
public void enableActionBarProgress(boolean enable) {
if (mMenuButtonCheckMail != null && mMenuButtonCheckMail.isVisible()) {
mActionBarProgress.setVisibility(ProgressBar.GONE);
if (enable) {
mMenuButtonCheckMail
.setActionView(mActionButtonIndeterminateProgress);
} else {
mMenuButtonCheckMail.setActionView(null);
}
} else {
if (mMenuButtonCheckMail != null)
mMenuButtonCheckMail.setActionView(null);
if (enable) {
mActionBarProgress.setVisibility(ProgressBar.VISIBLE);
} else {
mActionBarProgress.setVisibility(ProgressBar.GONE);
}
}
}
@Override
public void displayMessageSubject(String subject) {
if (mDisplayMode == DisplayMode.MESSAGE_VIEW) {
mActionBarSubject.setText(subject);
} else {
mActionBarSubject.showSubjectInMessageHeader();
}
}
@Override
public void showNextMessageOrReturn() {
if (K9.messageViewReturnToList() || !showLogicalNextMessage()) {
if (mDisplayMode == DisplayMode.SPLIT_VIEW) {
showMessageViewPlaceHolder();
} else {
showMessageList();
}
}
}
/**
* Shows the next message in the direction the user was displaying messages.
*
* @return {@code true} if another message was shown, {@code false} otherwise
*/
private boolean showLogicalNextMessage() {
boolean result = false;
if (mLastDirection == NEXT) {
result = showNextMessage();
} else if (mLastDirection == PREVIOUS) {
result = showPreviousMessage();
}
if (!result) {
result = showNextMessage() || showPreviousMessage();
}
return result;
}
@Override
public void setProgress(boolean enable) {
setProgressBarIndeterminateVisibility(enable);
}
@Override
public void messageHeaderViewAvailable(MessageHeader header) {
mActionBarSubject.setMessageHeader(header);
}
private boolean showNextMessage() {
MessageReference ref = mMessageViewFragment.getMessageReference();
if (ref != null) {
if (mMessageListFragment.openNext(ref)) {
mLastDirection = NEXT;
return true;
}
}
return false;
}
private boolean showPreviousMessage() {
MessageReference ref = mMessageViewFragment.getMessageReference();
if (ref != null) {
if (mMessageListFragment.openPrevious(ref)) {
mLastDirection = PREVIOUS;
return true;
}
}
return false;
}
private void showMessageList() {
mMessageListWasDisplayed = true;
mDisplayMode = DisplayMode.MESSAGE_LIST;
mViewSwitcher.showFirstView();
mMessageListFragment.setActiveMessage(null);
showDefaultTitleView();
configureMenu(mMenu);
}
private void showMessageView() {
mDisplayMode = DisplayMode.MESSAGE_VIEW;
if (!mMessageListWasDisplayed) {
mViewSwitcher.setAnimateFirstView(false);
}
mViewSwitcher.showSecondView();
showMessageTitleView();
configureMenu(mMenu);
}
@Override
public void updateMenu() {
invalidateOptionsMenu();
}
@Override
public void disableDeleteAction() {
mMenu.findItem(R.id.delete).setEnabled(false);
}
private void onToggleTheme() {
if (K9.getK9MessageViewTheme() == K9.Theme.DARK) {
K9.setK9MessageViewThemeSetting(K9.Theme.LIGHT);
} else {
K9.setK9MessageViewThemeSetting(K9.Theme.DARK);
}
new Thread(new Runnable() {
@Override
public void run() {
Context appContext = getApplicationContext();
Preferences prefs = Preferences.getPreferences(appContext);
StorageEditor editor = prefs.getStorage().edit();
K9.save(editor);
editor.commit();
}
}).start();
recreate();
}
private void showDefaultTitleView() {
mActionBarMessageView.setVisibility(View.GONE);
mActionBarMessageList.setVisibility(View.VISIBLE);
if (mMessageListFragment != null) {
mMessageListFragment.updateTitle();
}
mActionBarSubject.setMessageHeader(null);
}
private void showMessageTitleView() {
mActionBarMessageList.setVisibility(View.GONE);
mActionBarMessageView.setVisibility(View.VISIBLE);
if (mMessageViewFragment != null) {
displayMessageSubject(null);
mMessageViewFragment.updateTitle();
}
}
@Override
public void onSwitchComplete(int displayedChild) {
if (displayedChild == 0) {
removeMessageViewFragment();
}
}
@Override
public void startIntentSenderForResult(IntentSender intent, int requestCode, Intent fillInIntent,
int flagsMask, int flagsValues, int extraFlags) throws SendIntentException {
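// Set a marker bit on the request code so results delivered to onActivityResult
// can be recognized and routed back to the message view fragment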
requestCode |= REQUEST_MASK_PENDING_INTENT;
super.startIntentSenderForResult(intent, requestCode, fillInIntent, flagsMask, flagsValues, extraFlags);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if ((requestCode & REQUEST_MASK_PENDING_INTENT) == REQUEST_MASK_PENDING_INTENT) {
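// Clear the marker bit to recover the original request code before forwarding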
requestCode ^= REQUEST_MASK_PENDING_INTENT;
if (mMessageViewFragment != null) {
mMessageViewFragment.onPendingIntentResult(requestCode, resultCode, data);
}
}
}
}
| 1 | 16,102 | Why do we need to override this if all we're doing is passing it up? | k9mail-k-9 | java |
@@ -356,6 +356,9 @@ void CmpSeabaseDDL::createSeabaseTableLike(
query += keyClause;
}
+ // send any user CQDs down
+ Lng32 retCode = sendAllControls(FALSE, FALSE, TRUE);
+
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
| 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: CmpSeabaseDDLtable.cpp
* Description: Implements ddl operations for Seabase tables.
*
*
* Created: 6/30/2013
* Language: C++
*
*
*****************************************************************************
*/
#include "CmpSeabaseDDLincludes.h"
#include "CmpSeabaseDDLauth.h"
#include "ElemDDLColDefault.h"
#include "NumericType.h"
#include "ComUser.h"
#include "keycolumns.h"
#include "ElemDDLColRef.h"
#include "ElemDDLColName.h"
#include "CmpDDLCatErrorCodes.h"
#include "Globals.h"
#include "CmpMain.h"
#include "Context.h"
#include "PrivMgrCommands.h"
#include "PrivMgrRoles.h"
#include "PrivMgrComponentPrivileges.h"
// defined in CmpDescribe.cpp
extern short CmpDescribeSeabaseTable (
const CorrName &dtName,
short type, // 1, invoke. 2, showddl. 3, createLike
char* &outbuf,
ULng32 &outbuflen,
CollHeap *heap,
const char * pkeyStr = NULL,
NABoolean withPartns = FALSE,
NABoolean withoutSalt = FALSE,
NABoolean withoutDivisioning = FALSE,
NABoolean noTrailingSemi = FALSE,
// used to add or remove column definition from col list.
// valid for 'createLike' mode. Used for 'alter add/drop col'.
char * colName = NULL,
NABoolean isAdd = FALSE,
const NAColumn * nacol = NULL);
static bool checkSpecifiedPrivs(
ElemDDLPrivActArray & privActsArray,
const char * externalObjectName,
ComObjectType objectType,
NATable * naTable,
std::vector<PrivType> & objectPrivs,
std::vector<ColPrivSpec> & colPrivs);
static bool ElmPrivToPrivType(
OperatorTypeEnum elmPriv,
PrivType & privType,
bool forRevoke = false);
static bool hasValue(
const std::vector<ColPrivSpec> & container,
PrivType value);
static bool hasValue(
const std::vector<PrivType> & container,
PrivType value);
static bool isMDGrantRevokeOK(
const std::vector<PrivType> & objectPrivs,
const std::vector<ColPrivSpec> & colPrivs,
bool isGrant);
static bool isValidPrivTypeForObject(
ComObjectType objectType,
PrivType privType);
void CmpSeabaseDDL::convertVirtTableColumnInfoToDescStruct(
const ComTdbVirtTableColumnInfo * colInfo,
const ComObjectName * objectName,
desc_struct * column_desc)
{
column_desc->body.columns_desc.tablename =
convertNAString(objectName->getExternalName(), STMTHEAP);
char * col_name = new(STMTHEAP) char[strlen(colInfo->colName) + 1];
strcpy(col_name, colInfo->colName);
column_desc->body.columns_desc.colname = col_name;
column_desc->body.columns_desc.colnumber = colInfo->colNumber;
column_desc->body.columns_desc.datatype = colInfo->datatype;
column_desc->body.columns_desc.length = colInfo->length;
if (!(DFS2REC::isInterval(colInfo->datatype)))
column_desc->body.columns_desc.scale = colInfo->scale;
else
column_desc->body.columns_desc.scale = 0;
column_desc->body.columns_desc.precision = colInfo->precision;
column_desc->body.columns_desc.datetimestart = (rec_datetime_field) colInfo->dtStart;
column_desc->body.columns_desc.datetimeend = (rec_datetime_field) colInfo->dtEnd;
if (DFS2REC::isDateTime(colInfo->datatype) || DFS2REC::isInterval(colInfo->datatype))
column_desc->body.columns_desc.datetimefractprec = colInfo->scale;
else
column_desc->body.columns_desc.datetimefractprec = 0;
if (DFS2REC::isInterval(colInfo->datatype))
column_desc->body.columns_desc.intervalleadingprec = colInfo->precision;
else
column_desc->body.columns_desc.intervalleadingprec = 0 ;
column_desc->body.columns_desc.null_flag = colInfo->nullable;
column_desc->body.columns_desc.upshift = colInfo->upshifted;
column_desc->body.columns_desc.character_set = (CharInfo::CharSet) colInfo->charset;
switch (colInfo->columnClass)
{
case COM_USER_COLUMN:
column_desc->body.columns_desc.colclass = 'U';
break;
case COM_SYSTEM_COLUMN:
column_desc->body.columns_desc.colclass = 'S';
break;
default:
CMPASSERT(0);
}
column_desc->body.columns_desc.defaultClass = colInfo->defaultClass;
column_desc->body.columns_desc.colFlags = colInfo->colFlags;
column_desc->body.columns_desc.pictureText =
(char *)STMTHEAP->allocateMemory(340);
NAType::convertTypeToText(column_desc->body.columns_desc.pictureText, //OUT
column_desc->body.columns_desc.datatype,
column_desc->body.columns_desc.length,
column_desc->body.columns_desc.precision,
column_desc->body.columns_desc.scale,
column_desc->body.columns_desc.datetimestart,
column_desc->body.columns_desc.datetimeend,
column_desc->body.columns_desc.datetimefractprec,
column_desc->body.columns_desc.intervalleadingprec,
column_desc->body.columns_desc.upshift,
column_desc->body.columns_desc.caseinsensitive,
(CharInfo::CharSet)column_desc->body.columns_desc.character_set,
(CharInfo::Collation) 1, // default collation
NULL, // displayDataType
0); // displayCaseSpecific
column_desc->body.columns_desc.offset = -1; // not present in colInfo
column_desc->body.columns_desc.caseinsensitive = (short)FALSE; // not present in colInfo
column_desc->body.columns_desc.encoding_charset = (CharInfo::CharSet) column_desc->body.columns_desc.character_set ; // not present in colInfo so we go with the column's charset here.
column_desc->body.columns_desc.collation_sequence = (CharInfo::Collation)1; // not present in colInfo, so we go with default collation here (used in buildEncodeTree for some error handling)
column_desc->body.columns_desc.uec = (Cardinality)0; // not present in colInfo
column_desc->body.columns_desc.highval = 0; // not present in colInfo
column_desc->body.columns_desc.lowval = 0; // not present in colInfo
column_desc->body.columns_desc.defaultvalue = NULL ; // not present in colInfo
column_desc->body.columns_desc.stored_on_disk = 0 ; // not present in colInfo
column_desc->body.columns_desc.computed_column_text = NULL; // not present in colInfo
}
desc_struct * CmpSeabaseDDL::convertVirtTableColumnInfoArrayToDescStructs(
const ComObjectName * objectName,
const ComTdbVirtTableColumnInfo * colInfoArray,
Lng32 numCols)
{
desc_struct * prev_column_desc = NULL;
desc_struct * first_column_desc = NULL;
for (Int32 i = 0; i < numCols; i++)
{
const ComTdbVirtTableColumnInfo* colInfo = &(colInfoArray[i]);
// readtabledef_allocate_desc() requires that HEAP (STMTHEAP)
// be used for operator new herein
desc_struct * column_desc = readtabledef_allocate_desc(DESC_COLUMNS_TYPE);
if (prev_column_desc != NULL)
prev_column_desc->header.next = column_desc;
else
first_column_desc = column_desc;
prev_column_desc = column_desc;
convertVirtTableColumnInfoToDescStruct(colInfo, objectName, column_desc);
}
return first_column_desc;
}
desc_struct * CmpSeabaseDDL::convertVirtTableKeyInfoArrayToDescStructs(
const ComTdbVirtTableKeyInfo *keyInfoArray,
const ComTdbVirtTableColumnInfo *colInfoArray,
Lng32 numKeys)
{
desc_struct * prev_key_desc = NULL;
desc_struct * first_key_desc = NULL;
for (Int32 i = 0; i < numKeys; i++)
{
const ComTdbVirtTableColumnInfo * colInfo = &(colInfoArray[keyInfoArray[i].tableColNum]);
desc_struct * key_desc = readtabledef_allocate_desc(DESC_KEYS_TYPE);
if (prev_key_desc != NULL)
prev_key_desc->header.next = key_desc;
else
first_key_desc = key_desc;
prev_key_desc = key_desc;
key_desc->body.keys_desc.tablecolnumber = keyInfoArray[i].tableColNum;
key_desc->body.keys_desc.keyseqnumber = i;
key_desc->body.keys_desc.ordering = keyInfoArray[i].ordering;
}
return first_key_desc;
}
void CmpSeabaseDDL::createSeabaseTableLike(
StmtDDLCreateTable * createTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
ComObjectName tgtTableName(createTableNode->getTableName(), COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tgtTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString extTgtTableName = tgtTableName.getExternalName(TRUE);
ComObjectName srcTableName(createTableNode->getLikeSourceTableName(), COM_TABLE_NAME);
srcTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
CorrName cn(srcTableName.getObjectNamePart().getInternalName(),
STMTHEAP,
srcTableName.getSchemaNamePart().getInternalName(),
srcTableName.getCatalogNamePart().getInternalName());
ElemDDLColRefArray &keyArray =
(createTableNode->getIsConstraintPKSpecified() ?
createTableNode->getPrimaryKeyColRefArray() :
(createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION ?
createTableNode->getKeyColumnArray() :
createTableNode->getPrimaryKeyColRefArray()));
NAString keyClause;
if ((keyArray.entries() > 0) &&
((createTableNode->getIsConstraintPKSpecified()) ||
(createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION)))
{
if (createTableNode->getIsConstraintPKSpecified())
keyClause = " primary key ( ";
else if (createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION)
keyClause = " store by ( ";
for (CollIndex i = 0; i < keyArray.entries(); i++)
{
NAString colName = keyArray[i]->getColumnName();
// Generate a delimited identifier if source colName is delimited
// Launchpad bug: 1383531
colName/*InExternalFormat*/ = ToAnsiIdentifier (colName/*InInternalFormat*/);
keyClause += colName;
if (i < (keyArray.entries() - 1))
keyClause += ", ";
}
keyClause += ")";
// NOTE: This is not currently supported
*CmpCommon::diags() << DgSqlCode(-3111)
<< DgString0("PRIMARY KEY/STORE BY");
return;
}
// Check for other common options that are currently not supported
// with CREATE TABLE LIKE. Those could all be passed into
// CmpDescribeSeabaseTable as strings if we wanted to support them.
if (createTableNode->isPartitionSpecified() ||
createTableNode->isPartitionBySpecified())
{
*CmpCommon::diags() << DgSqlCode(-3111)
<< DgString0("PARTITION BY");
return;
}
if (createTableNode->isDivisionClauseSpecified())
{
*CmpCommon::diags() << DgSqlCode(-3111)
<< DgString0("DIVISION BY");
return;
}
if (createTableNode->isHbaseOptionsSpecified())
{
*CmpCommon::diags() << DgSqlCode(-3111)
<< DgString0("HBASE table options");
return;
}
ParDDLLikeOptsCreateTable &likeOptions = createTableNode->getLikeOptions();
char * buf = NULL;
ULng32 buflen = 0;
retcode = CmpDescribeSeabaseTable(cn, 3/*createlike*/, buf, buflen, STMTHEAP,
NULL,
likeOptions.getIsWithHorizontalPartitions(),
likeOptions.getIsWithoutSalt(),
likeOptions.getIsWithoutDivision(),
TRUE);
if (retcode)
return;
NAString query = "create table ";
query += extTgtTableName;
query += " ";
NABoolean done = FALSE;
Lng32 curPos = 0;
while (NOT done)
{
short len = *(short*)&buf[curPos];
NAString frag(&buf[curPos+sizeof(short)],
len - ((buf[curPos+len-1]== '\n') ? 1 : 0));
query += frag;
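// advance past the 2-byte length prefix and the fragment, rounded up to the next 8-byte boundary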
curPos += ((((len+sizeof(short))-1)/8)+1)*8;
if (curPos >= buflen)
done = TRUE;
}
if (NOT keyClause.isNull())
{
// add the keyClause
query += keyClause;
}
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Lng32 cliRC = 0;
cliRC = cliInterface.executeImmediate((char*)query.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
return;
}
// ----------------------------------------------------------------------------
// Method: createSeabaseTableExternal
//
// This method creates a Trafodion table that represents a Hive or HBase table
//
// in:
// cliInterface - references to the cli execution structure
// createTableNode - representation of the CREATE TABLE statement
// tgtTableName - the Trafodion external table name to create
// srcTableName - the native source table
//
// returns: 0 - successful, -1 error
//
// any error detected is added to the diags area
// ----------------------------------------------------------------------------
short CmpSeabaseDDL::createSeabaseTableExternal(
ExeCliInterface &cliInterface,
StmtDDLCreateTable * createTableNode,
const ComObjectName &tgtTableName,
const ComObjectName &srcTableName)
{
Int32 retcode = 0;
NABoolean isHive = tgtTableName.isExternalHive();
// go create the schema - if it does not already exist.
NAString createSchemaStmt ("CREATE SCHEMA IF NOT EXISTS ");
createSchemaStmt += tgtTableName.getSchemaNamePartAsAnsiString();
if (isAuthorizationEnabled())
{
createSchemaStmt += " AUTHORIZATION ";
createSchemaStmt += (isHive) ? DB__HIVEROLE : DB__HBASEROLE;
}
Lng32 cliRC = cliInterface.executeImmediate((char*)createSchemaStmt.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
const NAString catalogNamePart = tgtTableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tgtTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tgtTableName.getObjectNamePartAsAnsiString(TRUE);
// Make sure current user has privileges
Int32 objectOwnerID = SUPER_USER;
Int32 schemaOwnerID = SUPER_USER;
ComSchemaClass schemaClass;
retcode = verifyDDLCreateOperationAuthorized(&cliInterface,
SQLOperation::CREATE_TABLE,
catalogNamePart,
schemaNamePart,
schemaClass,
objectOwnerID,
schemaOwnerID);
if (retcode != 0)
{
handleDDLCreateAuthorizationError(retcode,catalogNamePart,schemaNamePart);
return -1;
}
const NAString extTgtTableName = tgtTableName.getExternalName(TRUE);
CorrName cnSrc(srcTableName.getObjectNamePart().getInternalName(),
STMTHEAP,
srcTableName.getSchemaNamePart().getInternalName(),
srcTableName.getCatalogNamePart().getInternalName());
// build the structures needed to create the table
// tableInfo contains data inserted into OBJECTS and TABLES
ComTdbVirtTableTableInfo * tableInfo = new(STMTHEAP) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = NULL;
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = 0;
tableInfo->isAudited = 0;
tableInfo->validDef = 1;
tableInfo->hbaseCreateOptions = NULL;
tableInfo->numSaltPartns = 0;
tableInfo->rowFormat = (isHive) ? COM_HIVE_EXTERNAL_FORMAT_TYPE :
COM_HBASE_EXTERNAL_FORMAT_TYPE;
tableInfo->objectFlags = (isHive) ? SEABASE_OBJECT_IS_EXTERNAL_HIVE :
SEABASE_OBJECT_IS_EXTERNAL_HBASE;
if (isAuthorizationEnabled())
{
if (srcTableName.isExternalHive())
{
tableInfo->objOwnerID = HIVE_ROLE_ID;
tableInfo->schemaOwnerID = HIVE_ROLE_ID;
}
else
{
tableInfo->objOwnerID = HBASE_ROLE_ID;
tableInfo->schemaOwnerID = HBASE_ROLE_ID;
}
}
else
{
tableInfo->objOwnerID = SUPER_USER;
tableInfo->schemaOwnerID = SUPER_USER;
}
// Column information
Lng32 datatype, length, precision, scale, dtStart, dtEnd, nullable, upshifted;
NAString charset;
CharInfo::Collation collationSequence = CharInfo::DefaultCollation;
ULng32 hbaseColFlags;
NABoolean alignedFormat = FALSE;
Lng32 serializedOption = -1;
Int32 numCols = 0;
ComTdbVirtTableColumnInfo * colInfoArray = NULL;
// Get a description of the source table
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
NATable *naTable = bindWA.getNATable(cnSrc);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cnSrc.getExposedNameAsAnsiString());
return -1;
}
// convert column array from NATable into a ComTdbVirtTableColumnInfo struct
const NAColumnArray &naColArray = naTable->getNAColumnArray();
numCols = naColArray.entries();
colInfoArray = new(STMTHEAP) ComTdbVirtTableColumnInfo[numCols];
for (CollIndex index = 0; index < numCols; index++)
{
const NAColumn *naCol = naColArray[index];
// call: CmpSeabaseDDL::getTypeInfo to get column details
retcode = getTypeInfo(naCol->getType(), alignedFormat, serializedOption,
datatype, length, precision, scale, dtStart, dtEnd, upshifted, nullable,
charset, collationSequence, hbaseColFlags);
if (retcode)
return -1;
colInfoArray[index].colName = naCol->getColName().data();
colInfoArray[index].colNumber = index;
colInfoArray[index].columnClass = COM_USER_COLUMN;
colInfoArray[index].datatype = datatype;
colInfoArray[index].length = length;
colInfoArray[index].nullable = nullable;
colInfoArray[index].charset = (SQLCHARSET_CODE)CharInfo::getCharSetEnum(charset);
colInfoArray[index].precision = precision;
colInfoArray[index].scale = scale;
colInfoArray[index].dtStart = dtStart;
colInfoArray[index].dtEnd = dtEnd;
colInfoArray[index].upshifted = upshifted;
colInfoArray[index].colHeading = NULL;
colInfoArray[index].hbaseColFlags = naCol->getHbaseColFlags();
colInfoArray[index].defaultClass = COM_NULL_DEFAULT;
colInfoArray[index].defVal = NULL;
colInfoArray[index].hbaseColFam = naCol->getHbaseColFam();
colInfoArray[index].hbaseColQual = naCol->getHbaseColQual();
strcpy(colInfoArray[index].paramDirection, COM_UNKNOWN_PARAM_DIRECTION_LIT);
colInfoArray[index].isOptional = FALSE;
colInfoArray[index].colFlags = 0;
}
Int64 objUID = -1;
cliRC = 0;
if (updateSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
COM_NO_LIT,
tableInfo,
numCols,
colInfoArray,
0 /*numKeys*/,
NULL /*keyInfoArray*/,
0, NULL,
objUID /*returns generated UID*/))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTgtTableName);
return -1;
}
cliRC = updateObjectValidDef(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, COM_YES_LIT);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTgtTableName);
return -1;
}
// remove cached definition - this code exists in other create stmts,
// is it required?
CorrName cnTgt(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable(cnTgt,
NATableDB::REMOVE_MINE_ONLY,
COM_BASE_TABLE_OBJECT);
return 0;
}
short CmpSeabaseDDL::genPKeyName(StmtDDLAddConstraintPK *addPKNode,
const char * catName,
const char * schName,
const char * objName,
NAString &pkeyName)
{
ComObjectName tableName( (addPKNode ? addPKNode->getTableName() : " "), COM_TABLE_NAME);
ElemDDLConstraintPK *constraintNode =
(addPKNode ? (addPKNode->getConstraint())->castToElemDDLConstraintPK() : NULL);
ComString specifiedConstraint;
ComString constraintName;
if( !constraintNode || (constraintNode->getConstraintName().isNull()))
{
specifiedConstraint.append( catName);
specifiedConstraint.append(".");
specifiedConstraint.append("\"");
specifiedConstraint.append( schName);
specifiedConstraint.append("\"");
specifiedConstraint.append(".");
ComString oName = "\"";
oName += objName;
oName += "\"";
Lng32 status = ToInternalIdentifier ( oName // in/out - from external- to internal-format
, TRUE // in - NABoolean upCaseInternalNameIfRegularIdent
, TRUE // in - NABoolean acceptCircumflex
);
ComDeriveRandomInternalName ( ComGetNameInterfaceCharSet()
, /*internalFormat*/oName // in - const ComString &
, /*internalFormat*/constraintName // out - ComString &
, STMTHEAP
);
// Generate a delimited identifier if objectName was delimited
constraintName/*InExternalFormat*/ = ToAnsiIdentifier (constraintName/*InInternalFormat*/);
specifiedConstraint.append(constraintName);
}
else
{
specifiedConstraint = constraintNode->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
}
pkeyName = specifiedConstraint;
return 0;
}
short CmpSeabaseDDL::updatePKeyInfo(
StmtDDLAddConstraintPK *addPKNode,
const char * catName,
const char * schName,
const char * objName,
const Int32 ownerID,
const Int32 schemaOwnerID,
Lng32 numKeys,
Int64 * outPkeyUID,
Int64 * outTableUID,
const ComTdbVirtTableKeyInfo * keyInfoArray,
ExeCliInterface *cliInterface)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
char buf[4000];
// update primary key constraint info
NAString pkeyStr;
if (genPKeyName(addPKNode, catName, schName, objName, pkeyStr))
{
return -1;
}
Int64 createTime = NA_JulianTimestamp();
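// generate a new object UID for the primary key constraint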
ComUID comUID;
comUID.make_UID();
Int64 pkeyUID = comUID.get_value();
if (outPkeyUID)
*outPkeyUID = pkeyUID;
ComObjectName pkeyName(pkeyStr);
const NAString catalogNamePart = pkeyName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = pkeyName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = pkeyName.getObjectNamePartAsAnsiString(TRUE);
NAString quotedSchName;
ToQuotedString(quotedSchName, NAString(schemaNamePart), FALSE);
NAString quotedObjName;
ToQuotedString(quotedObjName, NAString(objectNamePart), FALSE);
str_sprintf(buf, "insert into %s.\"%s\".%s values ('%s', '%s', '%s', '%s', %Ld, %Ld, %Ld, '%s', '%s', %d, %d, 0 )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catalogNamePart.data(), quotedSchName.data(), quotedObjName.data(),
COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT,
pkeyUID,
createTime,
createTime,
" ",
COM_NO_LIT,
ownerID,
schemaOwnerID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
*CmpCommon::diags() << DgSqlCode(-1423)
<< DgString0(SEABASE_OBJECTS);
return -1;
}
Int64 tableUID =
getObjectUID(cliInterface,
catName, schName, objName,
COM_BASE_TABLE_OBJECT_LIT);
if (outTableUID)
*outTableUID = tableUID;
Int64 validatedTime = NA_JulianTimestamp();
Int64 indexUID = 0;
str_sprintf(buf, "insert into %s.\"%s\".%s values (%Ld, %Ld, '%s', '%s', '%s', '%s', '%s', '%s', %Ld, %d, %Ld, 0 )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
tableUID, pkeyUID,
COM_PRIMARY_KEY_CONSTRAINT_LIT,
COM_NO_LIT,
COM_NO_LIT,
COM_NO_LIT,
COM_YES_LIT,
COM_YES_LIT,
validatedTime,
numKeys,
indexUID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
*CmpCommon::diags() << DgSqlCode(-1423)
<< DgString0(SEABASE_TABLE_CONSTRAINTS);
return -1;
}
if (keyInfoArray)
{
for (Lng32 i = 0; i < numKeys; i++)
{
str_sprintf(buf, "insert into %s.\"%s\".%s values (%Ld, '%s', %d, %d, %d, %d, 0)",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
pkeyUID,
keyInfoArray[i].colName,
i+1,
keyInfoArray[i].tableColNum,
0,
0);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
*CmpCommon::diags() << DgSqlCode(-1423)
<< DgString0(SEABASE_KEYS);
return -1;
}
}
}
return 0;
}
// ----------------------------------------------------------------------------
// Method: getPKeyInfoForTable
//
// This method reads the metadata to get the primary key constraint name and UID
// for a table.
//
// Params:
// In: catName, schName, objName describing the table
// In: cliInterface - pointer to the cli handle
// Out: constrName and constrUID
//
// Returns 0 if found, -1 otherwise
// ComDiags is set up with error
// ----------------------------------------------------------------------------
short CmpSeabaseDDL::getPKeyInfoForTable (
const char *catName,
const char *schName,
const char *objName,
ExeCliInterface *cliInterface,
NAString &constrName,
Int64 &constrUID)
{
char query[4000];
constrUID = -1;
// get constraint info
str_sprintf(query, "select O.object_name, C.constraint_uid "
"from %s.\"%s\".%s O, %s.\"%s\".%s C "
"where O.object_uid = C.constraint_uid "
" and C.constraint_type = '%s' and C.table_uid = "
" (select object_uid from %s.\"%s\".%s "
" where catalog_name = '%s' "
" and schema_name = '%s' "
" and object_name = '%s')",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
COM_PRIMARY_KEY_CONSTRAINT_LIT,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catName, schName, objName);
Queue * constrInfoQueue = NULL;
Lng32 cliRC = cliInterface->fetchAllRows(constrInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return -1;
}
assert (constrInfoQueue->numEntries() == 1);
constrInfoQueue->position();
OutputInfo * vi = (OutputInfo*)constrInfoQueue->getNext();
char * pConstrName = (char*)vi->get(0);
constrName = pConstrName;
constrUID = *(Int64*)vi->get(1);
return 0;
}
short CmpSeabaseDDL::constraintErrorChecks(
ExeCliInterface * cliInterface,
StmtDDLAddConstraint *addConstrNode,
NATable * naTable,
ComConstraintType ct,
NAList<NAString> &keyColList)
{
const NAString &addConstrName = addConstrNode->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
// make sure that there is no other constraint on this table with this name.
NABoolean foundConstr = FALSE;
const CheckConstraintList &checkList = naTable->getCheckConstraints();
for (Int32 i = 0; i < checkList.entries(); i++)
{
CheckConstraint *checkConstr = (CheckConstraint*)checkList[i];
const NAString &tableConstrName =
checkConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (addConstrName == tableConstrName)
{
foundConstr = TRUE;
}
} // for
if (NOT foundConstr)
{
const AbstractRIConstraintList &ariList = naTable->getUniqueConstraints();
for (Int32 i = 0; i < ariList.entries(); i++)
{
AbstractRIConstraint *ariConstr = (AbstractRIConstraint*)ariList[i];
const NAString &tableConstrName =
ariConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (addConstrName == tableConstrName)
{
foundConstr = TRUE;
}
} // for
}
if (NOT foundConstr)
{
const AbstractRIConstraintList &ariList = naTable->getRefConstraints();
for (Int32 i = 0; i < ariList.entries(); i++)
{
AbstractRIConstraint *ariConstr = (AbstractRIConstraint*)ariList[i];
const NAString &tableConstrName =
ariConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (addConstrName == tableConstrName)
{
foundConstr = TRUE;
}
} // for
}
if (NOT foundConstr)
{
const NAString &constrCatName = addConstrNode->
getConstraintNameAsQualifiedName().getCatalogName();
const NAString &constrSchName = addConstrNode->
getConstraintNameAsQualifiedName().getSchemaName();
const NAString &constrObjName = addConstrNode->
getConstraintNameAsQualifiedName().getObjectName();
// check to see if this constraint was defined on some other table and
// exists in metadata
Lng32 retcode = existsInSeabaseMDTable(cliInterface,
constrCatName, constrSchName, constrObjName,
COM_UNKNOWN_OBJECT, FALSE, FALSE);
if (retcode == 1) // exists
{
foundConstr = TRUE;
}
}
if (foundConstr)
{
*CmpCommon::diags()
<< DgSqlCode(-1043)
<< DgConstraintName(addConstrName);
processReturn();
return -1;
}
if ((ct == COM_UNIQUE_CONSTRAINT) ||
(ct == COM_FOREIGN_KEY_CONSTRAINT) ||
(ct == COM_PRIMARY_KEY_CONSTRAINT))
{
const NAColumnArray & naColArray = naTable->getNAColumnArray();
// Now process each column defined in the parseColList to see if
// it exists in the table column list and it isn't a duplicate.
NABitVector seenIt;
NAString keyColNameStr;
for (CollIndex i = 0; i < keyColList.entries(); i++)
{
NAColumn * nac = naColArray.getColumn(keyColList[i]);
if (! nac)
{
*CmpCommon::diags() << DgSqlCode(-1009)
<< DgColumnName( ToAnsiIdentifier(keyColList[i]));
return -1;
}
if (nac->isSystemColumn())
{
*CmpCommon::diags() << DgSqlCode((ct == COM_FOREIGN_KEY_CONSTRAINT) ?
-CAT_SYSTEM_COL_NOT_ALLOWED_IN_RI_CNSTRNT :
-CAT_SYSTEM_COL_NOT_ALLOWED_IN_UNIQUE_CNSTRNT)
<< DgColumnName(ToAnsiIdentifier(keyColList[i]))
<< DgTableName(addConstrNode->getTableName());
return -1;
}
// If column is a LOB column, error
Lng32 datatype = nac->getType()->getFSDatatype();
if ((datatype == REC_BLOB) || (datatype == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COL_CANNOT_BE_INDEX_OR_KEY)
<< DgColumnName( ToAnsiIdentifier(keyColList[i]));
processReturn();
return -1;
}
Lng32 colNumber = nac->getPosition();
// If the column has already been found, return error
if( seenIt.contains(colNumber))
{
*CmpCommon::diags() << DgSqlCode(-CAT_REDUNDANT_COLUMN_REF_PK)
<< DgColumnName( ToAnsiIdentifier(keyColList[i]));
return -1;
}
seenIt.setBit(colNumber);
}
if (ct == COM_UNIQUE_CONSTRAINT)
{
// Compare the column list from parse tree to the unique and primary
// key constraints already defined for the table. The new unique
// constraint list must be distinct. The order of the existing constraint
// does not have to be in the same order as the new constraint.
//
if (naTable->getCorrespondingConstraint(keyColList,
TRUE, // unique constraint
NULL))
{
*CmpCommon::diags() << DgSqlCode(-CAT_DUPLICATE_UNIQUE_CONSTRAINT_ON_SAME_COL);
return -1;
}
}
}
return 0;
}
short CmpSeabaseDDL::genUniqueName(StmtDDLAddConstraint *addUniqueNode,
NAString &uniqueName)
{
ComObjectName tableName( addUniqueNode->getTableName(), COM_TABLE_NAME);
ElemDDLConstraint *constraintNode =
(addUniqueNode->getConstraint())->castToElemDDLConstraint();
ComString specifiedConstraint;
ComString constraintName;
if( constraintNode->getConstraintName().isNull() )
{
specifiedConstraint.append( tableName.getCatalogNamePartAsAnsiString() );
specifiedConstraint.append(".");
specifiedConstraint.append( tableName.getSchemaNamePartAsAnsiString() );
specifiedConstraint.append(".");
ComString oName = tableName.getObjectNamePartAsAnsiString() ;
Lng32 status = ToInternalIdentifier ( oName // in/out - from external- to internal-format
, TRUE // in - NABoolean upCaseInternalNameIfRegularIdent
, TRUE // in - NABoolean acceptCircumflex
);
ComDeriveRandomInternalName ( ComGetNameInterfaceCharSet()
, /*internalFormat*/oName // in - const ComString &
, /*internalFormat*/constraintName // out - ComString &
, STMTHEAP
);
// Generate a delimited identifier if objectName was delimited
constraintName/*InExternalFormat*/ = ToAnsiIdentifier (constraintName/*InInternalFormat*/);
specifiedConstraint.append(constraintName);
}
else
{
specifiedConstraint = constraintNode->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
}
uniqueName = specifiedConstraint;
return 0;
}
short CmpSeabaseDDL::updateConstraintMD(
NAList<NAString> &keyColList,
NAList<NAString> &keyColOrderList,
NAString &uniqueStr,
Int64 tableUID,
Int64 constrUID,
NATable * naTable,
ComConstraintType ct,
NABoolean enforced,
ExeCliInterface *cliInterface)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
char buf[4000];
const NAColumnArray & naColArray = naTable->getNAColumnArray();
Int64 createTime = NA_JulianTimestamp();
ComObjectName uniqueName(uniqueStr);
const NAString catalogNamePart = uniqueName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = uniqueName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = uniqueName.getObjectNamePartAsAnsiString(TRUE);
NAString quotedSchName;
ToQuotedString(quotedSchName, NAString(schemaNamePart), FALSE);
NAString quotedObjName;
ToQuotedString(quotedObjName, NAString(objectNamePart), FALSE);
str_sprintf(buf, "insert into %s.\"%s\".%s values ('%s', '%s', '%s', '%s', %Ld, %Ld, %Ld, '%s', '%s', %d, %d, 0 )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catalogNamePart.data(), quotedSchName.data(), quotedObjName.data(),
((ct == COM_UNIQUE_CONSTRAINT) ? COM_UNIQUE_CONSTRAINT_OBJECT_LIT :
((ct == COM_FOREIGN_KEY_CONSTRAINT) ? COM_REFERENTIAL_CONSTRAINT_OBJECT_LIT : COM_CHECK_CONSTRAINT_OBJECT_LIT)),
constrUID,
createTime,
createTime,
" ",
COM_NO_LIT,
naTable->getOwner(),
naTable->getSchemaOwner());
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
Int64 indexUID = 0;
str_sprintf(buf, "insert into %s.\"%s\".%s values (%Ld, %Ld, '%s', '%s', '%s', '%s', '%s', '%s', %Ld, %d, %Ld, 0 )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
tableUID, constrUID,
((ct == COM_UNIQUE_CONSTRAINT) ? COM_UNIQUE_CONSTRAINT_LIT :
((ct == COM_FOREIGN_KEY_CONSTRAINT) ? COM_FOREIGN_KEY_CONSTRAINT_LIT : COM_CHECK_CONSTRAINT_LIT)),
COM_NO_LIT,
COM_NO_LIT,
COM_NO_LIT,
(enforced ? COM_YES_LIT : COM_NO_LIT),
COM_YES_LIT,
createTime,
keyColList.entries(),
indexUID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
for (Lng32 i = 0; i < keyColList.entries(); i++)
{
NAColumn * nac = naColArray.getColumn(keyColList[i]);
Lng32 colNumber = nac->getPosition();
str_sprintf(buf, "insert into %s.\"%s\".%s values (%Ld, '%s', %d, %d, %d, %d, 0)",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
constrUID,
keyColList[i].data(),
i+1,
colNumber,
(keyColOrderList[i] == "DESC" ? 1 : 0),
0);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
}
return 0;
}
short CmpSeabaseDDL::updateRIConstraintMD(
Int64 ringConstrUID,
Int64 refdConstrUID,
ExeCliInterface *cliInterface)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
char buf[4000];
str_sprintf(buf, "insert into %s.\"%s\".%s values (%Ld, %Ld, '%s', '%s', '%s', 0 )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_REF_CONSTRAINTS,
ringConstrUID, refdConstrUID,
COM_FULL_MATCH_OPTION_LIT,
COM_RESTRICT_UPDATE_RULE_LIT,
COM_RESTRICT_DELETE_RULE_LIT);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
str_sprintf(buf, "insert into %s.\"%s\".%s values (%Ld, %Ld, 0)",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_UNIQUE_REF_CONSTR_USAGE,
refdConstrUID, ringConstrUID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
return 0;
}
short CmpSeabaseDDL::updateIndexInfo(
NAList<NAString> &ringKeyColList,
NAList<NAString> &ringKeyColOrderList,
NAList<NAString> &refdKeyColList,
NAString &uniqueStr,
Int64 constrUID,
const char * catName,
const char * schName,
const char * objName,
NATable * naTable,
NABoolean isUnique, // TRUE: uniq constr. FALSE: ref constr.
NABoolean noPopulate,
NABoolean sameSequenceOfCols,
ExeCliInterface *cliInterface)
{
// Now we need to determine if an index has to be created for
// the unique or ref constraint.
NABoolean createIndex = TRUE;
NAString existingIndexName;
if (naTable->getCorrespondingIndex(ringKeyColList,
TRUE, // explicit index only
isUnique, //TRUE, look for unique index.
TRUE, //isUnique, //TRUE, look for primary key.
(NOT isUnique), // TRUE, look for any index or pkey
TRUE, // exclude system computed cols like salt, division
sameSequenceOfCols,
&existingIndexName))
createIndex = FALSE;
ComObjectName indexName(createIndex ? uniqueStr : existingIndexName);
const NAString catalogNamePart = indexName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = indexName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = indexName.getObjectNamePartAsAnsiString(TRUE);
NAString quotedSchName;
ToQuotedString(quotedSchName, NAString(schemaNamePart), FALSE);
NAString quotedObjName;
ToQuotedString(quotedObjName, NAString(objectNamePart), FALSE);
char buf[5000];
Lng32 cliRC;
Int64 tableUID = naTable->objectUid().castToInt64();
if (createIndex)
{
NAString keyColNameStr;
for (CollIndex i = 0; i < ringKeyColList.entries(); i++)
{
keyColNameStr += "\"";
keyColNameStr += ringKeyColList[i];
keyColNameStr += "\" ";
keyColNameStr += ringKeyColOrderList[i];
if (i+1 < ringKeyColList.entries())
keyColNameStr += ", ";
}
char noPopStr[100];
if (noPopulate)
strcpy(noPopStr, " no populate ");
else
strcpy(noPopStr, " ");
if (isUnique)
str_sprintf(buf, "create unique index \"%s\" on \"%s\".\"%s\".\"%s\" ( %s ) %s",
quotedObjName.data(),
catName, schName, objName,
keyColNameStr.data(),
noPopStr);
else
str_sprintf(buf, "create index \"%s\" on \"%s\".\"%s\".\"%s\" ( %s ) %s",
quotedObjName.data(),
catName, schName, objName,
keyColNameStr.data(),
noPopStr);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// update indexes table and mark this index as an implicit index.
str_sprintf(buf, "update %s.\"%s\".%s set is_explicit = 0 where base_table_uid = %Ld and index_uid = (select object_uid from %s.\"%s\".%s where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' and object_type = 'IX') ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_INDEXES,
tableUID,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catName, schemaNamePart.data(), objectNamePart.data());
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
if (noPopulate)
{
if (updateObjectValidDef(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_INDEX_OBJECT_LIT,
COM_YES_LIT))
{
return -1;
}
}
}
// update table_constraints table with the uid of this index.
Int64 indexUID =
getObjectUID(cliInterface,
catName, schemaNamePart, objectNamePart,
COM_INDEX_OBJECT_LIT);
if (indexUID < 0)
{
// primary key. Clear diags area since getObjectUID sets up diags entry.
CmpCommon::diags()->clear();
}
str_sprintf(buf, "update %s.\"%s\".%s set index_uid = %Ld where table_uid = %Ld and constraint_uid = %Ld and constraint_type = '%s'",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
indexUID,
tableUID, constrUID,
(isUnique ? COM_UNIQUE_CONSTRAINT_LIT : COM_FOREIGN_KEY_CONSTRAINT_LIT));
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
return 0;
}
// RETURN: -1, no need to cleanup. -2, caller needs to call cleanup
// 0, all ok.
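// (On a -2 return the caller, createSeabaseTable, calls cleanupObjectAfterError
// for the table being created.)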
short CmpSeabaseDDL::createSeabaseTable2(
ExeCliInterface &cliInterface,
StmtDDLCreateTable * createTableNode,
NAString &currCatName, NAString &currSchName,
NABoolean isCompound)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
ComObjectName tableName(createTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
// Make some additional checks if creating an external table
ComObjectName *srcTableName = NULL;
if (createTableNode->isExternal())
{
// The schema name of the target table, if specified, must match the
// schema name of the source table
NAString origSchemaName =
createTableNode->getOrigTableNameAsQualifiedName().getSchemaName();
srcTableName = new(STMTHEAP) ComObjectName
(createTableNode->getLikeSourceTableName(), COM_TABLE_NAME);
srcTableName->applyDefaults(currCatAnsiName, currSchAnsiName);
// Convert the native table name to its trafodion name
NAString tabName = ComConvertNativeNameToTrafName
(srcTableName->getCatalogNamePartAsAnsiString(),
srcTableName->getSchemaNamePartAsAnsiString(),
tableName.getObjectNamePartAsAnsiString());
ComObjectName adjustedName(tabName, COM_TABLE_NAME);
NAString type = adjustedName.isExternalHive() ? "HIVE" : "HBASE";
tableName = adjustedName;
// Verify that the name with prepending is not too long
if (tableName.getSchemaNamePartAsAnsiString(TRUE).length() >
ComMAX_ANSI_IDENTIFIER_INTERNAL_LEN)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_EXTERNAL_SCHEMA_NAME_TOO_LONG)
<< DgString0(type.data())
<< DgTableName(tableName.getSchemaNamePartAsAnsiString(FALSE))
<< DgInt0(ComMAX_ANSI_IDENTIFIER_INTERNAL_LEN - sizeof(HIVE_EXT_SCHEMA_PREFIX));
return -1;
}
if ((origSchemaName.length() > 0)&&
(origSchemaName != srcTableName->getSchemaNamePart().getExternalName()))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_EXTERNAL_NAME_MISMATCH)
<< DgString0 (type.data())
<< DgTableName(origSchemaName)
<< DgString1((srcTableName->getSchemaNamePart().getExternalName()));
return -1;
}
// For now the object name of the target table must match the
// object name of the source table
if (tableName.getObjectNamePart().getExternalName() !=
srcTableName->getObjectNamePart().getExternalName())
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_EXTERNAL_NAME_MISMATCH)
<< DgString0 (type.data())
<< DgTableName(tableName.getObjectNamePart().getExternalName())
<< DgString1((srcTableName->getObjectNamePart().getExternalName()));
return -1;
}
}
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return -1;
}
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CREATE_TABLE_NOT_ALLOWED_IN_SMD)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_UNKNOWN_OBJECT, FALSE);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (retcode == 1) // already exists
{
if (NOT createTableNode->createIfNotExists())
{
if (createTableNode->isVolatile())
*CmpCommon::diags() << DgSqlCode(-1390)
<< DgString0(objectNamePart);
else
*CmpCommon::diags() << DgSqlCode(-1390)
<< DgString0(extTableName);
}
deallocEHI(ehi);
processReturn();
return -1;
}
// If creating an external table, go perform operation
if (createTableNode->isExternal())
{
retcode = createSeabaseTableExternal
(cliInterface, createTableNode, tableName, *srcTableName);
if (retcode != 0 && CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("creating external HIVE table");
deallocEHI(ehi);
processReturn();
return retcode;
}
ElemDDLColDefArray &colArray = createTableNode->getColDefArray();
ElemDDLColRefArray &keyArray =
(createTableNode->getIsConstraintPKSpecified() ?
createTableNode->getPrimaryKeyColRefArray() :
(createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION ?
createTableNode->getKeyColumnArray() :
createTableNode->getPrimaryKeyColRefArray()));
Int32 objectOwnerID = SUPER_USER;
Int32 schemaOwnerID = SUPER_USER;
ComSchemaClass schemaClass;
retcode = verifyDDLCreateOperationAuthorized(&cliInterface,
SQLOperation::CREATE_TABLE,
catalogNamePart,
schemaNamePart,
schemaClass,
objectOwnerID,
schemaOwnerID);
if (retcode != 0)
{
handleDDLCreateAuthorizationError(retcode,catalogNamePart,schemaNamePart);
deallocEHI(ehi);
processReturn();
return -1;
}
// If the schema name specified is external HIVE or HBase name, users cannot
// create them.
if (ComIsTrafodionExternalSchemaName(schemaNamePart) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
// error.
*SqlParser_Diags << DgSqlCode(-CAT_CREATE_TABLE_NOT_ALLOWED_IN_SMD)
<< DgTableName(extTableName.data());
return -1;
}
if (createTableNode->getIsLikeOptionSpecified())
{
createSeabaseTableLike(createTableNode, currCatName, currSchName);
deallocEHI(ehi);
processReturn();
return -1;
}
// For shared schemas, histogram tables should be owned by the schema owner,
// not the first user to run UPDATE STATISTICS in the schema.
if (schemaClass == COM_SCHEMA_CLASS_SHARED && isHistogramTable(objectNamePart))
objectOwnerID = schemaOwnerID;
// check if SYSKEY is specified as a column name.
NABoolean explicitSyskeySpecified = FALSE;
for (Lng32 i = 0; i < colArray.entries(); i++)
{
if (colArray[i]->getColumnName() == "SYSKEY")
explicitSyskeySpecified = TRUE;
}
NABoolean implicitPK = FALSE;
NAString syskeyColName("SYSKEY");
SQLLargeInt * syskeyType = new(STMTHEAP) SQLLargeInt(TRUE, FALSE, STMTHEAP);
ElemDDLColDef syskeyColDef(NULL, &syskeyColName, syskeyType, NULL, NULL,
STMTHEAP);
ElemDDLColRef edcr("SYSKEY", COM_ASCENDING_ORDER);
CollIndex numSysCols = 0;
CollIndex numSaltCols = 0;
CollIndex numDivCols = 0;
syskeyColDef.setColumnClass(COM_SYSTEM_COLUMN);
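// If no primary key or STORE BY column list was specified, add SYSKEY as an
// implicit, system-generated LARGEINT clustering key column.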
if (((createTableNode->getStoreOption() == COM_KEY_COLUMN_LIST_STORE_OPTION) &&
(NOT createTableNode->getIsConstraintPKSpecified())) ||
(keyArray.entries() == 0))
{
colArray.insertAt(0, &syskeyColDef);
keyArray.insert(&edcr);
implicitPK = TRUE;
numSysCols++;
}
if ((implicitPK) && (explicitSyskeySpecified))
{
*CmpCommon::diags() << DgSqlCode(-1080)
<< DgColumnName("SYSKEY");
deallocEHI(ehi);
processReturn();
return -1;
}
int numSaltPartns = 0; // # of "_SALT_" values
int numSplits = 0; // # of initial region splits
Lng32 numSaltPartnsFromCQD =
CmpCommon::getDefaultNumeric(TRAF_NUM_OF_SALT_PARTNS);
if ((createTableNode->getSaltOptions()) ||
((numSaltPartnsFromCQD > 0) &&
(NOT (implicitPK || explicitSyskeySpecified))))
{
// add a system column SALT INTEGER NOT NULL with a computed
// default value HASH2PARTFUNC(<salting cols> FOR <num salt partitions>)
ElemDDLSaltOptionsClause * saltOptions = createTableNode->getSaltOptions();
ElemDDLColRefArray *saltArray = createTableNode->getSaltColRefArray();
NAString saltExprText("HASH2PARTFUNC(");
NABoolean firstSaltCol = TRUE;
char numSaltPartnsStr[20];
if (saltArray == NULL || saltArray->entries() == 0)
{
// if no salting columns are specified, use all key columns
saltArray = &keyArray;
}
else
{
// Validate that salting columns refer to real key columns
for (CollIndex s=0; s<saltArray->entries(); s++)
if (keyArray.getColumnIndex((*saltArray)[s]->getColumnName()) < 0)
{
*CmpCommon::diags() << DgSqlCode(-1195)
<< DgColumnName((*saltArray)[s]->getColumnName());
deallocEHI(ehi);
processReturn();
return -1;
}
}
for (CollIndex i=0; i<saltArray->entries(); i++)
{
const NAString &colName = (*saltArray)[i]->getColumnName();
ComAnsiNamePart cnp(colName, ComAnsiNamePart::INTERNAL_FORMAT);
CollIndex colIx = colArray.getColumnIndex(colName);
NAType *colType = colArray[colIx]->getColumnDataType();
NAString typeText;
short rc = colType->getMyTypeAsText(&typeText, FALSE);
// don't include SYSKEY in the list of salt columns
if (colName != "SYSKEY")
{
if (firstSaltCol)
firstSaltCol = FALSE;
else
saltExprText += ",";
saltExprText += "CAST(";
if (NOT cnp.isDelimitedIdentifier())
saltExprText += "\"";
saltExprText += cnp.getExternalName();
if (NOT cnp.isDelimitedIdentifier())
saltExprText += "\"";
saltExprText += " AS ";
saltExprText += typeText;
if (!colType->supportsSQLnull())
saltExprText += " NOT NULL";
saltExprText += ")";
if (colType->getTypeQualifier() == NA_NUMERIC_TYPE &&
!(((NumericType *) colType)->isExact()))
{
*CmpCommon::diags() << DgSqlCode(-1120);
deallocEHI(ehi);
processReturn();
return -1;
}
}
else if (saltArray != &keyArray || saltArray->entries() == 1)
{
// SYSKEY was explicitly specified in salt column or is the only column,
// this is an error
*CmpCommon::diags() << DgSqlCode(-1195)
<< DgColumnName((*saltArray)[i]->getColumnName());
deallocEHI(ehi);
processReturn();
return -1;
}
}
numSaltPartns =
(saltOptions ? saltOptions->getNumPartitions() : numSaltPartnsFromCQD);
saltExprText += " FOR ";
sprintf(numSaltPartnsStr,"%d", numSaltPartns);
saltExprText += numSaltPartnsStr;
saltExprText += ")";
if (numSaltPartns <= 1 || numSaltPartns > 1024)
{
// number of salt partitions is out of bounds
*CmpCommon::diags() << DgSqlCode(-1196)
<< DgInt0(2)
<< DgInt1(1024);
deallocEHI(ehi);
processReturn();
return -1;
}
NAString saltColName(ElemDDLSaltOptionsClause::getSaltSysColName());
SQLInt * saltType = new(STMTHEAP) SQLInt(FALSE, FALSE, STMTHEAP);
ElemDDLColDefault *saltDef =
new(STMTHEAP) ElemDDLColDefault(
ElemDDLColDefault::COL_COMPUTED_DEFAULT);
saltDef->setComputedDefaultExpr(saltExprText);
ElemDDLColDef * saltColDef =
new(STMTHEAP) ElemDDLColDef(NULL, &saltColName, saltType, saltDef, NULL,
STMTHEAP);
ElemDDLColRef * edcrs =
new(STMTHEAP) ElemDDLColRef(saltColName, COM_ASCENDING_ORDER);
saltColDef->setColumnClass(COM_SYSTEM_COLUMN);
// add this new salt column at the end
// and also as key column 0
colArray.insert(saltColDef);
keyArray.insertAt(0, edcrs);
numSysCols++;
numSaltCols++;
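// request one initial HBase region per salt value: numSplits split keys
// produce numSplits+1 initial regions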
numSplits = numSaltPartns - 1;
}
// create table in seabase
ParDDLFileAttrsCreateTable &fileAttribs =
createTableNode->getFileAttributes();
NABoolean alignedFormat = FALSE;
if (fileAttribs.isRowFormatSpecified() == TRUE)
{
if (fileAttribs.getRowFormat() == ElemDDLFileAttrRowFormat::eALIGNED)
{
alignedFormat = TRUE;
}
}
else if(CmpCommon::getDefault(TRAF_ALIGNED_ROW_FORMAT) == DF_ON)
{
if ( NOT isSeabaseReservedSchema(tableName))
alignedFormat = TRUE;
}
const NAString &defaultColFam = fileAttribs.getColFam();
// allow nullable clustering key or unique constraints based on the
// CQD settings. If a volatile table is being created and cqd
// VOLATILE_TABLE_FIND_SUITABLE_KEY is ON, then allow it.
// If ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT is set, then allow it.
NABoolean allowNullableUniqueConstr = FALSE;
if (((CmpCommon::getDefault(VOLATILE_TABLE_FIND_SUITABLE_KEY) != DF_OFF) &&
(createTableNode->isVolatile())) ||
(CmpCommon::getDefault(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT) == DF_ON))
allowNullableUniqueConstr = TRUE;
int numIterationsToCompleteColumnList = 1;
Lng32 numCols = 0;
Lng32 numKeys = 0;
ComTdbVirtTableColumnInfo * colInfoArray = NULL;
ComTdbVirtTableKeyInfo * keyInfoArray = NULL;
Lng32 identityColPos = -1;
std::vector<NAString> userColFamVec;
std::vector<NAString> trafColFamVec;
// build colInfoArray and keyInfoArray, this may take two
// iterations if we need to add a divisioning column
for (int iter=0; iter < numIterationsToCompleteColumnList; iter++)
{
numCols = colArray.entries();
numKeys = keyArray.entries();
colInfoArray = new(STMTHEAP) ComTdbVirtTableColumnInfo[numCols];
keyInfoArray = new(STMTHEAP) ComTdbVirtTableKeyInfo[numKeys];
if (buildColInfoArray(COM_BASE_TABLE_OBJECT,
&colArray, colInfoArray, implicitPK,
alignedFormat, &identityColPos,
&userColFamVec, &trafColFamVec, defaultColFam.data()))
{
processReturn();
return -1;
}
if (buildKeyInfoArray(&colArray, &keyArray, colInfoArray, keyInfoArray, allowNullableUniqueConstr))
{
processReturn();
return -1;
}
if (iter == 0 && createTableNode->isDivisionClauseSpecified())
{
// We need the colArray to be able to bind the divisioning
// expression, check it and compute its type. Once we have the
// type, we will add a divisioning column of that type and
// also add that column to the key. Then we will need to go
// through this loop once more and create the updated colArray.
numIterationsToCompleteColumnList = 2;
NAColumnArray *naColArrayForBindingDivExpr = new(STMTHEAP) NAColumnArray(STMTHEAP);
NAColumnArray *keyColArrayForBindingDivExpr = new(STMTHEAP) NAColumnArray(STMTHEAP);
ItemExprList * divExpr = createTableNode->getDivisionExprList();
ElemDDLColRefArray *divColNamesFromDDL = createTableNode->getDivisionColRefArray();
CmpSeabaseDDL::convertColAndKeyInfoArrays(
numCols,
colInfoArray,
numKeys,
keyInfoArray,
naColArrayForBindingDivExpr,
keyColArrayForBindingDivExpr);
for (CollIndex d=0; d<divExpr->entries(); d++)
{
NABoolean exceptionOccurred = FALSE;
ComColumnOrdering divKeyOrdering = COM_ASCENDING_ORDER;
ItemExpr *boundDivExpr =
bindDivisionExprAtDDLTime((*divExpr)[d],
keyColArrayForBindingDivExpr,
STMTHEAP);
if (!boundDivExpr)
{
processReturn();
return -1;
}
if (boundDivExpr->getOperatorType() == ITM_INVERSE)
{
divKeyOrdering = COM_DESCENDING_ORDER;
boundDivExpr = boundDivExpr->child(0);
if (boundDivExpr->getOperatorType() == ITM_INVERSE)
{
// in rare cases we could have two inverse operators
// stacked on top of each other, indicating ascending
divKeyOrdering = COM_ASCENDING_ORDER;
boundDivExpr = boundDivExpr->child(0);
}
}
try
{
// put this into a try/catch block because it could throw
// an exception when type synthesis fails and that would leave
// the transaction begun by the DDL operation in limbo
boundDivExpr->synthTypeAndValueId();
}
catch (...)
{
// diags area should be set
CMPASSERT(CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) > 0);
exceptionOccurred = TRUE;
}
if (exceptionOccurred ||
boundDivExpr->getValueId() == NULL_VALUE_ID)
{
processReturn();
return -1;
}
if (validateDivisionByExprForDDL(boundDivExpr))
{
processReturn();
return -1;
}
// Add a divisioning column to the list of columns and the key
char buf[16];
snprintf(buf, sizeof(buf), "_DIVISION_%d_", d+1);
NAString divColName(buf);
// if the division column name was specified in the DDL, use that instead
if (divColNamesFromDDL && divColNamesFromDDL->entries() > d)
divColName = (*divColNamesFromDDL)[d]->getColumnName();
NAType * divColType =
boundDivExpr->getValueId().getType().newCopy(STMTHEAP);
ElemDDLColDefault *divColDefault =
new(STMTHEAP) ElemDDLColDefault(
ElemDDLColDefault::COL_COMPUTED_DEFAULT);
NAString divExprText;
boundDivExpr->unparse(divExprText, PARSER_PHASE, COMPUTED_COLUMN_FORMAT);
divColDefault->setComputedDefaultExpr(divExprText);
ElemDDLColDef * divColDef =
new(STMTHEAP) ElemDDLColDef(NULL, &divColName, divColType, divColDefault, NULL,
STMTHEAP);
ElemDDLColRef * edcrs =
new(STMTHEAP) ElemDDLColRef(divColName, divKeyOrdering);
divColDef->setColumnClass(COM_SYSTEM_COLUMN);
divColDef->setDivisionColumnFlag(TRUE);
divColDef->setDivisionColumnSequenceNumber(d);
// add this new divisioning column to the end of the row
// and also to the key, right after any existing salt and divisioning columns
colArray.insert(divColDef);
keyArray.insertAt(numSaltCols+numDivCols, edcrs);
numSysCols++;
numDivCols++;
}
}
} // iterate 1 or 2 times to get all columns, including divisioning columns
Int32 keyLength = 0;
for(CollIndex i = 0; i < keyArray.entries(); i++)
{
const NAString &colName = keyArray[i]->getColumnName();
Lng32 colIx = colArray.getColumnIndex(colName);
if (colIx < 0)
{
*CmpCommon::diags() << DgSqlCode(-1009)
<< DgColumnName(colName);
deallocEHI(ehi);
processReturn();
return -1;
}
NAType *colType = colArray[colIx]->getColumnDataType();
if (colType->getFSDatatype() == REC_BLOB || colType->getFSDatatype() == REC_CLOB)
// Cannot allow LOB in primary or clustering key
{
*CmpCommon::diags() << DgSqlCode(CAT_LOB_COL_CANNOT_BE_INDEX_OR_KEY)
<< DgColumnName(colName);
deallocEHI(ehi);
processReturn();
return -1;
}
keyLength += colType->getEncodedKeyLength();
}
char ** encodedKeysBuffer = NULL;
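// For salted tables, pre-compute the encoded HBase split keys from the
// clustering key layout so createHbaseTable can create the table pre-split.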
if (numSplits > 0) {
desc_struct * colDescs =
convertVirtTableColumnInfoArrayToDescStructs(&tableName,
colInfoArray,
numCols) ;
desc_struct * keyDescs =
convertVirtTableKeyInfoArrayToDescStructs(keyInfoArray,
colInfoArray,
numKeys) ;
if (createEncodedKeysBuffer(encodedKeysBuffer/*out*/,
numSplits /*out*/,
colDescs, keyDescs,
numSaltPartns,
numSplits,
NULL,
numKeys,
keyLength, FALSE))
{
processReturn();
return -1;
}
}
ComTdbVirtTableTableInfo * tableInfo = new(STMTHEAP) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = NULL;
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = 0;
tableInfo->isAudited = (fileAttribs.getIsAudit() ? 1 : 0);
tableInfo->validDef = 1;
tableInfo->hbaseCreateOptions = NULL;
tableInfo->objectFlags = 0;
if (fileAttribs.isOwnerSpecified())
{
// Fixed bug: if BY CLAUSE specified an unregistered user, then the object
// owner is set to 0 in metadata. Once 0, the table could not be dropped.
NAString owner = fileAttribs.getOwner();
Int16 retcode = (ComUser::getUserIDFromUserName(owner.data(),objectOwnerID));
if (retcode == FENOTFOUND)
{
*CmpCommon::diags() << DgSqlCode(-CAT_AUTHID_DOES_NOT_EXIST_ERROR)
<< DgString0(owner.data());
processReturn();
return -1;
}
else if (retcode != FEOK)
{
*CmpCommon::diags() << DgSqlCode (-CAT_INTERNAL_EXCEPTION_ERROR)
<< DgString0(__FILE__)
<< DgInt0(__LINE__)
<< DgString1("verifying grantee");
processReturn();
return -1;
}
if (schemaClass == COM_SCHEMA_CLASS_PRIVATE &&
objectOwnerID != schemaOwnerID)
{
*CmpCommon::diags() << DgSqlCode(-CAT_BY_CLAUSE_IN_PRIVATE_SCHEMA);
deallocEHI(ehi);
processReturn();
return -1;
}
}
tableInfo->objOwnerID = objectOwnerID;
tableInfo->schemaOwnerID = schemaOwnerID;
tableInfo->numSaltPartns = (numSplits > 0 ? numSplits+1 : 0);
tableInfo->rowFormat = (alignedFormat ? COM_ALIGNED_FORMAT_TYPE : COM_HBASE_FORMAT_TYPE);
NAList<HbaseCreateOption*> hbaseCreateOptions;
NAString hco;
short retVal = setupHbaseOptions(createTableNode->getHbaseOptionsClause(),
numSplits, extTableName,
hbaseCreateOptions, hco);
if (retVal)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (alignedFormat)
{
hco += "ROW_FORMAT=>ALIGNED ";
}
tableInfo->hbaseCreateOptions = (hco.isNull() ? NULL : hco.data());
tableInfo->defaultColFam = NULL;
tableInfo->allColFams = NULL;
Int64 objUID = -1;
if (Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
const char* v = ActiveSchemaDB()->getDefaults().
getValue(TRAF_CREATE_TABLE_WITH_UID);
if ((v) and (strlen(v) > 0))
{
objUID = str_atoi(v, strlen(v));
}
}
if (updateSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
COM_NO_LIT,
tableInfo,
numCols,
colInfoArray,
numKeys,
keyInfoArray,
0, NULL,
objUID))
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
// update TEXT table with column families.
// Column families are stored separated by a blank space character.
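// For example, with a non-default default family DF and user families F1 and F2
// the stored text would be "DF F1 F2 " (family names here are illustrative only).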
NAString allColFams;
NABoolean addToTextTab = FALSE;
if (defaultColFam != SEABASE_DEFAULT_COL_FAMILY)
addToTextTab = TRUE;
else if (userColFamVec.size() > 1)
addToTextTab = TRUE;
else if ((userColFamVec.size() == 1) && (userColFamVec[0] != SEABASE_DEFAULT_COL_FAMILY))
addToTextTab = TRUE;
if (addToTextTab)
{
allColFams = defaultColFam + " ";
for (int i = 0; i < userColFamVec.size(); i++)
{
allColFams += userColFamVec[i];
allColFams += " ";
}
cliRC = updateTextTable(&cliInterface, objUID,
COM_HBASE_COL_FAMILY_TEXT, 0,
allColFams);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
}
if (createTableNode->getAddConstraintPK())
{
if (updatePKeyInfo(createTableNode->getAddConstraintPK(),
catalogNamePart, schemaNamePart, objectNamePart,
objectOwnerID,
schemaOwnerID,
keyArray.entries(),
NULL,
NULL,
keyInfoArray,
&cliInterface))
{
return -1;
}
}
if (identityColPos >= 0)
{
ElemDDLColDef *colDef = colArray[identityColPos];
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catalogNamePart, schemaNamePart, objectNamePart, colDef->getColumnName(),
seqName);
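// genSequenceName derives the internal sequence name from the table name and
// the identity column name, so each identity column gets its own sequence.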
if (colDef->getSGOptions())
{
colDef->getSGOptions()->setFSDataType((ComFSDataType)colDef->getColumnDataType()->getFSDatatype());
if (colDef->getSGOptions()->validate(2/*identity*/))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
SequenceGeneratorAttributes sga;
colDef->getSGOptions()->genSGA(sga);
NAString idOptions;
sga.display(NULL, &idOptions, TRUE);
char buf[4000];
str_sprintf(buf, "create internal sequence %s.\"%s\".\"%s\" %s",
catalogNamePart.data(), schemaNamePart.data(), seqName.data(),
idOptions.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return -1;
}
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_MINE_ONLY, COM_BASE_TABLE_OBJECT);
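// (remove this process's cached NATable entry so later lookups reflect the
// newly created identity sequence)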
// update datatype for this sequence
str_sprintf(buf, "update %s.\"%s\".%s set fs_data_type = %d where seq_type = '%s' and seq_uid = (select object_uid from %s.\"%s\".\"%s\" where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' and object_type = '%s') ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_SEQ_GEN,
colDef->getColumnDataType()->getFSDatatype(),
COM_INTERNAL_SG_LIT,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
catalogNamePart.data(), schemaNamePart.data(), seqName.data(),
COM_SEQUENCE_GENERATOR_OBJECT_LIT);
Int64 rowsAffected = 0;
cliRC = cliInterface.executeImmediate(buf, NULL, NULL, FALSE, &rowsAffected);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return -1;
}
}
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
if (createHbaseTable(ehi, &hbaseTable, trafColFamVec,
&hbaseCreateOptions,
numSplits, keyLength,
encodedKeysBuffer) == -1)
{
deallocEHI(ehi);
processReturn();
return -2;
}
// if this table has lob columns, create the lob files
short *lobNumList = new (STMTHEAP) short[numCols];
short *lobTypList = new (STMTHEAP) short[numCols];
char **lobLocList = new (STMTHEAP) char*[numCols];
Lng32 j = 0;
for (Int32 i = 0; i < colArray.entries(); i++)
{
ElemDDLColDef *column = colArray[i];
Lng32 datatype = column->getColumnDataType()->getFSDatatype();
if ((datatype == REC_BLOB) ||
(datatype == REC_CLOB))
{
lobNumList[j] = i; //column->getColumnNumber();
lobTypList[j] =
(short)(column->getLobStorage() == Lob_Invalid_Storage
? Lob_HDFS_File : column->getLobStorage());
// lobTypList[j] = (short)
// CmpCommon::getDefaultNumeric(LOB_STORAGE_TYPE);
char * loc = new (STMTHEAP) char[1024];
const char* f = ActiveSchemaDB()->getDefaults().
getValue(LOB_STORAGE_FILE_DIR);
strcpy(loc, f);
lobLocList[j] = loc;
j++;
}
}
Int64 lobMaxSize = CmpCommon::getDefaultNumeric(LOB_MAX_SIZE)*1024*1024;
if (j > 0)
{
//if the table is a volatile table return an error
if (createTableNode->isVolatile())
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_LOB_COLUMN_IN_VOLATILE_TABLE)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
Int64 objUID = getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComString newSchName = "\"";
newSchName += catalogNamePart;
newSchName.append("\".\"");
newSchName.append(schemaNamePart);
newSchName += "\"";
Lng32 rc = SQL_EXEC_LOBddlInterface((char*)newSchName.data(),
newSchName.length(),
objUID,
j,
LOB_CLI_CREATE,
lobNumList,
lobTypList,
lobLocList,
lobMaxSize);
if (rc < 0)
{
//sss TBD need to retrieve the cli diags here.
*CmpCommon::diags() << DgSqlCode(-CAT_CREATE_OBJECT_ERROR)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -2;
}
}
// if not a compound create, update valid def to true.
if (NOT ((createTableNode->getAddConstraintUniqueArray().entries() > 0) ||
(createTableNode->getAddConstraintRIArray().entries() > 0) ||
(createTableNode->getAddConstraintCheckArray().entries() > 0)))
{
cliRC = updateObjectValidDef(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, COM_YES_LIT);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -2;
}
}
if (NOT isCompound)
{
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_MINE_ONLY,
COM_BASE_TABLE_OBJECT);
}
processReturn();
return 0;
}
void CmpSeabaseDDL::createSeabaseTable(
StmtDDLCreateTable * createTableNode,
NAString &currCatName, NAString &currSchName,
NABoolean isCompound)
{
NABoolean xnWasStartedHere = FALSE;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
return;
short rc =
createSeabaseTable2(cliInterface, createTableNode, currCatName, currSchName,
isCompound);
if ((CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)) &&
(rc < 0))
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
if (rc == -2) // cleanup before returning error..
{
ComObjectName tableName(createTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
cleanupObjectAfterError(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT);
}
return;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
return;
}
void CmpSeabaseDDL::addConstraints(
ComObjectName &tableName,
ComAnsiNamePart &currCatAnsiName,
ComAnsiNamePart &currSchAnsiName,
StmtDDLAddConstraintPK * pkConstr,
StmtDDLAddConstraintUniqueArray &uniqueConstrArr,
StmtDDLAddConstraintRIArray &riConstrArr,
StmtDDLAddConstraintCheckArray &checkConstrArr,
NABoolean isCompound)
{
Lng32 cliRC = 0;
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
char buf[5000];
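// Each constraint is added by generating an ALTER TABLE ... ADD CONSTRAINT
// statement and executing it through the CLI.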
if (pkConstr)
{
StmtDDLAddConstraintUnique *uniqConstr = pkConstr;
NAString uniqueName;
genUniqueName(uniqConstr, uniqueName);
ComObjectName constrName(uniqueName);
constrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString constrCatalogNamePart = constrName.getCatalogNamePartAsAnsiString();
const NAString constrSchemaNamePart = constrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString constrObjectNamePart = constrName.getObjectNamePartAsAnsiString(TRUE);
ElemDDLConstraintUnique *constraintNode =
( uniqConstr->getConstraint() )->castToElemDDLConstraintUnique();
ElemDDLColRefArray &keyColumnArray = constraintNode->getKeyColumnArray();
NAString keyColNameStr;
for (CollIndex i = 0; i < keyColumnArray.entries(); i++)
{
keyColNameStr += "\"";
keyColNameStr += keyColumnArray[i]->getColumnName();
keyColNameStr += "\"";
if (keyColumnArray[i]->getColumnOrdering() == COM_DESCENDING_ORDER)
keyColNameStr += "DESC";
else
keyColNameStr += "ASC";
if (i+1 < keyColumnArray.entries())
keyColNameStr += ", ";
}
str_sprintf(buf, "alter table \"%s\".\"%s\".\"%s\" add constraint \"%s\".\"%s\".\"%s\" unique (%s)",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
constrCatalogNamePart.data(), constrSchemaNamePart.data(), constrObjectNamePart.data(),
keyColNameStr.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_return;
}
}
if (uniqueConstrArr.entries() > 0)
{
for (Lng32 i = 0; i < uniqueConstrArr.entries(); i++)
{
StmtDDLAddConstraintUnique *uniqConstr =
uniqueConstrArr[i];
NAString uniqueName;
genUniqueName(uniqConstr, uniqueName);
ComObjectName constrName(uniqueName);
constrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString constrCatalogNamePart = constrName.getCatalogNamePartAsAnsiString();
const NAString constrSchemaNamePart = constrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString constrObjectNamePart = constrName.getObjectNamePartAsAnsiString(TRUE);
ElemDDLConstraintUnique *constraintNode =
( uniqConstr->getConstraint() )->castToElemDDLConstraintUnique();
ElemDDLColRefArray &keyColumnArray = constraintNode->getKeyColumnArray();
NAString keyColNameStr;
for (CollIndex i = 0; i < keyColumnArray.entries(); i++)
{
keyColNameStr += "\"";
keyColNameStr += keyColumnArray[i]->getColumnName();
keyColNameStr += "\"";
if (keyColumnArray[i]->getColumnOrdering() == COM_DESCENDING_ORDER)
keyColNameStr += "DESC";
else
keyColNameStr += "ASC";
if (i+1 < keyColumnArray.entries())
keyColNameStr += ", ";
}
str_sprintf(buf, "alter table \"%s\".\"%s\".\"%s\" add constraint \"%s\".\"%s\".\"%s\" unique (%s)",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
constrCatalogNamePart.data(), constrSchemaNamePart.data(), constrObjectNamePart.data(),
keyColNameStr.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_return;
}
} // for
} // if
if (riConstrArr.entries() > 0)
{
for (Lng32 i = 0; i < riConstrArr.entries(); i++)
{
StmtDDLAddConstraintRI *refConstr = riConstrArr[i];
ComObjectName refdTableName(refConstr->getReferencedTableName(), COM_TABLE_NAME);
refdTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString refdCatNamePart = refdTableName.getCatalogNamePartAsAnsiString();
const NAString refdSchNamePart = refdTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString refdObjNamePart = refdTableName.getObjectNamePartAsAnsiString(TRUE);
NAString uniqueName;
genUniqueName(refConstr, uniqueName);
ComObjectName constrName(uniqueName);
constrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString constrCatalogNamePart = constrName.getCatalogNamePartAsAnsiString();
const NAString constrSchemaNamePart = constrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString constrObjectNamePart = constrName.getObjectNamePartAsAnsiString(TRUE);
const NAString &addConstrName = constrName.getExternalName();
ElemDDLConstraintRI *constraintNode =
( refConstr->getConstraint() )->castToElemDDLConstraintRI();
ElemDDLColNameArray &ringColumnArray = constraintNode->getReferencingColumns();
NAString ringColNameStr;
for (CollIndex i = 0; i < ringColumnArray.entries(); i++)
{
ringColNameStr += "\"";
ringColNameStr += ringColumnArray[i]->getColumnName();
ringColNameStr += "\"";
if (i+1 < ringColumnArray.entries())
ringColNameStr += ", ";
}
ElemDDLColNameArray &refdColumnArray = constraintNode->getReferencedColumns();
NAString refdColNameStr;
if (refdColumnArray.entries() > 0)
refdColNameStr = "(";
for (CollIndex i = 0; i < refdColumnArray.entries(); i++)
{
refdColNameStr += "\"";
refdColNameStr += refdColumnArray[i]->getColumnName();
refdColNameStr += "\"";
if (i+1 < refdColumnArray.entries())
refdColNameStr += ", ";
}
if (refdColumnArray.entries() > 0)
refdColNameStr += ")";
str_sprintf(buf, "alter table \"%s\".\"%s\".\"%s\" add constraint \"%s\".\"%s\".\"%s\" foreign key (%s) references \"%s\".\"%s\".\"%s\" %s %s",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
constrCatalogNamePart.data(), constrSchemaNamePart.data(), constrObjectNamePart.data(),
ringColNameStr.data(),
refdCatNamePart.data(), refdSchNamePart.data(), refdObjNamePart.data(),
(refdColumnArray.entries() > 0 ? refdColNameStr.data() : " "),
(NOT constraintNode->isEnforced() ? " not enforced " : ""));
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
}
if (NOT isCompound)
{
CorrName cn2(refdObjNamePart.data(),
STMTHEAP,
refdSchNamePart.data(),
refdCatNamePart.data());
// remove natable for the table being referenced
ActiveSchemaDB()->getNATableDB()->removeNATable(cn2,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
}
if (cliRC < 0)
goto label_return;
if (NOT constraintNode->isEnforced())
{
*CmpCommon::diags()
<< DgSqlCode(1313)
<< DgString0(addConstrName);
}
} // for
} // if
if (checkConstrArr.entries() > 0)
{
for (Lng32 i = 0; i < checkConstrArr.entries(); i++)
{
StmtDDLAddConstraintCheck *checkConstr = checkConstrArr[i];
NAString uniqueName;
genUniqueName(checkConstr, uniqueName);
ComObjectName constrName(uniqueName);
constrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString constrCatalogNamePart = constrName.getCatalogNamePartAsAnsiString();
const NAString constrSchemaNamePart = constrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString constrObjectNamePart = constrName.getObjectNamePartAsAnsiString(TRUE);
NAString constrText;
getCheckConstraintText(checkConstr, constrText);
str_sprintf(buf, "alter table \"%s\".\"%s\".\"%s\" add constraint \"%s\".\"%s\".\"%s\" check %s",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
constrCatalogNamePart.data(), constrSchemaNamePart.data(), constrObjectNamePart.data(),
constrText.data()
);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_return;
}
}
}
label_return:
if (NOT isCompound)
{
// remove NATable cache entries for this table
CorrName cn(objectNamePart.data(),
STMTHEAP,
schemaNamePart.data(),
catalogNamePart.data());
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
}
return;
}
void CmpSeabaseDDL::createSeabaseTableCompound(
StmtDDLCreateTable * createTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ComObjectName tableName(createTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
NABoolean xnWasStartedHere = FALSE;
if ((createTableNode->isVolatile()) &&
((createTableNode->getAddConstraintUniqueArray().entries() > 0) ||
(createTableNode->getAddConstraintRIArray().entries() > 0) ||
(createTableNode->getAddConstraintCheckArray().entries() > 0)))
{
*CmpCommon::diags() << DgSqlCode(-1283);
processReturn();
goto label_error;
}
createSeabaseTable(createTableNode, currCatName, currSchName, TRUE);
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_))
{
return;
}
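// The table was just created and is still empty, so constraint validation can
// be skipped while the deferred constraints are added.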
cliRC = cliInterface.holdAndSetCQD("TRAF_NO_CONSTR_VALIDATION", "ON");
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_error;
}
addConstraints(tableName, currCatAnsiName, currSchAnsiName,
NULL,
createTableNode->getAddConstraintUniqueArray(),
createTableNode->getAddConstraintRIArray(),
createTableNode->getAddConstraintCheckArray(),
TRUE);
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_))
{
if (cliInterface.statusXn() == 0) // xn in progress
{
rollbackXn(&cliInterface);
}
*CmpCommon::diags() << DgSqlCode(-1029)
<< DgTableName(extTableName);
processReturn();
goto label_error;
}
cliRC = cliInterface.restoreCQD("traf_no_constr_validation");
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
goto label_error;
cliRC = updateObjectValidDef(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT, COM_YES_LIT);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
goto label_error;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
{
CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
}
return;
label_error:
cliRC = cliInterface.restoreCQD("traf_no_constr_validation");
if (NOT createTableNode->isVolatile())
{
cleanupObjectAfterError(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT);
return;
}
}
// RETURN: -1, no need to cleanup. -2, caller needs to call cleanup
// 0, all ok.
short CmpSeabaseDDL::dropSeabaseTable2(
ExeCliInterface *cliInterface,
StmtDDLDropTable * dropTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
NAString tabName = (NAString&)dropTableNode->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComObjectName volTabName;
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
if (dropTableNode->isExternal())
{
// Convert the native name to its Trafodion form
tabName = ComConvertNativeNameToTrafName
(tableName.getCatalogNamePartAsAnsiString(),
tableName.getSchemaNamePartAsAnsiString(),
tableName.getObjectNamePartAsAnsiString());
ComObjectName adjustedName(tabName, COM_TABLE_NAME);
tableName = adjustedName;
}
NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
// allowExternalTables: true to allow an NATable entry to be created for an external table
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
bindWA.setAllowExternalTables(TRUE);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return -1;
}
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_USER_CANNOT_DROP_SMD_TABLE)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
NABoolean isVolatile = FALSE;
if ((dropTableNode->isVolatile()) &&
(CmpCommon::context()->sqlSession()->volatileSchemaInUse()))
{
volTabName = tableName;
isVolatile = TRUE;
}
if ((NOT dropTableNode->isVolatile()) &&
(CmpCommon::context()->sqlSession()->volatileSchemaInUse()))
{
// updateVolatileQualifiedName qualifies the object name with a
// volatile catalog and schema name (if a volatile schema exists)
QualifiedName *qn =
CmpCommon::context()->sqlSession()->
updateVolatileQualifiedName
(dropTableNode->getOrigTableNameAsQualifiedName().getObjectName());
// a null pointer should not be possible here, but check to be safe
if (qn == NULL)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_DROP_OBJECT)
<< DgTableName(dropTableNode->getOrigTableNameAsQualifiedName().
getQualifiedNameAsAnsiString(TRUE));
deallocEHI(ehi);
processReturn();
return -1;
}
volTabName = qn->getQualifiedNameAsAnsiString();
volTabName.applyDefaults(currCatAnsiName, currSchAnsiName);
NAString vtCatNamePart = volTabName.getCatalogNamePartAsAnsiString();
NAString vtSchNamePart = volTabName.getSchemaNamePartAsAnsiString(TRUE);
NAString vtObjNamePart = volTabName.getObjectNamePartAsAnsiString(TRUE);
retcode = existsInSeabaseMDTable(cliInterface,
vtCatNamePart, vtSchNamePart, vtObjNamePart,
COM_BASE_TABLE_OBJECT);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (retcode == 1)
{
// table found in volatile schema
// Validate volatile table name.
if (CmpCommon::context()->sqlSession()->
validateVolatileQualifiedName
(dropTableNode->getOrigTableNameAsQualifiedName()))
{
// Valid volatile table. Drop it.
tabName = volTabName.getExternalName(TRUE);
catalogNamePart = vtCatNamePart;
schemaNamePart = vtSchNamePart;
objectNamePart = vtObjNamePart;
isVolatile = TRUE;
}
else
{
// volatile table found but the name is not a valid
// volatile name. Look for the input name in the regular
// schema.
// But first clear the diags area.
CmpCommon::diags()->clear();
}
}
else
{
CmpCommon::diags()->clear();
}
}
retcode = existsInSeabaseMDTable(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (retcode == 0) // does not exist
{
if (NOT dropTableNode->dropIfExists())
{
CmpCommon::diags()->clear();
if (isVolatile)
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(objectNamePart);
else
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(extTableName);
}
deallocEHI(ehi);
processReturn();
return -1;
}
// if this table does not exist in hbase but exists in metadata, return error.
// This is an internal inconsistency which needs to be fixed by running cleanup.
// If this is an external (native HIVE or HBASE) table, then skip
if (!isSeabaseExternalSchema(catalogNamePart, schemaNamePart))
{
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
if ((NOT isVolatile)&& (ehi->exists(hbaseTable) == 0)) // does not exist in hbase
{
*CmpCommon::diags() << DgSqlCode(-4254)
<< DgString0(extTableName);
deallocEHI(ehi);
processReturn();
return -1;
}
}
// Check to see if the user has the authority to drop the table
ComObjectName verifyName;
if (isVolatile)
verifyName = volTabName;
else
verifyName = tableName;
// save the current parserflags setting
ULng32 savedParserFlags = Get_SqlParser_Flags (0xFFFFFFFF);
Set_SqlParser_Flags(ALLOW_VOLATILE_SCHEMA_IN_TABLE_NAME);
CorrName cn(objectNamePart,
STMTHEAP,
schemaNamePart,
catalogNamePart);
NATable *naTable = bindWA.getNATable(cn);
// Restore parser flags settings to what they originally were
Set_SqlParser_Flags (savedParserFlags);
if (naTable == NULL || bindWA.errStatus())
{
CmpCommon::diags()->clear();
if (NOT dropTableNode->dropIfExists())
{
CmpCommon::diags()->clear();
if (isVolatile)
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(objectNamePart);
else
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(extTableName);
}
deallocEHI(ehi);
processReturn();
return -1;
}
// naTable is known to be valid here; get its column list for later LOB handling
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
if ((dropTableNode->isVolatile()) &&
(NOT CmpCommon::context()->sqlSession()->isValidVolatileSchemaName(schemaNamePart)))
{
*CmpCommon::diags() << DgSqlCode(-1279);
deallocEHI(ehi);
processReturn();
return -1;
}
Int64 objUID = getObjectUID(cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
if (objUID < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
// Make sure user has necessary privileges to perform drop
if (!isDDLOperationAuthorized(SQLOperation::DROP_TABLE,
naTable->getOwner(),
naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return -1;
}
Queue * usingViewsQueue = NULL;
if (dropTableNode->getDropBehavior() == COM_RESTRICT_DROP_BEHAVIOR)
{
NAString usingObjName;
cliRC = getUsingObject(cliInterface, objUID, usingObjName);
if (cliRC < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (cliRC != 100) // found an object
{
*CmpCommon::diags() << DgSqlCode(-CAT_DEPENDENT_VIEW_EXISTS)
<< DgTableName(usingObjName);
deallocEHI(ehi);
processReturn();
return -1;
}
}
else if (dropTableNode->getDropBehavior() == COM_CASCADE_DROP_BEHAVIOR)
{
cliRC = getUsingViews(cliInterface, objUID, usingViewsQueue);
if (cliRC < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
const AbstractRIConstraintList &uniqueList = naTable->getUniqueConstraints();
// return error if cascade is not specified and a referential constraint exists on
// any of the unique constraints.
if (dropTableNode->getDropBehavior() == COM_RESTRICT_DROP_BEHAVIOR)
{
for (Int32 i = 0; i < uniqueList.entries(); i++)
{
AbstractRIConstraint *ariConstr = uniqueList[i];
if (ariConstr->getOperatorType() != ITM_UNIQUE_CONSTRAINT)
continue;
UniqueConstraint * uniqConstr = (UniqueConstraint*)ariConstr;
if (uniqConstr->hasRefConstraintsReferencingMe())
{
const ComplementaryRIConstraint * rc = uniqConstr->getRefConstraintReferencingMe(0);
if (rc->getTableName() != naTable->getTableName())
{
const NAString &constrName =
(rc ? rc->getConstraintName().getObjectName() : " ");
*CmpCommon::diags() << DgSqlCode(-1059)
<< DgConstraintName(constrName);
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
}
// Drop referencing objects
char query[4000];
if (usingViewsQueue)
{
usingViewsQueue->position();
for (int idx = 0; idx < usingViewsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)usingViewsQueue->getNext();
char * viewName = vi->get(0);
if (dropOneTableorView(*cliInterface,viewName,COM_VIEW_OBJECT,false))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
// drop all referential constraints referencing me.
for (Int32 i = 0; i < uniqueList.entries(); i++)
{
AbstractRIConstraint *ariConstr = uniqueList[i];
if (ariConstr->getOperatorType() != ITM_UNIQUE_CONSTRAINT)
continue;
UniqueConstraint * uniqConstr = (UniqueConstraint*)ariConstr;
// We will only reach here if the cascade option is specified.
// drop all constraints referencing me.
if (uniqConstr->hasRefConstraintsReferencingMe())
{
for (Lng32 j = 0; j < uniqConstr->getNumRefConstraintsReferencingMe(); j++)
{
const ComplementaryRIConstraint * rc =
uniqConstr->getRefConstraintReferencingMe(j);
str_sprintf(query, "alter table \"%s\".\"%s\".\"%s\" drop constraint \"%s\".\"%s\".\"%s\"",
rc->getTableName().getCatalogName().data(),
rc->getTableName().getSchemaName().data(),
rc->getTableName().getObjectName().data(),
rc->getConstraintName().getCatalogName().data(),
rc->getConstraintName().getSchemaName().data(),
rc->getConstraintName().getObjectName().data());
cliRC = cliInterface->executeImmediate(query);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return -2;
}
} // for
} // if
} // for
for (Int32 i = 0; i < uniqueList.entries(); i++)
{
AbstractRIConstraint *ariConstr = uniqueList[i];
if (ariConstr->getOperatorType() != ITM_UNIQUE_CONSTRAINT)
continue;
UniqueConstraint * uniqConstr = (UniqueConstraint*)ariConstr;
const NAString& constrCatName =
uniqConstr->getConstraintName().getCatalogName();
const NAString& constrSchName =
uniqConstr->getConstraintName().getSchemaName();
NAString constrObjName =
(NAString) uniqConstr->getConstraintName().getObjectName();
// Get the constraint UID
Int64 constrUID = -1;
// If the table being dropped is from a metadata schema, set up
// a UniqueConstraint entry for the table being dropped describing its
// primary key. This is temporary until metadata is changed to create
// primary keys with a known name.
if (isSeabasePrivMgrMD(catalogNamePart, schemaNamePart) ||
isSeabaseMD(catalogNamePart, schemaNamePart, objectNamePart))
{
assert (uniqueList.entries() == 1);
assert (uniqueList[0]->getOperatorType() == ITM_UNIQUE_CONSTRAINT);
UniqueConstraint * uniqConstr = (UniqueConstraint*)uniqueList[0];
assert (uniqConstr->isPrimaryKeyConstraint());
NAString adjustedConstrName;
if (getPKeyInfoForTable (catalogNamePart.data(),
schemaNamePart.data(),
objectNamePart.data(),
cliInterface,
constrObjName,
constrUID) == -1)
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
// Read the metadata to get the constraint UID
else
{
constrUID = getObjectUID(cliInterface,
constrCatName.data(), constrSchName.data(), constrObjName.data(),
(uniqConstr->isPrimaryKeyConstraint() ?
COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT :
COM_UNIQUE_CONSTRAINT_OBJECT_LIT));
if (constrUID == -1)
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
if (deleteConstraintInfoFromSeabaseMDTables(cliInterface,
naTable->objectUid().castToInt64(),
0,
constrUID,
0,
constrCatName,
constrSchName,
constrObjName,
(uniqConstr->isPrimaryKeyConstraint() ?
COM_PRIMARY_KEY_CONSTRAINT_OBJECT :
COM_UNIQUE_CONSTRAINT_OBJECT)))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
// drop all referential constraints from metadata
const AbstractRIConstraintList &refList = naTable->getRefConstraints();
for (Int32 i = 0; i < refList.entries(); i++)
{
AbstractRIConstraint *ariConstr = refList[i];
if (ariConstr->getOperatorType() != ITM_REF_CONSTRAINT)
continue;
RefConstraint * refConstr = (RefConstraint*)ariConstr;
// if this is a self-referencing constraint, it was already dropped as part of
// dropping 'ri constraints referencing me' earlier.
if (refConstr->selfRef())
continue;
const NAString& constrCatName =
refConstr->getConstraintName().getCatalogName();
const NAString& constrSchName =
refConstr->getConstraintName().getSchemaName();
const NAString& constrObjName =
refConstr->getConstraintName().getObjectName();
Int64 constrUID = getObjectUID(cliInterface,
constrCatName.data(), constrSchName.data(), constrObjName.data(),
COM_REFERENTIAL_CONSTRAINT_OBJECT_LIT);
if (constrUID < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
NATable *otherNaTable = NULL;
CorrName otherCN(refConstr->getUniqueConstraintReferencedByMe().getTableName());
otherNaTable = bindWA.getNATable(otherCN);
if (otherNaTable == NULL || bindWA.errStatus())
{
deallocEHI(ehi);
processReturn();
return -1;
}
AbstractRIConstraint * otherConstr =
refConstr->findConstraint(&bindWA, refConstr->getUniqueConstraintReferencedByMe());
const NAString& otherSchName =
otherConstr->getConstraintName().getSchemaName();
const NAString& otherConstrName =
otherConstr->getConstraintName().getObjectName();
Int64 otherConstrUID = getObjectUID(cliInterface,
constrCatName.data(), otherSchName.data(), otherConstrName.data(),
COM_UNIQUE_CONSTRAINT_OBJECT_LIT );
if (otherConstrUID < 0)
{
CmpCommon::diags()->clear();
otherConstrUID = getObjectUID(cliInterface,
constrCatName.data(), otherSchName.data(), otherConstrName.data(),
COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT );
if (otherConstrUID < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
if (deleteConstraintInfoFromSeabaseMDTables(cliInterface,
naTable->objectUid().castToInt64(),
otherNaTable->objectUid().castToInt64(),
constrUID,
otherConstrUID,
constrCatName,
constrSchName,
constrObjName,
COM_REFERENTIAL_CONSTRAINT_OBJECT))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
// drop all check constraints from metadata if 'no check' is not specified.
if (NOT (dropTableNode->getDropBehavior() == COM_NO_CHECK_DROP_BEHAVIOR))
{
const CheckConstraintList &checkList = naTable->getCheckConstraints();
for (Int32 i = 0; i < checkList.entries(); i++)
{
CheckConstraint *checkConstr = checkList[i];
const NAString& constrCatName =
checkConstr->getConstraintName().getCatalogName();
const NAString& constrSchName =
checkConstr->getConstraintName().getSchemaName();
const NAString& constrObjName =
checkConstr->getConstraintName().getObjectName();
Int64 constrUID = getObjectUID(cliInterface,
constrCatName.data(), constrSchName.data(), constrObjName.data(),
COM_CHECK_CONSTRAINT_OBJECT_LIT);
if (constrUID < 0)
{
deallocEHI(ehi);
processReturn();
return -1;
}
if (deleteConstraintInfoFromSeabaseMDTables(cliInterface,
naTable->objectUid().castToInt64(),
0,
constrUID,
0,
constrCatName,
constrSchName,
constrObjName,
COM_CHECK_CONSTRAINT_OBJECT))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
const NAFileSetList &indexList = naTable->getIndexList();
// first drop all index objects from metadata.
Queue * indexInfoQueue = NULL;
if (getAllIndexes(cliInterface, objUID, TRUE, indexInfoQueue))
{
deallocEHI(ehi);
processReturn();
return -1;
}
SQL_QIKEY *qiKeys = new (STMTHEAP) SQL_QIKEY[indexInfoQueue->numEntries()];
indexInfoQueue->position();
for (int idx = 0; idx < indexInfoQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)indexInfoQueue->getNext();
NAString idxCatName = (char*)vi->get(0);
NAString idxSchName = (char*)vi->get(1);
NAString idxObjName = (char*)vi->get(2);
// set up a qiKey for this index; later we will remove the
// index cache entry from concurrent processes
Int64 objUID = *(Int64*)vi->get(3);
qiKeys[idx].ddlObjectUID = objUID;
qiKeys[idx].operation[0] = 'O';
qiKeys[idx].operation[1] = 'R';
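// ('O','R' is the object-redefinition action; it tells other processes to
// invalidate their cached definition of this index)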
NAString qCatName = "\"" + idxCatName + "\"";
NAString qSchName = "\"" + idxSchName + "\"";
NAString qObjName = "\"" + idxObjName + "\"";
ComObjectName coName(qCatName, qSchName, qObjName);
NAString ansiName = coName.getExternalName(TRUE);
if (dropSeabaseObject(ehi, ansiName,
idxCatName, idxSchName, COM_INDEX_OBJECT, TRUE, FALSE))
{
NADELETEBASIC (qiKeys, STMTHEAP);
deallocEHI(ehi);
processReturn();
return -1;
}
} // for
// Remove index entries from other processes cache
// Fix for bug 1396774 & bug 1396746
if (indexInfoQueue->numEntries() > 0)
SQL_EXEC_SetSecInvalidKeys(indexInfoQueue->numEntries(), qiKeys);
NADELETEBASIC (qiKeys, STMTHEAP);
// if there is an identity column, drop sequence corresponding to it.
NABoolean found = FALSE;
Lng32 idPos = 0;
NAColumn *col = NULL;
while ((NOT found) && (idPos < naTable->getColumnCount()))
{
col = naTable->getNAColumnArray()[idPos];
if (col->isIdentityColumn())
{
found = TRUE;
continue;
}
idPos++;
}
if (found)
{
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catalogNamePart, schemaNamePart, objectNamePart, col->getColName(),
seqName);
char buf[4000];
str_sprintf(buf, "drop sequence %s.\"%s\".\"%s\"",
catalogNamePart.data(), schemaNamePart.data(), seqName.data());
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0 && cliRC != -CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
deallocEHI(ehi);
processReturn();
return -1;
}
}
// drop SB_HISTOGRAMS and SB_HISTOGRAM_INTERVALS entries, if any, unless the
// table being dropped is itself a SB_HISTOGRAMS or SB_HISTOGRAM_INTERVALS table
// TBD: need to change once we start updating statistics for external
// tables
if (! (tableName.isExternalHive() || tableName.isExternalHbase()) )
{
if (objectNamePart != "SB_HISTOGRAMS" &&
objectNamePart != "SB_HISTOGRAM_INTERVALS")
{
if (dropSeabaseStats(cliInterface,
catalogNamePart.data(),
schemaNamePart.data(),
objUID))
{
deallocEHI(ehi);
processReturn();
return -1;
}
}
}
// if metadata drop succeeds, drop indexes from hbase.
indexInfoQueue->position();
for (int idx = 0; idx < indexInfoQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)indexInfoQueue->getNext();
NAString idxCatName = (char*)vi->get(0);
NAString idxSchName = (char*)vi->get(1);
NAString idxObjName = (char*)vi->get(2);
NAString qCatName = "\"" + idxCatName + "\"";
NAString qSchName = "\"" + idxSchName + "\"";
NAString qObjName = "\"" + idxObjName + "\"";
ComObjectName coName(qCatName, qSchName, qObjName);
NAString ansiName = coName.getExternalName(TRUE);
if (dropSeabaseObject(ehi, ansiName,
idxCatName, idxSchName, COM_INDEX_OBJECT, FALSE, TRUE))
{
deallocEHI(ehi);
processReturn();
return -2;
}
CorrName cni(qObjName, STMTHEAP, qSchName, qCatName);
ActiveSchemaDB()->getNATableDB()->removeNATable(cni,
NATableDB::REMOVE_FROM_ALL_USERS, COM_INDEX_OBJECT);
cni.setSpecialType(ExtendedQualName::INDEX_TABLE);
ActiveSchemaDB()->getNATableDB()->removeNATable(cni,
NATableDB::REMOVE_MINE_ONLY, COM_INDEX_OBJECT);
} // for
// If blob/clob columns are present, drop all the dependent files.
Lng32 numCols = nacolArr.entries();
// if this table has lob columns, drop the lob files
short *lobNumList = new (STMTHEAP) short[numCols];
short *lobTypList = new (STMTHEAP) short[numCols];
char **lobLocList = new (STMTHEAP) char*[numCols];
Lng32 j = 0;
for (Int32 i = 0; i < nacolArr.entries(); i++)
{
NAColumn *naColumn = nacolArr[i];
Lng32 datatype = naColumn->getType()->getFSDatatype();
if ((datatype == REC_BLOB) ||
(datatype == REC_CLOB))
{
lobNumList[j] = i; //column->getColumnNumber();
lobTypList[j] =
(short)(naColumn->lobStorageType() == Lob_Invalid_Storage
? Lob_HDFS_File : naColumn->lobStorageType());
// lobTypList[j] = (short)
// CmpCommon::getDefaultNumeric(LOB_STORAGE_TYPE);
char * loc = new (STMTHEAP) char[1024];
const char* f = ActiveSchemaDB()->getDefaults().
getValue(LOB_STORAGE_FILE_DIR);
strcpy(loc, f);
lobLocList[j] = loc;
j++;
}
}
if (j > 0)
{
Int64 objUID = getObjectUID(cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComString newSchName = "\"";
newSchName += catalogNamePart;
newSchName.append("\".\"");
newSchName.append(schemaNamePart);
newSchName += "\"";
Lng32 rc = SQL_EXEC_LOBddlInterface((char*)newSchName.data(),
newSchName.length(),
objUID,
j,
LOB_CLI_DROP,
lobNumList,
lobTypList,
lobLocList,0);
if (rc < 0)
{
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_DROP_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return -2;
}
}
//Finally drop the table
if (dropSeabaseObject(ehi, tabName,
currCatName, currSchName, COM_BASE_TABLE_OBJECT))
{
deallocEHI(ehi);
processReturn();
return -2;
}
deallocEHI(ehi);
processReturn();
CorrName cn2(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable(cn2,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
for (Int32 i = 0; i < refList.entries(); i++)
{
AbstractRIConstraint *ariConstr = refList[i];
if (ariConstr->getOperatorType() != ITM_REF_CONSTRAINT)
continue;
RefConstraint * refConstr = (RefConstraint*)ariConstr;
CorrName otherCN(refConstr->getUniqueConstraintReferencedByMe().getTableName());
ActiveSchemaDB()->getNATableDB()->removeNATable(otherCN,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
}
for (Int32 i = 0; i < uniqueList.entries(); i++)
{
UniqueConstraint * uniqConstr = (UniqueConstraint*)uniqueList[i];
// We will only reach here if the cascade option is specified.
// drop all constraints referencing me.
if (uniqConstr->hasRefConstraintsReferencingMe())
{
for (Lng32 j = 0; j < uniqConstr->getNumRefConstraintsReferencingMe(); j++)
{
const ComplementaryRIConstraint * rc =
uniqConstr->getRefConstraintReferencingMe(j);
// remove this ref constr entry from natable cache
CorrName cnr(rc->getTableName().getObjectName().data(), STMTHEAP,
rc->getTableName().getSchemaName().data(),
rc->getTableName().getCatalogName().data());
ActiveSchemaDB()->getNATableDB()->removeNATable(cnr,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
} // for
} // if
} // for
return 0;
}
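// dropSeabaseTable: wrapper around dropSeabaseTable2. Starts a transaction
// if one is not already in progress, performs the drop, and if the drop
// fails hard (rc == -2) calls cleanupObjectAfterError before returning.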
void CmpSeabaseDDL::dropSeabaseTable(
StmtDDLDropTable * dropTableNode,
NAString &currCatName, NAString &currSchName)
{
NABoolean xnWasStartedHere = FALSE;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
return;
short rc =
dropSeabaseTable2(&cliInterface, dropTableNode, currCatName, currSchName);
if ((CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)) &&
(rc < 0))
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
if (rc == -2) // cleanup before returning error..
{
ComObjectName tableName(dropTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
cleanupObjectAfterError(cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT);
}
return;
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
return;
}
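// renameSeabaseTable: processes ALTER TABLE ... RENAME TO. Validates the
// source and target names (no volatile tables, views or dependent views),
// updates the object name in the metadata, copies the underlying HBase
// table to the new name, drops the old HBase table and invalidates the
// cached NATable entries for both names.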
void CmpSeabaseDDL::renameSeabaseTable(
StmtDDLAlterTableRename * renameTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
ComObjectName tableName(renameTableNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ComObjectName newTableName(renameTableNode->getNewNameAsAnsiString());
// newTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
newTableName.applyDefaults(catalogNamePart, schemaNamePart);
const NAString newObjectNamePart = newTableName.getObjectNamePartAsAnsiString(TRUE);
const NAString newExtTableName = newTableName.getExternalName(TRUE);
const NAString newExtNameForHbase = catalogNamePart + "." + schemaNamePart + "." + newObjectNamePart;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CREATE_TABLE_NOT_ALLOWED_IN_SMD)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return;
}
if (CmpCommon::context()->sqlSession()->volatileSchemaInUse())
{
QualifiedName *qn =
CmpCommon::context()->sqlSession()->
updateVolatileQualifiedName
(renameTableNode->getTableNameAsQualifiedName().getObjectName());
if (qn == NULL)
{
*CmpCommon::diags()
<< DgSqlCode(-1427);
processReturn();
return;
}
ComObjectName volTabName (qn->getQualifiedNameAsAnsiString());
volTabName.applyDefaults(currCatAnsiName, currSchAnsiName);
NAString vtCatNamePart = volTabName.getCatalogNamePartAsAnsiString();
NAString vtSchNamePart = volTabName.getSchemaNamePartAsAnsiString(TRUE);
NAString vtObjNamePart = volTabName.getObjectNamePartAsAnsiString(TRUE);
retcode = existsInSeabaseMDTable(&cliInterface,
vtCatNamePart, vtSchNamePart, vtObjNamePart,
COM_BASE_TABLE_OBJECT);
if (retcode < 0)
{
processReturn();
return;
}
if (retcode == 1)
{
// table found in volatile schema. cannot rename it.
*CmpCommon::diags()
<< DgSqlCode(-1427)
<< DgString0("Reason: Operation not allowed on volatile tables.");
processReturn();
return;
}
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(objectNamePart,
STMTHEAP,
schemaNamePart,
catalogNamePart);
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
CmpCommon::diags()->clear();
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(extTableName);
processReturn();
return;
}
// Make sure user has the privilege to perform the rename
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
processReturn ();
return;
}
CorrName newcn(newObjectNamePart,
STMTHEAP,
schemaNamePart,
catalogNamePart);
NATable *newNaTable = bindWA.getNATable(newcn);
// error if an object with the new name already exists
if (newNaTable != NULL && (NOT bindWA.errStatus()))
{
*CmpCommon::diags() << DgSqlCode(-1390)
<< DgString0(newExtTableName);
processReturn();
return;
}
CmpCommon::diags()->clear();
// cannot rename a view
if (naTable->getViewText())
{
*CmpCommon::diags()
<< DgSqlCode(-1427)
<< DgString0("Reason: Operation not allowed on a view.");
processReturn();
return;
}
Int64 objUID = getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
if (objUID < 0)
{
processReturn();
return;
}
// cannot rename if views are using this table
Queue * usingViewsQueue = NULL;
cliRC = getUsingViews(&cliInterface, objUID, usingViewsQueue);
if (cliRC < 0)
{
processReturn();
return;
}
if (usingViewsQueue->numEntries() > 0)
{
*CmpCommon::diags() << DgSqlCode(-1427)
<< DgString0("Reason: Dependent views exist.");
processReturn();
return;
}
cliRC = updateObjectName(&cliInterface,
objUID,
catalogNamePart.data(), schemaNamePart.data(),
newObjectNamePart.data());
if (cliRC < 0)
{
processReturn();
return;
}
// rename the underlying hbase object
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
HbaseStr newHbaseTable;
newHbaseTable.val = (char*)newExtNameForHbase.data();
newHbaseTable.len = newExtNameForHbase.length();
retcode = ehi->copy(hbaseTable, newHbaseTable);
if (retcode < 0)
{
*CmpCommon::diags() << DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::copy()")
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr().data());
deallocEHI(ehi);
processReturn();
return;
}
retcode = dropHbaseTable(ehi, &hbaseTable);
if (retcode < 0)
{
return;
}
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
ActiveSchemaDB()->getNATableDB()->removeNATable(newcn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
return;
}
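// alterSeabaseTableHBaseOptions: processes ALTER TABLE ... HBASE_OPTIONS.
// Updates the stored HBase options in the metadata and asks HBase to alter
// the table accordingly, then invalidates the cached NATable entry.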
void CmpSeabaseDDL::alterSeabaseTableHBaseOptions(
StmtDDLAlterTableHBaseOptions * hbaseOptionsNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
ComObjectName tableName(hbaseOptionsNode->getTableName());
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
// Disallow this ALTER on system metadata schema objects
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_ALTER_NOT_ALLOWED_IN_SMD)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return;
}
// Note: In the rename code (CmpSeabaseDDL::renameSeabaseTable), there
// is logic about here to forbid a rename on a volatile table. There doesn't
// seem to be any reason to forbid changing HBASE_OPTIONS on a volatile
// table (and indeed it appears to work fine), so we don't have this
// 'forbid' logic here.
// Make sure this object exists
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(objectNamePart,
STMTHEAP,
schemaNamePart,
catalogNamePart);
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
CmpCommon::diags()->clear();
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(extTableName);
deallocEHI(ehi);
processReturn();
return;
}
// Make sure user has the privilege to perform the ALTER
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return;
}
CmpCommon::diags()->clear();
// Get the object UID so we can update the metadata
Int64 objUID = getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
if (objUID < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
// update HBase options in the metadata
ElemDDLHbaseOptions * edhbo = hbaseOptionsNode->getHBaseOptions();
short result = updateHbaseOptionsInMetadata(&cliInterface,objUID,edhbo);
if (result < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
// tell HBase to change the options
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
result = alterHbaseTable(ehi,
&hbaseTable,
naTable->allColFams(),
&(edhbo->getHbaseOptions()));
if (result < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
// invalidate cached NATable info on this table for all users
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
deallocEHI(ehi);
return;
}
/////////////////////////////////////////////////////////////////////
// currTab: table on which column is being added to or dropped from
// newTempTab: temporary table with new definition
// currTempTab: name of table that currTab that will be renamed to
//
// Steps:
// create newTempTab based on currTab and added/dropped column
// insert data into newTempTab from currTab
// rename currTab to currTempTab
// rename newTempTab to currTab
// drop currTempTab
//
/////////////////////////////////////////////////////////////////////
short CmpSeabaseDDL::alignedFormatTableAddDropColumn
(
Int64 objUID,
NABoolean isAdd,
const NAString &catalogNamePart,
const NAString &schemaNamePart,
const NAString &objectNamePart,
char * colName, const NAColumn * nacol)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
NABoolean xnWasStartedHere = FALSE;
Queue * usingViewsQueue = NULL;
cliRC = getUsingViews(&cliInterface, objUID, usingViewsQueue);
if (cliRC < 0)
{
processReturn();
return -1;
}
if (usingViewsQueue->numEntries() > 0)
{
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
return -1;
}
NAList<NAString> viewNameList(STMTHEAP, usingViewsQueue->numEntries());
NAList<NAString> viewDefnList(STMTHEAP, usingViewsQueue->numEntries());
// create temp table based on the current table DDL and added/dropped column.
// add/drop col name is colName.
// Added col defn is contained in nacol.
NAString newTempTab;
ComDeriveRandomInternalName ( ComGetNameInterfaceCharSet(),
objectNamePart, newTempTab, STMTHEAP);
char newTempTabStr[1000];
str_sprintf(newTempTabStr, "%s.\"%s\".%s",
catalogNamePart.data(), schemaNamePart.data(), newTempTab.data());
CorrName newTempTabCN(newTempTab,
STMTHEAP, schemaNamePart, catalogNamePart);
// current table and curr temp table
char currTabStr[1000];
str_sprintf(currTabStr, "%s.\"%s\".%s",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data());
CorrName currTabCN(objectNamePart, STMTHEAP,
schemaNamePart, catalogNamePart);
NAString currTempTab;
ComDeriveRandomInternalName ( ComGetNameInterfaceCharSet(),
objectNamePart, currTempTab, STMTHEAP);
char currTempTabStr[1000];
str_sprintf(currTempTabStr, "%s.\"%s\".%s",
catalogNamePart.data(), schemaNamePart.data(), currTempTab.data());
// create DDL for newTempTab
char * buf = NULL;
ULng32 buflen = 0;
retcode = CmpDescribeSeabaseTable(currTabCN, 3/*createlike*/, buf, buflen,
STMTHEAP,
NULL,
FALSE, FALSE, FALSE,
TRUE,
colName, isAdd, nacol);
if (retcode)
return -1;
// find out any views on this table.
// save their definition and drop them.
// they will be recreated before return.
usingViewsQueue->position();
for (int idx = 0; idx < usingViewsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)usingViewsQueue->getNext();
char * viewName = vi->get(0);
viewNameList.insert(viewName);
ComObjectName viewCO(viewName, COM_TABLE_NAME);
const NAString catName = viewCO.getCatalogNamePartAsAnsiString();
const NAString schName = viewCO.getSchemaNamePartAsAnsiString(TRUE);
const NAString objName = viewCO.getObjectNamePartAsAnsiString(TRUE);
Int64 viewUID = getObjectUID(&cliInterface,
catName.data(), schName.data(), objName.data(),
COM_VIEW_OBJECT_LIT);
if (viewUID < 0 )
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
return -1;
}
NAString viewText;
if (getTextFromMD(&cliInterface, viewUID, COM_VIEW_TEXT, 0, viewText))
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
return -1;
}
viewDefnList.insert(viewText);
if (dropOneTableorView(cliInterface,viewName,COM_VIEW_OBJECT,false))
{
endXnIfStartedHere(&cliInterface, xnWasStartedHere, -1);
processReturn();
return -1;
}
}
endXnIfStartedHere(&cliInterface, xnWasStartedHere, 0);
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE);
NATable * naTable = NULL;
NAString colNames;
char queryBuf[1000];
NAString query = "create table ";
query += newTempTabStr;
query += " ";
NABoolean done = FALSE;
Lng32 curPos = 0;
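// The DDL text returned by CmpDescribeSeabaseTable is a sequence of
// length-prefixed fragments, each padded to an 8-byte boundary; reassemble
// them into the CREATE TABLE statement for the new temp table.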
while (NOT done)
{
short len = *(short*)&buf[curPos];
NAString frag(&buf[curPos+sizeof(short)],
len - ((buf[curPos+len-1]== '\n') ? 1 : 0));
query += frag;
curPos += ((((len+sizeof(short))-1)/8)+1)*8;
if (curPos >= buflen)
done = TRUE;
}
cliRC = cliInterface.executeImmediate((char*)query.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
// naTable = bindWA.getNATable((isAdd ? currTabCN : newTempTabCN));
naTable = bindWA.getNATable(newTempTabCN);
if (! naTable)
{
goto label_error;
}
// update metadata to change column type to 'A'(added)
if (isAdd)
{
str_sprintf(queryBuf, "update %s.\"%s\".%s set column_class = 'A' where object_uid = %Ld and column_name = '%s' ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
naTable->objectUid().castToInt64(), colName);
cliRC = cliInterface.executeImmediate(queryBuf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
}
// insert data from current table into new temp table.
// query = "upsert using load into ";
query = "insert into ";
query += newTempTabStr;
query += " ";
for (Lng32 i = 0; i < naTable->getNAColumnArray().entries(); i++)
{
const NAColumn *nac = naTable->getNAColumnArray()[i];
if (nac->isSystemColumn())
continue;
if ((isAdd) && (colName == nac->getColName()))
continue;
colNames += nac->getColName();
colNames += ",";
}
// remove last comma
colNames = colNames.strip(NAString::trailing, ',');
query += "(" + colNames + ")";
query += " select ";
query += colNames;
query += " from ";
query += currTabStr;
query += ";";
cliRC = cliInterface.executeImmediate(query.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_error;
}
// rename current table to temp
str_sprintf(queryBuf, "alter table \"%s\".\"%s\".\"%s\" rename to \"%s\" ",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
currTempTab.data());
cliRC = cliInterface.executeImmediate(queryBuf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_error;
}
// rename new temp table to current table
str_sprintf(queryBuf, "alter table \"%s\".\"%s\".\"%s\" rename to \"%s\" ",
catalogNamePart.data(), schemaNamePart.data(), newTempTab.data(),
objectNamePart.data());
cliRC = cliInterface.executeImmediate(queryBuf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_error1;
}
// drop curr temp table
str_sprintf(queryBuf, "drop table \"%s\".\"%s\".\"%s\" ",
catalogNamePart.data(), schemaNamePart.data(), currTempTab.data(),
objectNamePart.data());
cliRC = cliInterface.executeImmediate(queryBuf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
goto label_error0;
}
if (recreateViews(cliInterface, viewNameList, viewDefnList))
{
return -1;
}
return 0;
label_error1:
// rename current temp table to current
str_sprintf(queryBuf, "alter table \"%s\".\"%s\".\"%s\" rename to \"%s\" ",
catalogNamePart.data(), schemaNamePart.data(), currTempTab.data(),
objectNamePart.data());
cliRC = cliInterface.executeImmediate(queryBuf);
label_error:
cleanupObjectAfterError(cliInterface,
catalogNamePart, schemaNamePart, newTempTab,
COM_BASE_TABLE_OBJECT);
recreateViews(cliInterface, viewNameList, viewDefnList);
return -1;
label_error0:
cleanupObjectAfterError(cliInterface,
catalogNamePart, schemaNamePart, currTempTab,
COM_BASE_TABLE_OBJECT);
recreateViews(cliInterface, viewNameList, viewDefnList);
return -1;
}
short CmpSeabaseDDL::recreateViews(ExeCliInterface &cliInterface,
NAList<NAString> &viewNameList,
NAList<NAString> &viewDefnList)
{
Lng32 cliRC = 0;
for (Lng32 i = 0; i < viewDefnList.entries(); i++)
{
cliRC = cliInterface.executeImmediate(viewDefnList[i]);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
}
return 0;
}
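// alterSeabaseTableAddColumn: processes ALTER TABLE ... ADD COLUMN.
// Validates the new column definition, records it in the COLUMNS metadata
// as an added user column, creates a new HBase column family for it if
// needed, and processes any constraints specified along with the column.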
void CmpSeabaseDDL::alterSeabaseTableAddColumn(
StmtDDLAlterTableAddColumn * alterAddColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterAddColNode->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
ActiveSchemaDB()->getNATableDB()->useCache();
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
CmpCommon::diags()->clear();
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
processReturn();
return;
}
// Make sure user has the privilege to perform the add column
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
processReturn ();
return;
}
// return an error if trying to add a column to a volatile table
if (naTable->isVolatileTable())
{
*CmpCommon::diags() << DgSqlCode(-CAT_REGULAR_OPERATION_ON_VOLATILE_OBJECT);
processReturn ();
return;
}
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
ElemDDLColDefArray ColDefArray = alterAddColNode->getColDefArray();
ElemDDLColDef *pColDef = ColDefArray[0];
// Do not allow a NOT NULL constraint without a default
// clause. Do not allow DEFAULT NULL together with NOT NULL.
if (pColDef->getIsConstraintNotNullSpecified())
{
if (pColDef->getDefaultClauseStatus() != ElemDDLColDef::DEFAULT_CLAUSE_SPEC)
{
*CmpCommon::diags() << DgSqlCode(-CAT_DEFAULT_REQUIRED);
processReturn();
return;
}
ConstValue *pDefVal = (ConstValue *)pColDef->getDefaultValueExpr();
if ((pDefVal) &&
(pDefVal->origOpType() != ITM_CURRENT_USER) &&
(pDefVal->origOpType() != ITM_CURRENT_TIMESTAMP) &&
(pDefVal->origOpType() != ITM_CAST))
{
if (pDefVal->isNull())
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_BE_DEFAULT_NULL_AND_NOT_NULL);
processReturn();
return;
}
}
}
//Do not allow NO DEFAULT
if (pColDef->getDefaultClauseStatus() == ElemDDLColDef::NO_DEFAULT_CLAUSE_SPEC)
{
*CmpCommon::diags() << DgSqlCode(-CAT_DEFAULT_REQUIRED);
processReturn();
return;
}
if (pColDef->getSGOptions())
{
*CmpCommon::diags() << DgSqlCode(-1514);
processReturn();
return;
}
char query[4000];
NAString colFamily;
NAString colName;
Lng32 datatype, length, precision, scale, dt_start, dt_end, nullable, upshifted;
ComColumnClass colClass;
ComColumnDefaultClass defaultClass;
NAString charset, defVal;
NAString heading;
ULng32 hbaseColFlags;
Int64 colFlags;
LobsStorage lobStorage;
if (getColInfo(pColDef,
colFamily,
colName,
naTable->isSQLMXAlignedTable(),
datatype, length, precision, scale, dt_start, dt_end, upshifted, nullable,
charset, colClass, defaultClass, defVal, heading, lobStorage, hbaseColFlags, colFlags))
{
processReturn();
return;
}
if (colFamily.isNull())
{
colFamily = naTable->defaultColFam();
}
NABoolean addFam = FALSE;
NAString trafColFam;
if (colFamily == SEABASE_DEFAULT_COL_FAMILY)
trafColFam = colFamily;
else
{
CollIndex idx = naTable->allColFams().index(colFamily);
if (idx == NULL_COLL_INDEX) // doesn't exist, add it
{
idx = naTable->allColFams().entries();
addFam = TRUE;
}
genTrafColFam(idx, trafColFam);
}
const NAColumn * nacol = nacolArr.getColumn(colName);
if (nacol)
{
// column exists. Error or return, depending on 'if not exists' option.
if (NOT alterAddColNode->addIfNotExists())
{
*CmpCommon::diags() << DgSqlCode(-CAT_DUPLICATE_COLUMNS)
<< DgColumnName(colName);
}
processReturn();
return;
}
// If column is a LOB column , error
if ((datatype == REC_BLOB) || (datatype == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COLUMN_ALTER)
<< DgColumnName(colName);
processReturn();
return;
}
char * col_name = new(STMTHEAP) char[colName.length() + 1];
strcpy(col_name, (char*)colName.data());
ULng32 maxColQual = nacolArr.getMaxTrafHbaseColQualifier();
NAString quotedHeading;
if (NOT heading.isNull())
{
ToQuotedString(quotedHeading, heading, FALSE);
}
NAString quotedDefVal;
if (NOT defVal.isNull())
{
ToQuotedString(quotedDefVal, defVal, FALSE);
}
Int64 objUID = naTable->objectUid().castToInt64();
str_sprintf(query, "insert into %s.\"%s\".%s values (%Ld, '%s', %d, '%s', %d, '%s', %d, %d, %d, %d, %d, '%s', %d, %d, '%s', %d, '%s', '%s', '%s', '%u', '%s', '%s', %Ld )",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
col_name,
naTable->getColumnCount(),
COM_ADDED_USER_COLUMN_LIT,
datatype,
getAnsiTypeStrFromFSType(datatype),
length,
precision,
scale,
dt_start,
dt_end,
(upshifted ? "Y" : "N"),
hbaseColFlags,
nullable,
(char*)charset.data(),
(Lng32)defaultClass,
(quotedDefVal.isNull() ? "" : quotedDefVal.data()),
(quotedHeading.isNull() ? "" : quotedHeading.data()),
trafColFam.data(),
maxColQual+1,
COM_UNKNOWN_PARAM_DIRECTION_LIT,
"N",
colFlags);
cliRC = cliInterface.executeImmediate(query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
// if column family of added col doesn't exist in the table, add it
if (addFam)
{
NAString currColFams;
if (getTextFromMD(&cliInterface, objUID, COM_HBASE_COL_FAMILY_TEXT,
0, currColFams))
{
deallocEHI(ehi);
processReturn();
return;
}
Lng32 cliRC = deleteFromTextTable(&cliInterface, objUID,
COM_HBASE_COL_FAMILY_TEXT, 0);
if (cliRC < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
NAString allColFams = currColFams + " " + colFamily;
cliRC = updateTextTable(&cliInterface, objUID,
COM_HBASE_COL_FAMILY_TEXT, 0,
allColFams);
if (cliRC < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-CAT_UNABLE_TO_CREATE_OBJECT)
<< DgTableName(extTableName);
deallocEHI(ehi);
processReturn();
return;
}
HbaseCreateOption hbco("NAME", trafColFam.data());
NAList<HbaseCreateOption*> hbcol;
hbcol.insert(&hbco);
ElemDDLHbaseOptions edhbo(&hbcol, STMTHEAP);
NAList<NAString> nal;
nal.insert(trafColFam);
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
cliRC = alterHbaseTable(ehi,
&hbaseTable,
nal,
&(edhbo.getHbaseOptions()));
if (cliRC < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
}
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
if ((alterAddColNode->getAddConstraintPK()) OR
(alterAddColNode->getAddConstraintCheckArray().entries() NEQ 0) OR
(alterAddColNode->getAddConstraintUniqueArray().entries() NEQ 0) OR
(alterAddColNode->getAddConstraintRIArray().entries() NEQ 0))
{
addConstraints(tableName, currCatAnsiName, currSchAnsiName,
alterAddColNode->getAddConstraintPK(),
alterAddColNode->getAddConstraintUniqueArray(),
alterAddColNode->getAddConstraintRIArray(),
alterAddColNode->getAddConstraintCheckArray());
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_))
return;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT))
{
processReturn();
return;
}
label_return:
processReturn();
return;
}
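// alterSeabaseTableDropColumn: processes ALTER TABLE ... DROP COLUMN.
// Key, secondary index and LOB columns cannot be dropped. Aligned format
// tables are rewritten through alignedFormatTableAddDropColumn; for HBase
// format tables the column metadata is deleted, the remaining column
// numbers are adjusted, and the column data is removed from the HBase table.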
void CmpSeabaseDDL::alterSeabaseTableDropColumn(
StmtDDLAlterTableDropColumn * alterDropColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterDropColNode->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
ActiveSchemaDB()->getNATableDB()->useCache();
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
processReturn();
return;
}
// Make sure user has the privilege to perform the drop column
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
processReturn ();
return;
}
// return an error if trying to drop a column from a volatile table
if (naTable->isVolatileTable())
{
*CmpCommon::diags() << DgSqlCode(-CAT_REGULAR_OPERATION_ON_VOLATILE_OBJECT);
processReturn ();
return;
}
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
const NAString &colName = alterDropColNode->getColName();
const NAColumn * nacol = nacolArr.getColumn(colName);
if (! nacol)
{
// column doesn't exist. Error or return, depending on 'if exists' option.
if (NOT alterDropColNode->dropIfExists())
{
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colName);
}
processReturn();
return;
}
// If column is a LOB column , error
Int32 datatype = nacol->getType()->getFSDatatype();
if ((datatype == REC_BLOB) || (datatype == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COLUMN_ALTER)
<< DgColumnName(colName);
processReturn();
return;
}
const NAFileSet * naFS = naTable->getClusteringIndex();
const NAColumnArray &naKeyColArr = naFS->getIndexKeyColumns();
if (naKeyColArr.getColumn(colName))
{
// key column cannot be dropped
*CmpCommon::diags() << DgSqlCode(-1420)
<< DgColumnName(colName);
processReturn();
return;
}
if (naTable->hasSecondaryIndexes())
{
const NAFileSetList &naFsList = naTable->getIndexList();
for (Lng32 i = 0; i < naFsList.entries(); i++)
{
naFS = naFsList[i];
// skip clustering index
if (naFS->getKeytag() == 0)
continue;
const NAColumnArray &naIndexColArr = naFS->getAllColumns();
if (naIndexColArr.getColumn(colName))
{
// secondary index column cannot be dropped
*CmpCommon::diags() << DgSqlCode(-1421)
<< DgColumnName(colName)
<< DgTableName(naFS->getExtFileSetName());
processReturn();
return;
}
} // for
} // secondary indexes present
if ((naTable->getClusteringIndex()->hasSyskey()) &&
(nacolArr.entries() == 2))
{
// this table has one SYSKEY column and one other column.
// Dropping that column will leave the table with no user column.
// Return an error.
*CmpCommon::diags() << DgSqlCode(-1424)
<< DgColumnName(colName);
processReturn();
return;
}
Int64 objUID = naTable->objectUid().castToInt64();
NABoolean xnWasStartedHere = FALSE;
Lng32 colNumber = nacol->getPosition();
char *col = NULL;
if (naTable->isSQLMXAlignedTable())
{
if (alignedFormatTableAddDropColumn(naTable->objectUid().castToInt64(),
FALSE, // drop col
catalogNamePart, schemaNamePart, objectNamePart,
(char*)colName.data(), NULL))
{
processReturn();
return;
}
}
else
{
if (beginXnIfNotInProgress(&cliInterface, xnWasStartedHere))
return;
char buf[4000];
str_sprintf(buf, "delete from %s.\"%s\".%s where object_uid = %Ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
colNumber);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_return;
}
str_sprintf(buf, "update %s.\"%s\".%s set column_number = column_number - 1 where object_uid = %Ld and column_number >= %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
colNumber);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_return;
}
str_sprintf(buf, "update %s.\"%s\".%s set column_number = column_number - 1 where object_uid = %Ld and column_number >= %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
objUID,
colNumber);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_return;
}
// keys for indexes refer to base table column number.
// modify it so they now refer to new column numbers.
if (naTable->hasSecondaryIndexes())
{
const NAFileSetList &naFsList = naTable->getIndexList();
for (Lng32 i = 0; i < naFsList.entries(); i++)
{
naFS = naFsList[i];
// skip clustering index
if (naFS->getKeytag() == 0)
continue;
const QualifiedName &indexName = naFS->getFileSetName();
str_sprintf(buf, "update %s.\"%s\".%s set column_number = column_number - 1 where column_number >= %d and object_uid = (select object_uid from %s.\"%s\".%s where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' and object_type = 'IX') ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
colNumber,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
indexName.getCatalogName().data(),
indexName.getSchemaName().data(),
indexName.getObjectName().data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_return;
}
} // for
} // secondary indexes present
// remove column from all rows of the base table
HbaseStr hbaseTable;
hbaseTable.val = (char*)extNameForHbase.data();
hbaseTable.len = extNameForHbase.length();
{
NAString column(nacol->getHbaseColFam(), heap_);
column.append(":");
char * colQualPtr = (char*)nacol->getHbaseColQual().data();
Lng32 colQualLen = nacol->getHbaseColQual().length();
Int64 colQval = str_atoi(colQualPtr, colQualLen);
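// append the column qualifier using the smallest binary width
// (1, 2, 4 or 8 bytes) that can hold its value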
if (colQval <= UCHAR_MAX)
{
unsigned char c = (unsigned char)colQval;
column.append((char*)&c, 1);
}
else if (colQval <= USHRT_MAX)
{
unsigned short s = (unsigned short)colQval;
column.append((char*)&s, 2);
}
else if (colQval <= ULONG_MAX)
{
Lng32 l = (Lng32)colQval;
column.append((char*)&l, 4);
}
else
column.append((char*)&colQval, 8);
HbaseStr colNameStr;
col = (char *) heap_->allocateMemory(column.length() + 1, FALSE);
if (col)
{
memcpy(col, column.data(), column.length());
col[column.length()] = 0;
colNameStr.val = col;
colNameStr.len = column.length();
}
else
{
cliRC = -EXE_NO_MEM_TO_EXEC;
*CmpCommon::diags() << DgSqlCode(-EXE_NO_MEM_TO_EXEC); // error -8571
goto label_return;
}
cliRC = ehi->deleteColumns(hbaseTable, colNameStr);
if (cliRC < 0)
{
*CmpCommon::diags() << DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::deleteColumns()")
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr().data());
goto label_return;
}
}
} // hbase format table
cliRC = updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT);
if (cliRC < 0)
{
goto label_return;
}
label_return:
endXnIfStartedHere(&cliInterface, xnWasStartedHere, cliRC);
deallocEHI(ehi);
heap_->deallocateMemory(col);
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
processReturn();
return;
}
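// alterSeabaseTableAlterIdentityColumn: processes ALTER TABLE ... ALTER
// COLUMN ... sequence generator options for an identity column by
// translating them into an ALTER INTERNAL SEQUENCE statement against the
// underlying sequence.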
void CmpSeabaseDDL::alterSeabaseTableAlterIdentityColumn(
StmtDDLAlterTableAlterColumnSetSGOption * alterIdentityColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterIdentityColNode->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
processReturn();
return;
}
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
const NAString &colName = alterIdentityColNode->getColumnName();
const NAColumn * nacol = nacolArr.getColumn(colName);
if (! nacol)
{
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colName);
processReturn();
return;
}
if (! nacol->isIdentityColumn())
{
*CmpCommon::diags() << DgSqlCode(-1590)
<< DgColumnName(colName);
processReturn();
return;
}
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catalogNamePart, schemaNamePart, objectNamePart,
alterIdentityColNode->getColumnName(),
seqName);
ElemDDLSGOptions * sgo = alterIdentityColNode->getSGOptions();
NAString options;
if (sgo)
{
char tmpBuf[1000];
if (sgo->isIncrementSpecified())
{
str_sprintf(tmpBuf, " increment by %Ld", sgo->getIncrement());
options += tmpBuf;
}
if (sgo->isMaxValueSpecified())
{
if (sgo->isNoMaxValue())
str_sprintf(tmpBuf, " no maxvalue ", sgo->getMaxValue());
else
str_sprintf(tmpBuf, " maxvalue %Ld", sgo->getMaxValue());
options += tmpBuf;
}
if (sgo->isMinValueSpecified())
{
if (sgo->isNoMinValue())
str_sprintf(tmpBuf, " no maxvalue ", sgo->getMinValue());
else
str_sprintf(tmpBuf, " minvalue %Ld", sgo->getMinValue());
options += tmpBuf;
}
if (sgo->isStartValueSpecified())
{
str_sprintf(tmpBuf, " start with %Ld", sgo->getStartValue());
options += tmpBuf;
}
if (sgo->isCacheSpecified())
{
if (sgo->isNoCache())
str_sprintf(tmpBuf, " no cache ");
else
str_sprintf(tmpBuf, " cache %Ld ", sgo->getCache());
options += tmpBuf;
}
if (sgo->isCycleSpecified())
{
if (sgo->isNoCycle())
str_sprintf(tmpBuf, " no cycle ");
else
str_sprintf(tmpBuf, " cycle ");
options += tmpBuf;
}
if (sgo->isResetSpecified())
{
str_sprintf(tmpBuf, " reset ");
options += tmpBuf;
}
char buf[4000];
str_sprintf(buf, "alter internal sequence %s.\"%s\".\"%s\" %s",
catalogNamePart.data(), schemaNamePart.data(), seqName.data(),
options.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
}
// CorrName cn(objectNamePart, STMTHEAP, schemaNamePart, catalogNamePart);
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
return;
}
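// alterSeabaseTableAlterColumnDatatype: processes ALTER TABLE ... ALTER
// COLUMN ... datatype. Only widening a VARCHAR column (same datatype and
// character set, new length >= old length) is supported; key, secondary
// index and LOB columns cannot be altered. The change is made by updating
// the column size in the COLUMNS metadata.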
void CmpSeabaseDDL::alterSeabaseTableAlterColumnDatatype(
StmtDDLAlterTableAlterColumnDatatype * alterColNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterColNode->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
ActiveSchemaDB()->getNATableDB()->useCache();
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
processReturn();
return;
}
// Make sure user has the privilege to perform the alter column
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
processReturn ();
return;
}
// return an error if trying to alter a column from a volatile table
if (naTable->isVolatileTable())
{
*CmpCommon::diags() << DgSqlCode(-CAT_REGULAR_OPERATION_ON_VOLATILE_OBJECT);
processReturn ();
return;
}
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
const NAString &colName = alterColNode->getColumnName();
const NAColumn * nacol = nacolArr.getColumn(colName);
if (! nacol)
{
// column doesn't exist. Error.
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colName);
processReturn();
return;
}
const NAType * currType = nacol->getType();
NAType * newType = alterColNode->getType();
// Column that can be altered must meet these conditions:
// -- old and new column datatype must be VARCHAR
// -- new col length must be greater than or equal to old length
// -- old and new character sets must be the same
NABoolean canAlter = FALSE;
if ((DFS2REC::isSQLVarChar(currType->getFSDatatype())) &&
(DFS2REC::isSQLVarChar(newType->getFSDatatype())) &&
(currType->getFSDatatype() == newType->getFSDatatype()) &&
(currType->getNominalSize() <= newType->getNominalSize()) &&
(((CharType*)currType)->getCharSet() == ((CharType*)newType)->getCharSet()))
canAlter = TRUE;
if (NOT canAlter)
{
NAString reason;
if (NOT ((DFS2REC::isSQLVarChar(currType->getFSDatatype())) &&
(DFS2REC::isSQLVarChar(newType->getFSDatatype()))))
reason = "Old and New datatypes must be VARCHAR.";
else if (currType->getFSDatatype() != newType->getFSDatatype())
reason = "Old and New datatypes must be the same.";
else if (((CharType*)currType)->getCharSet() != ((CharType*)newType)->getCharSet())
reason = "Old and New character sets must be the same.";
else if (currType->getNominalSize() > newType->getNominalSize())
reason = "New length must be greater than or equal to old length.";
// key column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1404)
<< DgColumnName(colName)
<< DgString0(reason);
processReturn();
return;
}
// If column is a LOB column , error
if ((currType->getFSDatatype() == REC_BLOB) || (currType->getFSDatatype() == REC_CLOB))
{
*CmpCommon::diags() << DgSqlCode(-CAT_LOB_COLUMN_ALTER)
<< DgColumnName(colName);
processReturn();
return;
}
const NAFileSet * naFS = naTable->getClusteringIndex();
const NAColumnArray &naKeyColArr = naFS->getIndexKeyColumns();
if (naKeyColArr.getColumn(colName))
{
// key column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1420)
<< DgColumnName(colName);
processReturn();
return;
}
if (naTable->hasSecondaryIndexes())
{
const NAFileSetList &naFsList = naTable->getIndexList();
for (Lng32 i = 0; i < naFsList.entries(); i++)
{
naFS = naFsList[i];
// skip clustering index
if (naFS->getKeytag() == 0)
continue;
const NAColumnArray &naIndexColArr = naFS->getAllColumns();
if (naIndexColArr.getColumn(colName))
{
// secondary index column cannot be altered
*CmpCommon::diags() << DgSqlCode(-1421)
<< DgColumnName(colName)
<< DgTableName(naFS->getExtFileSetName());
processReturn();
return;
}
} // for
} // secondary indexes present
Int64 objUID = naTable->objectUid().castToInt64();
Lng32 colNumber = nacol->getPosition();
char *col = NULL;
char buf[4000];
str_sprintf(buf, "update %s.\"%s\".%s set column_size = %d where object_uid = %Ld and column_number = %d",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
newType->getNominalSize(),
objUID,
colNumber);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
deallocEHI(ehi);
heap_->deallocateMemory(col);
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
processReturn();
return;
}
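// alterSeabaseTableAddPKeyConstraint: processes ALTER TABLE ... ADD
// CONSTRAINT ... PRIMARY KEY. If the table is empty, has no dependent
// objects and mode_special_4 is on, the table is dropped and recreated
// with the new primary key as its clustering key; otherwise the primary
// key is added as a unique constraint.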
void CmpSeabaseDDL::alterSeabaseTableAddPKeyConstraint(
StmtDDLAddConstraint * alterAddConstraint,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterAddConstraint->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
// Make sure user has the privilege to perform the add pk constraint
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return;
}
ElemDDLColRefArray &keyColumnArray = alterAddConstraint->getConstraint()->castToElemDDLConstraintPK()->getKeyColumnArray();
NAList<NAString> keyColList(HEAP, keyColumnArray.entries());
NAString pkeyStr("(");
for (Int32 j = 0; j < keyColumnArray.entries(); j++)
{
const NAString &colName = keyColumnArray[j]->getColumnName();
keyColList.insert(colName);
pkeyStr += colName;
if (j < (keyColumnArray.entries() - 1))
pkeyStr += ", ";
}
pkeyStr += ")";
if (constraintErrorChecks(&cliInterface,
alterAddConstraint->castToStmtDDLAddConstraintUnique(),
naTable,
COM_UNIQUE_CONSTRAINT, //TRUE,
keyColList))
{
return;
}
// update unique key constraint info
NAString uniqueStr;
if (genUniqueName(alterAddConstraint, uniqueStr))
{
return;
}
// if the table doesn't have a user defined primary key, is empty and doesn't have any
// dependent objects (indexes, views, triggers, RI, etc), then drop it and recreate it with
// this new primary key.
// Do this optimization in mode_special_4 only.
Lng32 len = 0;
Lng32 rowCount = 0;
NABoolean ms4 = FALSE;
if (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON)
{
ms4 = TRUE;
char query[2000];
str_sprintf(query, "select [any 1] cast(1 as int not null) from \"%s\".\"%s\".\"%s\" for read committed access",
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data());
cliRC = cliInterface.executeImmediate(query, (char*)&rowCount, &len, NULL);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
}
// if the table is not empty, or there are dependent objects/constraints,
// or the table already has a pkey/store by, then create a unique constraint.
NABoolean isStoreBy = FALSE;
Int32 nonSystemKeyCols = 0;
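// Examine the clustering key: user key columns together with a SYSKEY
// column indicate a STORE BY clause; a SYSKEY-only key means the table
// has no user defined clustering key.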
if (naTable->getClusteringIndex())
{
NAFileSet * naf = naTable->getClusteringIndex();
for (Lng32 i = 0; i < naf->getIndexKeyColumns().entries(); i++)
{
NAColumn * nac = naf->getIndexKeyColumns()[i];
if (NOT nac->isSystemColumn())
nonSystemKeyCols++;
else if (nac->isSyskeyColumn())
isStoreBy = TRUE;
} // for
if (nonSystemKeyCols == 0)
isStoreBy = FALSE;
} // if
if ((rowCount > 0) || // not empty
(NOT ms4) || // not mode_special_4
(naTable->hasSecondaryIndexes()) || // user indexes
(NOT naTable->getClusteringIndex()->hasSyskey()) || // user defined pkey
(isStoreBy) || // user defined store by
(naTable->getUniqueConstraints().entries() > 0) || // unique constraints
(naTable->getRefConstraints().entries() > 0) || // ref constraints
(naTable->getCheckConstraints().entries() > 0))
{
// cannot create clustered primary key constraint.
// create a unique constraint instead.
NAString cliQuery;
cliQuery = "alter table " + extTableName + " add constraint " + uniqueStr
+ " unique " + pkeyStr + ";";
cliRC = cliInterface.executeImmediate((char*)cliQuery.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
}
return;
}
Int64 tableUID =
getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
// empty table. Drop and recreate it with the new primary key.
char * buf = NULL;
ULng32 buflen = 0;
retcode = CmpDescribeSeabaseTable(cn, 3/*createlike*/, buf, buflen, STMTHEAP,
pkeyStr.data(), TRUE);
if (retcode)
return;
NAString cliQuery;
// drop this table.
cliQuery = "drop table ";
cliQuery += extTableName;
cliQuery += " no check;";
cliRC = cliInterface.executeImmediate((char*)cliQuery.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
char cqdbuf[200];
str_sprintf(cqdbuf, "cqd traf_create_table_with_uid '%Ld';",
tableUID);
cliRC = cliInterface.executeImmediate(cqdbuf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
// and recreate it with the new primary key.
cliQuery = "create table ";
cliQuery += extTableName;
cliQuery += " ";
NABoolean done = FALSE;
Lng32 curPos = 0;
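// reassemble the length-prefixed, 8-byte aligned fragments returned by
// CmpDescribeSeabaseTable into the CREATE TABLE statement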
while (NOT done)
{
short len = *(short*)&buf[curPos];
NAString frag(&buf[curPos+sizeof(short)],
len - ((buf[curPos+len-1]== '\n') ? 1 : 0));
cliQuery += frag;
curPos += ((((len+sizeof(short))-1)/8)+1)*8;
if (curPos >= buflen)
done = TRUE;
}
cliRC = cliInterface.executeImmediate((char*)cliQuery.data());
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
}
str_sprintf(cqdbuf, "cqd traf_create_table_with_uid '' ;");
cliInterface.executeImmediate(cqdbuf);
if (cliRC < 0)
{
return;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT))
{
processReturn();
deallocEHI(ehi);
return;
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
}
return;
}
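// alterSeabaseTableAddUniqueConstraint: processes ALTER TABLE ... ADD
// CONSTRAINT ... UNIQUE. Records the constraint in the metadata and
// creates the unique index that enforces it.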
void CmpSeabaseDDL::alterSeabaseTableAddUniqueConstraint(
StmtDDLAddConstraint * alterAddConstraint,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterAddConstraint->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
// Make sure user has the privilege to perform the create unique constraint
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return;
}
ElemDDLColRefArray &keyColumnArray = alterAddConstraint->getConstraint()->castToElemDDLConstraintUnique()->getKeyColumnArray();
NAList<NAString> keyColList(HEAP, keyColumnArray.entries());
NAList<NAString> keyColOrderList(HEAP, keyColumnArray.entries());
for (Int32 j = 0; j < keyColumnArray.entries(); j++)
{
const NAString &colName = keyColumnArray[j]->getColumnName();
keyColList.insert(colName);
if (keyColumnArray[j]->getColumnOrdering() == COM_DESCENDING_ORDER)
keyColOrderList.insert("DESC");
else
keyColOrderList.insert("ASC");
}
if (constraintErrorChecks(
&cliInterface,
alterAddConstraint->castToStmtDDLAddConstraintUnique(),
naTable,
COM_UNIQUE_CONSTRAINT,
keyColList))
{
return;
}
// update unique key constraint info
NAString uniqueStr;
if (genUniqueName(alterAddConstraint, uniqueStr))
{
return;
}
Int64 tableUID =
getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComUID comUID;
comUID.make_UID();
Int64 uniqueUID = comUID.get_value();
if (updateConstraintMD(keyColList, keyColOrderList, uniqueStr, tableUID, uniqueUID,
naTable, COM_UNIQUE_CONSTRAINT, TRUE, &cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1043)
<< DgTableName(uniqueStr);
return;
}
NAList<NAString> emptyKeyColList;
if (updateIndexInfo(keyColList,
keyColOrderList,
emptyKeyColList,
uniqueStr,
uniqueUID,
catalogNamePart, schemaNamePart, objectNamePart,
naTable,
TRUE,
(CmpCommon::getDefault(TRAF_NO_CONSTR_VALIDATION) == DF_ON),
FALSE,
&cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT))
{
processReturn();
deallocEHI(ehi);
return;
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
}
return;
}
// returns 1 if referenced table refdTable has a dependency on the
// original referencing table origRingTable.
// return 0, if it does not.
// return -1, if error.
short CmpSeabaseDDL::isCircularDependent(CorrName &ringTable,
CorrName &refdTable,
CorrName &origRingTable,
BindWA *bindWA)
{
// get natable for the referenced table.
NATable *naTable = bindWA->getNATable(refdTable);
if (naTable == NULL || bindWA->errStatus())
{
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(refdTable.getExposedNameAsAnsiString());
processReturn();
return -1;
}
// find all the tables the refdTable depends on.
const AbstractRIConstraintList &refList = naTable->getRefConstraints();
for (Int32 i = 0; i < refList.entries(); i++)
{
AbstractRIConstraint *ariConstr = refList[i];
if (ariConstr->getOperatorType() != ITM_REF_CONSTRAINT)
continue;
RefConstraint * refConstr = (RefConstraint*)ariConstr;
if (refConstr->selfRef())
continue;
CorrName cn(refConstr->getUniqueConstraintReferencedByMe().getTableName());
if (cn == origRingTable)
{
return 1; // dependency exists
}
short rc = isCircularDependent(cn, cn,
origRingTable, bindWA);
if (rc)
return rc;
} // for
return 0;
}
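// ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY (referential integrity constraint).
// Checks privileges on the referencing and referenced tables, matches the
// referenced columns against a unique constraint on the referenced table,
// rejects circular dependencies, optionally validates existing data, and
// updates constraint, RI and index metadata for both tables.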
void CmpSeabaseDDL::alterSeabaseTableAddRIConstraint(
StmtDDLAddConstraint * alterAddConstraint,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterAddConstraint->getTableName();
ComObjectName referencingTableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
referencingTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = referencingTableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = referencingTableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = referencingTableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = referencingTableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
if ((isSeabaseReservedSchema(referencingTableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(referencingTableName.getObjectNamePart().getInternalName(),
STMTHEAP,
referencingTableName.getSchemaNamePart().getInternalName(),
referencingTableName.getCatalogNamePart().getInternalName());
NATable *ringNaTable = bindWA.getNATable(cn);
if (ringNaTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
// Make sure user has the privilege to perform the add RI constraint
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
ringNaTable->getOwner(),ringNaTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return;
}
const ElemDDLConstraintRI *constraintNode =
alterAddConstraint->getConstraint()->castToElemDDLConstraintRI();
ComObjectName referencedTableName( constraintNode->getReferencedTableName()
, COM_TABLE_NAME);
referencedTableName.applyDefaults(currCatAnsiName, currSchAnsiName);
if ((isSeabaseReservedSchema(referencedTableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
referencedTableName.getCatalogNamePart().getInternalName(),
referencedTableName.getSchemaNamePart().getInternalName(),
referencedTableName.getObjectNamePart().getInternalName(),
COM_BASE_TABLE_OBJECT,
TRUE);
if (retcode < 0)
{
processReturn();
return;
}
CorrName cn2(referencedTableName.getObjectNamePart().getInternalName(),
STMTHEAP,
referencedTableName.getSchemaNamePart().getInternalName(),
referencedTableName.getCatalogNamePart().getInternalName());
NATable *refdNaTable = bindWA.getNATable(cn2);
if (refdNaTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn2.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
if (refdNaTable->getViewText())
{
*CmpCommon::diags()
<< DgSqlCode(-1127)
<< DgTableName(cn2.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
// If the referenced and referencing tables are the same,
// reject the request unless CQD TRAF_ALLOW_SELF_REF_CONSTR is ON.
// By default, self referencing constraints are not allowed.
if ((CmpCommon::getDefault(TRAF_ALLOW_SELF_REF_CONSTR) == DF_OFF) &&
(referencingTableName == referencedTableName))
{
*CmpCommon::diags() << DgSqlCode(-CAT_SELF_REFERENCING_CONSTRAINT);
processReturn();
return;
}
// User must have REFERENCES privilege on the referenced table
if (isAuthorizationEnabled())
{
PrivMgrUserPrivs* privs = refdNaTable->getPrivInfo();
if (privs == NULL)
{
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_RETRIEVE_PRIVS);
deallocEHI(ehi);
processReturn();
return;
}
if (!ComUser::isRootUserID() && !privs->hasReferencePriv())
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn();
return;
}
}
ElemDDLColNameArray &ringCols = alterAddConstraint->getConstraint()->castToElemDDLConstraintRI()->getReferencingColumns();
NAList<NAString> ringKeyColList(HEAP, ringCols.entries());
NAList<NAString> ringKeyColOrderList(HEAP, ringCols.entries());
NAString ringColListForValidation;
NAString ringNullList;
for (Int32 j = 0; j < ringCols.entries(); j++)
{
const NAString &colName = ringCols[j]->getColumnName();
ringKeyColList.insert(colName);
ringKeyColOrderList.insert("ASC");
ringColListForValidation += "\"";
ringColListForValidation += colName;
ringColListForValidation += "\"";
if (j < (ringCols.entries() - 1))
ringColListForValidation += ", ";
ringNullList += "and ";
ringNullList += "\"";
ringNullList += colName;
ringNullList += "\"";
ringNullList += " is not null ";
}
if (constraintErrorChecks(&cliInterface,
alterAddConstraint->castToStmtDDLAddConstraintRI(),
ringNaTable,
COM_FOREIGN_KEY_CONSTRAINT, // referencing constr
ringKeyColList))
{
return;
}
const NAString &addConstrName = alterAddConstraint->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
// Compare the referenced column list against the primary and unique
// constraint defined for the referenced table. The referenced
// column list must match one of these constraints. Note that if there
// was no referenced column list specified, the primary key is used
// and a match has automatically been found.
const ElemDDLColNameArray &referencedColNode =
constraintNode->getReferencedColumns();
NAList<NAString> refdKeyColList(HEAP, referencedColNode.entries());
NAString refdColListForValidation;
for (Int32 j = 0; j < referencedColNode.entries(); j++)
{
const NAString &colName = referencedColNode[j]->getColumnName();
refdKeyColList.insert(colName);
refdColListForValidation += "\"";
refdColListForValidation += colName;
refdColListForValidation += "\"";
if (j < (referencedColNode.entries() - 1))
refdColListForValidation += ", ";
}
if (referencedColNode.entries() == 0)
{
NAFileSet * naf = refdNaTable->getClusteringIndex();
for (Lng32 i = 0; i < naf->getIndexKeyColumns().entries(); i++)
{
NAColumn * nac = naf->getIndexKeyColumns()[i];
if (nac->isComputedColumnAlways() &&
nac->isSystemColumn())
// always computed system columns in the key are redundant,
// don't include them (also don't include them in the DDL)
continue;
const NAString &colName = nac->getColName();
refdKeyColList.insert(colName);
refdColListForValidation += "\"";
refdColListForValidation += nac->getColName();
refdColListForValidation += "\"";
if (i < (naf->getIndexKeyColumns().entries() - 1))
refdColListForValidation += ", ";
}
}
if (ringKeyColList.entries() != refdKeyColList.entries())
{
*CmpCommon::diags()
<< DgSqlCode(-1046)
<< DgConstraintName(addConstrName);
processReturn();
return;
}
const NAColumnArray &ringNACarr = ringNaTable->getNAColumnArray();
const NAColumnArray &refdNACarr = refdNaTable->getNAColumnArray();
for (Int32 i = 0; i < ringKeyColList.entries(); i++)
{
const NAString &ringColName = ringKeyColList[i];
const NAString &refdColName = refdKeyColList[i];
const NAColumn * ringNAC = ringNACarr.getColumn(ringColName);
const NAColumn * refdNAC = refdNACarr.getColumn(refdColName);
if (! refdNAC)
{
*CmpCommon::diags() << DgSqlCode(-1009)
<< DgColumnName(refdColName);
processReturn();
return;
}
if (NOT (ringNAC->getType()->equalIgnoreNull(*refdNAC->getType())))
{
*CmpCommon::diags()
<< DgSqlCode(-1046)
<< DgConstraintName(addConstrName);
processReturn();
return;
}
}
// method getCorrespondingConstraint expects an empty input list if there are no
// user specified columns. Clear the refdKeyColList before calling it.
if (referencedColNode.entries() == 0)
{
refdKeyColList.clear();
}
NAString constrName;
NABoolean isPkey = FALSE;
NAList<int> reorderList;
// Find a uniqueness constraint on the referenced table that matches
// the referenced column list (not necessarily in the original order
// of columns). Also find out how to reorder the column lists to
// match the found uniqueness constraint. This is the order in
// which we'll add the columns to the metadata (KEYS table). Note
// that SHOWDDL may therefore show the foreign key columns in a
// different order. This is a limitation of the current way we
// store RI constraints in the metadata.
if (NOT refdNaTable->getCorrespondingConstraint(refdKeyColList,
TRUE, // unique constraint
&constrName,
&isPkey,
&reorderList))
{
*CmpCommon::diags() << DgSqlCode(-CAT_REFERENCED_CONSTRAINT_DOES_NOT_EXIST)
<< DgConstraintName(addConstrName);
return;
}
if (reorderList.entries() > 0)
{
CollIndex numEntries = ringKeyColList.entries();
CMPASSERT(ringKeyColOrderList.entries() == numEntries &&
refdKeyColList.entries() == numEntries &&
reorderList.entries() == numEntries);
// re-order referencing and referenced key column lists to match
// the order of the uniqueness constraint in the referenced table
NAArray<NAString> ringTempKeyColArray(numEntries);
NAArray<NAString> ringTempKeyColOrderArray(numEntries);
NAArray<NAString> refdTempKeyColArray(numEntries);
// copy the lists into temp arrays in the correct order
for (CollIndex i=0; i<numEntries; i++)
{
CollIndex newEntry = static_cast<CollIndex>(reorderList[i]);
ringTempKeyColArray.insertAt(newEntry, ringKeyColList[i]);
ringTempKeyColOrderArray.insertAt(newEntry, ringKeyColOrderList[i]);
refdTempKeyColArray.insertAt(newEntry, refdKeyColList[i]);
}
// copy back into the lists (this will assert if we have any holes in the array)
for (CollIndex j=0; j<numEntries; j++)
{
ringKeyColList[j] = ringTempKeyColArray[j];
ringKeyColOrderList[j] = ringTempKeyColOrderArray[j];
refdKeyColList[j] = refdTempKeyColArray[j];
}
} // reorder the lists if needed
// check for circular RI dependencies.
// check if referenced table cn2 refers back to the referencing table cn.
retcode = isCircularDependent(cn, cn2, cn, &bindWA);
if (retcode == 1) // dependency exists
{
*CmpCommon::diags() << DgSqlCode(-CAT_RI_CIRCULAR_DEPENDENCY)
<< DgConstraintName(addConstrName)
<< DgTableName(cn.getExposedNameAsAnsiString());
return;
}
else if (retcode < 0)
{
// error. Diags area has been populated
return;
}
if ((CmpCommon::getDefault(TRAF_NO_CONSTR_VALIDATION) == DF_OFF) &&
(constraintNode->isEnforced()))
{
// validate data for RI constraint.
// generate a "select" statement to validate the constraint. For example:
// SELECT count(*) FROM T1
// WHERE NOT ((T1C1,T1C2) IN (SELECT T2C1,T2C2 FROM T2))
// OR T1C1 IS NULL OR T1C2 IS NULL;
// This statement returns > 0 if there exist data violating the constraint.
char * validQry =
new(STMTHEAP) char[ringColListForValidation.length() +
refdColListForValidation.length() +
ringNullList.length() +
2000];
str_sprintf(validQry, "select count(*) from \"%s\".\"%s\".\"%s\" where not ((%s) in (select %s from \"%s\".\"%s\".\"%s\")) %s;",
referencingTableName.getCatalogNamePart().getInternalName().data(),
referencingTableName.getSchemaNamePart().getInternalName().data(),
referencingTableName.getObjectNamePart().getInternalName().data(),
ringColListForValidation.data(),
refdColListForValidation.data(),
referencedTableName.getCatalogNamePart().getInternalName().data(),
referencedTableName.getSchemaNamePart().getInternalName().data(),
referencedTableName.getObjectNamePart().getInternalName().data(),
ringNullList.data());
Lng32 len = 0;
Int64 rowCount = 0;
cliRC = cliInterface.executeImmediate(validQry, (char*)&rowCount, &len, NULL);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
if (rowCount > 0)
{
*CmpCommon::diags() << DgSqlCode(-1143)
<< DgConstraintName(addConstrName)
<< DgTableName(referencingTableName.getObjectNamePart().getInternalName().data())
<< DgString0(referencedTableName.getObjectNamePart().getInternalName().data())
<< DgString1(validQry);
return;
}
}
ComObjectName refdConstrName(constrName);
refdConstrName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString refdCatName = refdConstrName.getCatalogNamePartAsAnsiString();
const NAString refdSchName = refdConstrName.getSchemaNamePartAsAnsiString(TRUE);
const NAString refdObjName = refdConstrName.getObjectNamePartAsAnsiString(TRUE);
Int64 refdConstrUID =
getObjectUID(&cliInterface,
refdCatName.data(), refdSchName.data(), refdObjName.data(),
(isPkey ? COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT :
COM_UNIQUE_CONSTRAINT_OBJECT_LIT));
NAString uniqueStr;
if (genUniqueName(alterAddConstraint, uniqueStr))
{
return;
}
Int64 tableUID =
getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComUID comUID;
comUID.make_UID();
Int64 ringConstrUID = comUID.get_value();
if (updateConstraintMD(ringKeyColList, ringKeyColOrderList, uniqueStr, tableUID, ringConstrUID,
ringNaTable, COM_FOREIGN_KEY_CONSTRAINT,
constraintNode->isEnforced(), &cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (updateRIConstraintMD(ringConstrUID, refdConstrUID,
&cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (updateIndexInfo(ringKeyColList,
ringKeyColOrderList,
refdKeyColList,
uniqueStr,
ringConstrUID,
catalogNamePart, schemaNamePart, objectNamePart,
ringNaTable,
FALSE,
(CmpCommon::getDefault(TRAF_NO_CONSTR_VALIDATION) == DF_ON),
TRUE, // because of the way the data is recorded in the
// metadata, the indexes of referencing and referenced
// tables need to have their columns in the same
// sequence (differences in ASC/DESC are ok)
&cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (NOT constraintNode->isEnforced())
{
*CmpCommon::diags()
<< DgSqlCode(1313)
<< DgString0(addConstrName);
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT))
{
processReturn();
deallocEHI(ehi);
return;
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
}
// remove natable for the table being referenced
ActiveSchemaDB()->getNATableDB()->removeNATable(cn2,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
return;
}
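// Builds the text of a check constraint from the original input string.
// Names that the parser expanded to their fully qualified form are replaced
// in the text, inserting blank separators where needed so the expanded
// text remains syntactically valid.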
short CmpSeabaseDDL::getCheckConstraintText(StmtDDLAddConstraintCheck *addCheckNode,
NAString &qualifiedText)
{
// ComString qualifiedText;
const ParNameLocList &nameLocList = addCheckNode->getNameLocList();
const ParNameLoc *pNameLoc = NULL;
const char *pInputStr = nameLocList.getInputStringPtr();
StringPos inputStrPos = addCheckNode->getStartPosition();
// CharInfo::CharSet mapCS = (CharInfo::CharSet) SqlParser_ISO_MAPPING;
for (size_t x = 0; x < nameLocList.entries(); x++)
{
pNameLoc = &nameLocList[x];
const NAString &nameExpanded = pNameLoc->getExpandedName(FALSE/*no assert*/);
size_t nameAsIs = 0;
size_t nameLenInBytes = 0;
//
// When the character set of the input string is a variable-length/width
// multi-byte characters set, the value returned by getNameLength()
// may not be numerically equal to the number of bytes in the original
// input string that we need to skip. So, we get the character
// conversion routines to tell us how many bytes we need to skip.
//
enum cnv_charset eCnvCS = convertCharsetEnum(nameLocList.getInputStringCharSet());
const char *str_to_test = (const char *) &pInputStr[pNameLoc->getNamePosition()];
const int max_bytes2cnv = addCheckNode->getEndPosition()
- pNameLoc->getNamePosition() + 1;
const char *tmp_out_bufr = new (STMTHEAP) char[max_bytes2cnv * 4 + 10 /* Ensure big enough! */ ];
char * p1stUntranslatedChar = NULL;
int cnvErrStatus = LocaleToUTF16(
cnv_version1 // in - const enum cnv_version version
, str_to_test // in - const char *in_bufr
, max_bytes2cnv // in - const int in_len
, tmp_out_bufr // out - const char *out_bufr
, max_bytes2cnv * 4 // in - const int out_len
, eCnvCS // in - enum cnv_charset charset
, p1stUntranslatedChar // out - char * & first_untranslated_char
, NULL // out - unsigned int *output_data_len_p
, 0 // in - const int cnv_flags
, (int)FALSE // in - const int addNullAtEnd_flag
, NULL // out - unsigned int * translated_char_cnt_p
, pNameLoc->getNameLength() // in - unsigned int max_chars_to_convert
);
// NOTE: No errors should be possible -- string has been converted before.
NADELETEBASIC (tmp_out_bufr, STMTHEAP);
nameLenInBytes = p1stUntranslatedChar - str_to_test;
// If name not expanded, then use the original name as is
if (nameExpanded.isNull())
nameAsIs = nameLenInBytes;
// Copy from (last position in) input string up to current name
qualifiedText += ComString(&pInputStr[inputStrPos],
pNameLoc->getNamePosition() - inputStrPos +
nameAsIs);
if (NOT nameAsIs) // original name to be replaced with expanded
{
size_t namePos = pNameLoc->getNamePosition();
size_t nameLen = pNameLoc->getNameLength();
// Solution 10-080506-3000
// For description and explanation of the fix, please read the
// comments in method CatExecCreateView::buildViewText() in
// module CatExecCreateView.cpp
// Example: CREATE TABLE T ("c1" INT NOT NULL PRIMARY KEY,
// C2 INT CHECK (C2 BETWEEN 0 AND"c1")) NO PARTITION;
if ( pInputStr[namePos] EQU '"'
AND nameExpanded.data()[0] NEQ '"'
AND namePos > 1
AND ( pInputStr[namePos - 1] EQU '_' OR
isAlNumIsoMapCS((unsigned char)pInputStr[namePos - 1]) )
)
{
// insert a blank separator to avoid syntax error
// WITHOUT FIX - Example:
// ... ALTER TABLE CAT.SCH.T ADD CONSTRAINT CAT.SCH.T_788388997_8627
// CHECK (CAT.SCH.T.C2 BETWEEN 0 ANDCAT.SCH.T."c1") DROPPABLE ;
// ... ^^^^^^
qualifiedText += " "; // the FIX
// WITH FIX - Example:
// ... ALTER TABLE CAT.SCH.T ADD CONSTRAINT CAT.SCH.T_788388997_8627
// CHECK (CAT.SCH.T.C2 BETWEEN 0 AND CAT.SCH.T."c1") DROPPABLE ;
// ... ^^^
}
qualifiedText += nameExpanded;
// Problem reported in solution 10-080506-3000
// Example: CREATE TABLE T (C1 INT NOT NULL PRIMARY KEY,
// C2 INT CHECK ("C2"IN(1,2,3))) NO PARTITION;
if ( pInputStr[namePos + nameLen - 1] EQU '"'
AND nameExpanded.data()[nameExpanded.length() - 1] NEQ '"'
AND pInputStr[namePos + nameLen] NEQ '\0'
AND ( pInputStr[namePos + nameLen] EQU '_' OR
isAlNumIsoMapCS((unsigned char)pInputStr[namePos + nameLen]) )
)
{
// insert a blank separator to avoid syntax error
// WITHOUT FIX - Example:
// ... ALTER TABLE CAT.SCH.T ADD CONSTRAINT CAT.SCH.T_654532688_9627
// CHECK (CAT.SCH.T.C2IN (1, 2, 3)) DROPPABLE ;
// ... ^^^^
qualifiedText += " "; // the FIX
// WITH FIX - Example:
// ... ALTER TABLE CAT.SCH.T ADD CONSTRAINT CAT.SCH.T_654532688_9627
// CHECK (CAT.SCH.T.C2 IN (1, 2, 3)) DROPPABLE ;
// ... ^^^
}
} // if (NOT nameAsIs)
inputStrPos = pNameLoc->getNamePosition() + nameLenInBytes;
} // for
// CAT_ASSERT(addCheckNode->getEndPosition() >= inputStrPos);
qualifiedText += ComString(&pInputStr[inputStrPos],
addCheckNode->getEndPosition() - inputStrPos + 1);
PrettifySqlText(qualifiedText, NULL);
// CharType::getCharSetAsPrefix(SqlParser_NATIONAL_CHARSET));
return 0;
}
// nonstatic method, calling two member functions
short CmpSeabaseDDL::getTextFromMD(
ExeCliInterface * cliInterface,
Int64 textUID,
ComTextType textType,
Lng32 textSubID,
NAString &outText)
{
short retcode = getTextFromMD(getSystemCatalog(),
cliInterface,
textUID,
textType,
textSubID,
outText);
if (retcode)
processReturn();
return retcode;
}
// static version of this method
short CmpSeabaseDDL::getTextFromMD(const char * catalogName,
ExeCliInterface * cliInterface,
Int64 textUID,
ComTextType textType,
Lng32 textSubID,
NAString &outText)
{
Lng32 cliRC;
char query[1000];
str_sprintf(query, "select text from %s.\"%s\".%s where text_uid = %Ld and text_type = %d and sub_id = %d for read committed access order by seq_num",
catalogName, SEABASE_MD_SCHEMA, SEABASE_TEXT,
textUID, static_cast<int>(textType), textSubID);
Queue * textQueue = NULL;
cliRC = cliInterface->fetchAllRows(textQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// glue text together
for (Lng32 idx = 0; idx < textQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)textQueue->getNext();
char * text = (char*)vi->get(0);
outText += text;
}
return 0;
}
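// ALTER TABLE ... ADD CONSTRAINT ... CHECK.
// Disallows references to SYSKEY, optionally validates existing data against
// the check predicate, then records the constraint and its text in metadata.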
void CmpSeabaseDDL::alterSeabaseTableAddCheckConstraint(
StmtDDLAddConstraint * alterAddConstraint,
NAString &currCatName, NAString &currSchName)
{
StmtDDLAddConstraintCheck *alterAddCheckNode = alterAddConstraint
->castToStmtDDLAddConstraintCheck();
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterAddConstraint->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
// Make sure user has the privilege to perform the add check constraint
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return;
}
const ParCheckConstraintColUsageList &colList =
alterAddCheckNode->getColumnUsageList();
for (CollIndex cols = 0; cols < colList.entries(); cols++)
{
const ParCheckConstraintColUsage &ckColUsg = colList[cols];
const ComString &colName = ckColUsg.getColumnName();
if ((colName EQU "SYSKEY") &&
(naTable->getClusteringIndex()->hasSyskey()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_SYSKEY_COL_NOT_ALLOWED_IN_CK_CNSTRNT)
<< DgColumnName( "SYSKEY")
<< DgTableName(extTableName);
processReturn();
deallocEHI(ehi);
return;
}
}
NAList<NAString> keyColList;
if (constraintErrorChecks(&cliInterface,
alterAddConstraint->castToStmtDDLAddConstraintCheck(),
naTable,
COM_CHECK_CONSTRAINT,
keyColList))
{
return;
}
// update check constraint info
NAString uniqueStr;
if (genUniqueName(alterAddConstraint, uniqueStr))
{
return;
}
// get check text
NAString checkConstrText;
if (getCheckConstraintText(alterAddCheckNode, checkConstrText))
{
return;
}
if (CmpCommon::getDefault(TRAF_NO_CONSTR_VALIDATION) == DF_OFF)
{
// validate data for check constraint.
// generate a "select" statement to validate the constraint. For example:
// SELECT count(*) FROM T1 where not checkConstrText;
// This statement returns > 0 if there exist data violating the constraint.
char * validQry = new(STMTHEAP) char[checkConstrText.length() + 2000];
str_sprintf(validQry, "select count(*) from \"%s\".\"%s\".\"%s\" where not %s",
tableName.getCatalogNamePart().getInternalName().data(),
tableName.getSchemaNamePart().getInternalName().data(),
tableName.getObjectNamePart().getInternalName().data(),
checkConstrText.data());
Lng32 len = 0;
Int64 rowCount = 0;
cliRC = cliInterface.executeImmediate(validQry, (char*)&rowCount, &len, NULL);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
if (rowCount > 0)
{
*CmpCommon::diags() << DgSqlCode(-1083)
<< DgConstraintName(uniqueStr);
return;
}
}
Int64 tableUID =
getObjectUID(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(), objectNamePart.data(),
COM_BASE_TABLE_OBJECT_LIT);
ComUID comUID;
comUID.make_UID();
Int64 checkUID = comUID.get_value();
NAList<NAString> emptyList;
if (updateConstraintMD(keyColList, emptyList, uniqueStr, tableUID, checkUID,
naTable, COM_CHECK_CONSTRAINT, TRUE, &cliInterface))
{
*CmpCommon::diags()
<< DgSqlCode(-1029)
<< DgTableName(uniqueStr);
return;
}
if (updateTextTable(&cliInterface, checkUID, COM_CHECK_CONSTR_TEXT, 0,
checkConstrText))
{
processReturn();
return;
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT))
{
processReturn();
deallocEHI(ehi);
return;
}
if (!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS,
COM_BASE_TABLE_OBJECT);
}
return;
}
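// ALTER TABLE ... DROP CONSTRAINT.
// Locates the named primary key, unique, referential or check constraint,
// deletes its metadata, and drops the implicit index backing it unless the
// NO CHECK drop behavior was specified.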
void CmpSeabaseDDL::alterSeabaseTableDropConstraint(
StmtDDLDropConstraint * alterDropConstraint,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
const NAString &tabName = alterDropConstraint->getTableName();
ComObjectName tableName(tabName, COM_TABLE_NAME);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
if ((isSeabaseReservedSchema(tableName)) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
*CmpCommon::diags() << DgSqlCode(-CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA);
processReturn();
return;
}
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
ActiveSchemaDB()->getNATableDB()->useCache();
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
// Make sure user has the privilege to perform the drop constraint
if (!isDDLOperationAuthorized(SQLOperation::ALTER_TABLE,
naTable->getOwner(),naTable->getSchemaOwner()))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
deallocEHI(ehi);
processReturn ();
return;
}
const NAString &dropConstrName = alterDropConstraint->
getConstraintNameAsQualifiedName().getQualifiedNameAsAnsiString();
const NAString &constrCatName = alterDropConstraint->
getConstraintNameAsQualifiedName().getCatalogName();
const NAString &constrSchName = alterDropConstraint->
getConstraintNameAsQualifiedName().getSchemaName();
const NAString &constrObjName = alterDropConstraint->
getConstraintNameAsQualifiedName().getObjectName();
char outObjType[10];
Int64 constrUID = getObjectUID(&cliInterface,
constrCatName.data(), constrSchName.data(), constrObjName.data(),
NULL,
"object_type = '"COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT"' or object_type = '"COM_UNIQUE_CONSTRAINT_OBJECT_LIT"' or object_type = '"COM_REFERENTIAL_CONSTRAINT_OBJECT_LIT"' or object_type = '"COM_CHECK_CONSTRAINT_OBJECT_LIT"' ",
outObjType);
if (constrUID < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-1005)
<< DgConstraintName(dropConstrName);
processReturn();
return;
}
NABoolean isUniqConstr =
((strcmp(outObjType, COM_UNIQUE_CONSTRAINT_OBJECT_LIT) == 0) ||
(strcmp(outObjType, COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT) == 0));
NABoolean isRefConstr =
(strcmp(outObjType, COM_REFERENTIAL_CONSTRAINT_OBJECT_LIT) == 0);
NABoolean isPkeyConstr =
(strcmp(outObjType, COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT) == 0);
NABoolean isCheckConstr =
(strcmp(outObjType, COM_CHECK_CONSTRAINT_OBJECT_LIT) == 0);
NABoolean constrFound = FALSE;
if (isUniqConstr)
{
constrFound = FALSE;
const AbstractRIConstraintList &ariList = naTable->getUniqueConstraints();
for (Int32 i = 0; i < ariList.entries(); i++)
{
AbstractRIConstraint *ariConstr = ariList[i];
UniqueConstraint * uniqueConstr = (UniqueConstraint*)ariList[i];
const NAString &tableConstrName =
uniqueConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (dropConstrName == tableConstrName)
{
constrFound = TRUE;
if (uniqueConstr->hasRefConstraintsReferencingMe())
{
*CmpCommon::diags()
<< DgSqlCode(-1050);
deallocEHI(ehi);
processReturn();
return;
}
}
} // for
if (NOT constrFound)
{
*CmpCommon::diags() << DgSqlCode(-1052);
processReturn();
return;
}
}
NATable *otherNaTable = NULL;
Int64 otherConstrUID = 0;
if (isRefConstr)
{
constrFound = FALSE;
RefConstraint * refConstr = NULL;
const AbstractRIConstraintList &ariList = naTable->getRefConstraints();
for (Int32 i = 0; i < ariList.entries(); i++)
{
AbstractRIConstraint *ariConstr = ariList[i];
const NAString &tableConstrName =
ariConstr->getConstraintName().getQualifiedNameAsAnsiString();
if (dropConstrName == tableConstrName)
{
constrFound = TRUE;
refConstr = (RefConstraint*)ariConstr;
}
} // for
if (NOT constrFound)
{
*CmpCommon::diags() << DgSqlCode(-1052);
processReturn();
return;
}
CorrName otherCN(refConstr->getUniqueConstraintReferencedByMe().getTableName());
otherNaTable = bindWA.getNATable(otherCN);
if (otherNaTable == NULL || bindWA.errStatus())
{
deallocEHI(ehi);
processReturn();
return;
}
AbstractRIConstraint * otherConstr =
refConstr->findConstraint(&bindWA, refConstr->getUniqueConstraintReferencedByMe());
const NAString& otherCatName =
otherConstr->getConstraintName().getCatalogName();
const NAString& otherSchName =
otherConstr->getConstraintName().getSchemaName();
const NAString& otherConstrName =
otherConstr->getConstraintName().getObjectName();
otherConstrUID = getObjectUID(&cliInterface,
otherCatName.data(), otherSchName.data(), otherConstrName.data(),
COM_UNIQUE_CONSTRAINT_OBJECT_LIT );
if (otherConstrUID < 0)
{
CmpCommon::diags()->clear();
otherConstrUID = getObjectUID(&cliInterface,
otherCatName.data(), otherSchName.data(), otherConstrName.data(),
COM_PRIMARY_KEY_CONSTRAINT_OBJECT_LIT );
if (otherConstrUID < 0)
{
processReturn();
return;
}
}
}
NABoolean indexFound = FALSE;
Lng32 isExplicit = 0;
Lng32 keytag = 0;
if ((isUniqConstr || isRefConstr) && (NOT isPkeyConstr))
{
// find the index that corresponds to this constraint
char query[1000];
str_sprintf(query, "select I.keytag, I.is_explicit from %s.\"%s\".%s T, %s.\"%s\".%s I where T.table_uid = %Ld and T.constraint_uid = %Ld and T.table_uid = I.base_table_uid and T.index_uid = I.index_uid ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_INDEXES,
naTable->objectUid().castToInt64(),
constrUID);
Queue * indexQueue = NULL;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
cliRC = cliInterface.fetchAllRows(indexQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return;
}
if (indexQueue->numEntries() > 1)
{
*CmpCommon::diags()
<< DgSqlCode(-1005)
<< DgConstraintName(dropConstrName);
processReturn();
return;
}
if (indexQueue->numEntries() == 1)
{
indexFound = TRUE;
indexQueue->position();
OutputInfo * oi = (OutputInfo*)indexQueue->getCurr();
keytag = *(Lng32*)oi->get(0);
isExplicit = *(Lng32*)oi->get(1);
}
}
if (deleteConstraintInfoFromSeabaseMDTables(&cliInterface,
naTable->objectUid().castToInt64(),
(otherNaTable ? otherNaTable->objectUid().castToInt64() : 0),
constrUID,
otherConstrUID,
constrCatName,
constrSchName,
constrObjName,
(isPkeyConstr ? COM_PRIMARY_KEY_CONSTRAINT_OBJECT :
(isUniqConstr ? COM_UNIQUE_CONSTRAINT_OBJECT :
(isRefConstr ? COM_REFERENTIAL_CONSTRAINT_OBJECT :
COM_CHECK_CONSTRAINT_OBJECT)))))
{
processReturn();
return;
}
// if the index corresponding to this constraint is an implicit index and 'no check'
// option is not specified, drop it.
if (((indexFound) && (NOT isExplicit) && (keytag != 0)) &&
(alterDropConstraint->getDropBehavior() != COM_NO_CHECK_DROP_BEHAVIOR))
{
char buf[4000];
str_sprintf(buf, "drop index \"%s\".\"%s\".\"%s\" no check",
constrCatName.data(), constrSchName.data(), constrObjName.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
}
if (updateObjectRedefTime(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT_LIT))
{
processReturn();
deallocEHI(ehi);
return;
}
// remove NATable for this table
ActiveSchemaDB()->getNATableDB()->removeNATable(cn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
if (isRefConstr && otherNaTable)
{
CorrName otherCn(
otherNaTable->getExtendedQualName().getQualifiedNameObj(), STMTHEAP);
ActiveSchemaDB()->getNATableDB()->removeNATable(otherCn,
NATableDB::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT);
}
return;
}
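// GRANT or REVOKE privileges on a Trafodion object.
// Determines the object type and the effective grantor, validates the
// specified privileges, and calls the privilege manager to grant or revoke
// them for each grantee. Native HBase tables are handled by
// seabaseGrantRevokeHBase.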
void CmpSeabaseDDL::seabaseGrantRevoke(
StmtDDLNode * stmtDDLNode,
NABoolean isGrant,
NAString &currCatName, NAString &currSchName,
NABoolean useHBase)
{
Lng32 retcode = 0;
if (!isAuthorizationEnabled())
{
*CmpCommon::diags() << DgSqlCode(-CAT_AUTHORIZATION_NOT_ENABLED);
return;
}
StmtDDLGrant * grantNode = NULL;
StmtDDLRevoke * revokeNode = NULL;
NAString tabName;
ComAnsiNameSpace nameSpace;
NAString grantedByName;
NABoolean isGrantedBySpecified = FALSE;
if (isGrant)
{
grantNode = stmtDDLNode->castToStmtDDLGrant();
tabName = grantNode->getTableName();
nameSpace = grantNode->getGrantNameAsQualifiedName().getObjectNameSpace();
isGrantedBySpecified = grantNode->isByGrantorOptionSpecified();
grantedByName =
isGrantedBySpecified ? grantNode->getByGrantor()->getAuthorizationIdentifier(): "";
}
else
{
revokeNode = stmtDDLNode->castToStmtDDLRevoke();
tabName = revokeNode->getTableName();
nameSpace = revokeNode->getRevokeNameAsQualifiedName().getObjectNameSpace();
isGrantedBySpecified = revokeNode->isByGrantorOptionSpecified();
grantedByName =
isGrantedBySpecified ? revokeNode->getByGrantor()->getAuthorizationIdentifier(): "";
}
// If using HBase to perform authorization, call it now.
ComObjectName tableName(tabName, COM_TABLE_NAME);
if (useHBase || isHbase(tableName))
{
seabaseGrantRevokeHBase(stmtDDLNode, isGrant, currCatName, currSchName);
return;
}
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
retcode = existsInSeabaseMDTable(&cliInterface,
catalogNamePart, schemaNamePart, objectNamePart,
COM_BASE_TABLE_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)
? FALSE : TRUE),
TRUE, TRUE);
if (retcode < 0)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
// set up common information for all grantees
ComObjectType objectType = COM_BASE_TABLE_OBJECT;
switch (nameSpace)
{
case COM_LIBRARY_NAME:
objectType = COM_LIBRARY_OBJECT;
break;
case COM_UDF_NAME:
case COM_UDR_NAME:
objectType = COM_USER_DEFINED_ROUTINE_OBJECT;
break;
case COM_SEQUENCE_GENERATOR_NAME:
objectType = COM_SEQUENCE_GENERATOR_OBJECT;
break;
default:
objectType = COM_BASE_TABLE_OBJECT;
}
// get the objectUID and objectOwner
Int64 objectUID = 0;
Int32 objectOwnerID = 0;
Int32 schemaOwnerID = 0;
Int64 objectFlags = 0 ;
NATable *naTable = NULL;
if (objectType == COM_BASE_TABLE_OBJECT)
{
naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
processReturn();
return;
}
objectUID = (int64_t)naTable->objectUid().get_value();
objectOwnerID = (int32_t)naTable->getOwner();
schemaOwnerID = naTable->getSchemaOwner();
objectType = naTable->getObjectType();
}
ElemDDLGranteeArray & pGranteeArray =
(isGrant ? grantNode->getGranteeArray() : revokeNode->getGranteeArray());
ElemDDLPrivActArray & privActsArray =
(isGrant ? grantNode->getPrivilegeActionArray() :
revokeNode->getPrivilegeActionArray());
NABoolean allPrivs =
(isGrant ? grantNode->isAllPrivilegesSpecified() :
revokeNode->isAllPrivilegesSpecified());
NABoolean isWGOSpecified =
(isGrant ? grantNode->isWithGrantOptionSpecified() :
revokeNode->isGrantOptionForSpecified());
std::vector<PrivType> objectPrivs;
std::vector<ColPrivSpec> colPrivs;
if (allPrivs)
objectPrivs.push_back(ALL_PRIVS);
else
if (!checkSpecifiedPrivs(privActsArray,extTableName.data(),objectType,
naTable,objectPrivs,colPrivs))
{
processReturn();
return;
}
// Prepare to call privilege manager
NAString MDLoc;
CONCAT_CATSCH(MDLoc, getSystemCatalog(), SEABASE_MD_SCHEMA);
NAString privMgrMDLoc;
CONCAT_CATSCH(privMgrMDLoc, getSystemCatalog(), SEABASE_PRIVMGR_SCHEMA);
PrivMgrCommands command(std::string(MDLoc.data()),
std::string(privMgrMDLoc.data()),
CmpCommon::diags());
// If the object is a metadata table or a privilege manager table, don't
// allow the privilege to be grantable.
NABoolean isMDTable = (isSeabaseMD(tableName) ||
isSeabasePrivMgrMD(tableName));
if (isMDTable && isWGOSpecified)
{
*CmpCommon::diags() << DgSqlCode(-CAT_WGO_NOT_ALLOWED);
processReturn();
return;
}
// Grants/revokes of the select privilege on metadata tables are allowed
// Grants/revokes of other relevant privileges are allowed if parser flag
// INTERNAL_QUERY_FROM_EXEUTIL is set
// Revoke: allow ALL and ALL_DML to be specified
if (isMDTable && !Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL) &&
!isMDGrantRevokeOK(objectPrivs,colPrivs,isGrant))
{
*CmpCommon::diags() << DgSqlCode(-CAT_SMD_PRIVS_CANNOT_BE_CHANGED);
processReturn();
return;
}
// for metadata tables, the objectUID is not initialized in the NATable
// structure
if (objectUID == 0)
{
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
objectUID = getObjectInfo(&cliInterface,
catalogNamePart.data(), schemaNamePart.data(),
objectNamePart.data(), objectType,
objectOwnerID,schemaOwnerID,objectFlags);
if (objectUID == -1 || objectOwnerID == 0)
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("getting object UID and object owner for grant/revoke request");
processReturn();
return;
}
}
// Determine effective grantor ID and grantor name based on GRANTED BY clause
// current user, and object owner
Int32 effectiveGrantorID;
std::string effectiveGrantorName;
PrivStatus result = command.getGrantorDetailsForObject(
isGrantedBySpecified,
std::string(grantedByName.data()),
objectOwnerID,
effectiveGrantorID,
effectiveGrantorName);
if (result != STATUS_GOOD)
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("getting grantor ID and grantor name");
processReturn();
return;
}
std::string objectName (extTableName.data());
// For now, only support one grantee per request
// TBD: support multiple grantees - a testing effort?
if (pGranteeArray.entries() > 1)
{
*CmpCommon::diags() << DgSqlCode (-CAT_ONLY_ONE_GRANTEE_ALLOWED);
processReturn();
return;
}
for (CollIndex j = 0; j < pGranteeArray.entries(); j++)
{
NAString authName(pGranteeArray[j]->getAuthorizationIdentifier());
Int32 grantee;
if (pGranteeArray[j]->isPublic())
{
grantee = PUBLIC_USER;
authName = PUBLIC_AUTH_NAME;
}
else
{
Int16 retcode = ComUser::getAuthIDFromAuthName(authName.data(), grantee);
if (retcode == FENOTFOUND)
{
*CmpCommon::diags() << DgSqlCode(-CAT_AUTHID_DOES_NOT_EXIST_ERROR)
<< DgString0(authName.data());
processReturn();
return;
}
else if (retcode != FEOK)
{
*CmpCommon::diags() << DgSqlCode (-CAT_INTERNAL_EXCEPTION_ERROR)
<< DgString0(__FILE__)
<< DgInt0(__LINE__)
<< DgString1("verifying grantee");
processReturn();
return;
}
}
std::string granteeName (authName.data());
if (isGrant)
{
PrivStatus result = command.grantObjectPrivilege(objectUID,
objectName,
objectType,
grantee,
granteeName,
effectiveGrantorID,
effectiveGrantorName,
objectPrivs,
colPrivs,
allPrivs,
isWGOSpecified);
}
else
{
PrivStatus result = command.revokeObjectPrivilege(objectUID,
objectName,
objectType,
grantee,
granteeName,
effectiveGrantorID,
effectiveGrantorName,
objectPrivs,
colPrivs,
allPrivs,
isWGOSpecified);
}
}
return;
}
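// GRANT or REVOKE on a native HBase table.
// Maps the SQL privilege actions to HBase READ/WRITE/CREATE permissions and
// grants or revokes them through the HBase interface for each grantee.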
void CmpSeabaseDDL::seabaseGrantRevokeHBase(
StmtDDLNode * stmtDDLNode,
NABoolean isGrant,
NAString &currCatName, NAString &currSchName)
{
Lng32 cliRC = 0;
Lng32 retcode = 0;
StmtDDLGrant * grantNode = NULL;
StmtDDLRevoke * revokeNode = NULL;
NAString tabName;
if (isGrant)
{
grantNode = stmtDDLNode->castToStmtDDLGrant();
tabName = grantNode->getTableName();
}
else
{
revokeNode = stmtDDLNode->castToStmtDDLRevoke();
tabName = revokeNode->getTableName();
}
ComObjectName tableName(tabName);
ComAnsiNamePart currCatAnsiName(currCatName);
ComAnsiNamePart currSchAnsiName(currSchName);
tableName.applyDefaults(currCatAnsiName, currSchAnsiName);
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
const NAString extTableName = tableName.getExternalName(TRUE);
const NAString extNameForHbase = catalogNamePart + "." + schemaNamePart + "." + objectNamePart;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (isSeabaseReservedSchema(tableName))
{
*CmpCommon::diags() << DgSqlCode(-1118)
<< DgTableName(extTableName);
// deallocEHI(ehi);
processReturn();
return;
}
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context(), FALSE/*inDDL*/);
CorrName cn(tableName.getObjectNamePart().getInternalName(),
STMTHEAP,
tableName.getSchemaNamePart().getInternalName(),
tableName.getCatalogNamePart().getInternalName());
NATable *naTable = bindWA.getNATable(cn);
if (naTable == NULL || bindWA.errStatus())
{
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(cn.getExposedNameAsAnsiString());
deallocEHI(ehi);
processReturn();
return;
}
ElemDDLGranteeArray & pGranteeArray =
(isGrant ? grantNode->getGranteeArray() : revokeNode->getGranteeArray());
ElemDDLPrivActArray & pPrivActsArray =
(isGrant ? grantNode->getPrivilegeActionArray() :
revokeNode->getPrivilegeActionArray());
NABoolean allPrivs =
(isGrant ? grantNode->isAllPrivilegesSpecified() :
revokeNode->isAllPrivilegesSpecified());
TextVec userPermissions;
if (allPrivs)
{
userPermissions.push_back("READ");
userPermissions.push_back("WRITE");
userPermissions.push_back("CREATE");
}
else
{
for (Lng32 i = 0; i < pPrivActsArray.entries(); i++)
{
switch (pPrivActsArray[i]->getOperatorType() )
{
case ELM_PRIV_ACT_SELECT_ELEM:
{
userPermissions.push_back("READ");
break;
}
case ELM_PRIV_ACT_INSERT_ELEM:
case ELM_PRIV_ACT_DELETE_ELEM:
case ELM_PRIV_ACT_UPDATE_ELEM:
{
userPermissions.push_back("WRITE");
break;
}
case ELM_PRIV_ACT_CREATE_ELEM:
{
userPermissions.push_back("CREATE");
break;
}
default:
{
NAString privType = "UNKNOWN";
*CmpCommon::diags() << DgSqlCode(-CAT_INVALID_PRIV_FOR_OBJECT)
<< DgString0(privType)
<< DgString1(extTableName);
deallocEHI(ehi);
processReturn();
return;
}
} // end switch
} // for
}
for (CollIndex j = 0; j < pGranteeArray.entries(); j++)
{
NAString authName(pGranteeArray[j]->getAuthorizationIdentifier());
if (isGrant)
retcode = ehi->grant(authName.data(), extNameForHbase.data(), userPermissions);
else
retcode = ehi->revoke(authName.data(), extNameForHbase.data(), userPermissions);
if (retcode < 0)
{
*CmpCommon::diags() << DgSqlCode(-8448)
<< (isGrant ? DgString0((char*)"ExpHbaseInterface::grant()") :
DgString0((char*)"ExpHbaseInterface::revoke()"))
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr().data());
deallocEHI(ehi);
processReturn();
return;
}
}
retcode = ehi->close();
if (retcode < 0)
{
*CmpCommon::diags() << DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::close()")
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr().data());
deallocEHI(ehi);
processReturn();
return;
}
deallocEHI(ehi);
processReturn();
return;
}
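// CREATE HBASE TABLE: creates a native HBase table with the specified
// column families and HBase create options.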
void CmpSeabaseDDL::createNativeHbaseTable(
StmtDDLCreateHbaseTable * createTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
ComObjectName tableName(createTableNode->getTableName());
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
std::vector<NAString> colFamVec;
for (Lng32 i = 0; i < createTableNode->csl()->entries(); i++)
{
const NAString * nas = (NAString*)(*createTableNode->csl())[i];
colFamVec.push_back(nas->data());
}
NAList<HbaseCreateOption*> hbaseCreateOptions;
NAString hco;
retcode = setupHbaseOptions(createTableNode->getHbaseOptionsClause(),
0, objectNamePart,
hbaseCreateOptions, hco);
if (retcode)
{
deallocEHI(ehi);
processReturn();
return;
}
HbaseStr hbaseTable;
hbaseTable.val = (char*)objectNamePart.data();
hbaseTable.len = objectNamePart.length();
if (createHbaseTable(ehi, &hbaseTable, colFamVec,
&hbaseCreateOptions) == -1)
{
deallocEHI(ehi);
processReturn();
return;
}
}
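// DROP HBASE TABLE: drops a native HBase table.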
void CmpSeabaseDDL::dropNativeHbaseTable(
StmtDDLDropHbaseTable * dropTableNode,
NAString &currCatName, NAString &currSchName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
ComObjectName tableName(dropTableNode->getTableName());
const NAString catalogNamePart = tableName.getCatalogNamePartAsAnsiString();
const NAString schemaNamePart = tableName.getSchemaNamePartAsAnsiString(TRUE);
const NAString objectNamePart = tableName.getObjectNamePartAsAnsiString(TRUE);
// TBD - add a check to see if there is an external HBASE table that should be
// removed
ExpHbaseInterface * ehi = allocEHI();
if (ehi == NULL)
{
processReturn();
return;
}
HbaseStr hbaseTable;
hbaseTable.val = (char*)objectNamePart.data();
hbaseTable.len = objectNamePart.length();
retcode = dropHbaseTable(ehi, &hbaseTable);
if (retcode < 0)
{
deallocEHI(ehi);
processReturn();
return;
}
}
/////////////////////////////////////////////////////////////////////////
// This method generates and returns tableInfo struct for internal special
// tables (like metadata, histograms). These tables have hardcoded definitions
// but need objectUID to be returned. ObjectUID is stored in metadata
// and is read from there.
// This is done only if we are not in bootstrap mode (for example, while
// initializing metadata). In bootstrap mode there is no metadata available,
// so the objectUID cannot be read.
// A NULL tableInfo is returned if in bootstrap mode.
//
// RETURN: -1, if error. 0, if all ok.
//////////////////////////////////////////////////////////////////////////
short CmpSeabaseDDL::getSpecialTableInfo
(
NAMemory * heap,
const NAString &catName,
const NAString &schName,
const NAString &objName,
const NAString &extTableName,
const ComObjectType &objType,
ComTdbVirtTableTableInfo* &tableInfo)
{
Lng32 cliRC = 0;
tableInfo = NULL;
NABoolean switched = FALSE;
Int32 objectOwner = NA_UserIdDefault;
Int32 schemaOwner = NA_UserIdDefault;
Int64 objUID = 1; // dummy value
Int64 objectFlags = 0 ;
NABoolean createTableInfo = FALSE;
NABoolean isUninit = FALSE;
if (CmpCommon::context()->isUninitializedSeabase())
{
isUninit = TRUE;
createTableInfo = TRUE;
}
NABoolean getUID = TRUE;
if (isUninit)
getUID = FALSE;
else if (CmpCommon::context()->isMxcmp())
getUID = FALSE;
else if (CmpCommon::getDefault(TRAF_BOOTSTRAP_MD_MODE) == DF_ON)
getUID = FALSE;
if (getUID)
{
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
return -1;
cliRC = cliInterface.holdAndSetCQD("traf_bootstrap_md_mode", "ON");
if (cliRC < 0)
{
goto label_error_return;
}
objUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(), objName.data(),
objType, objectOwner, schemaOwner,objectFlags);
cliRC = cliInterface.restoreCQD("traf_bootstrap_md_mode");
if (objUID <= 0)
goto label_error_return;
switchBackCompiler();
createTableInfo = TRUE;
}
if (createTableInfo)
{
tableInfo = new(heap) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = new(heap) char[extTableName.length() + 1];
strcpy((char*)tableInfo->tableName, (char*)extTableName.data());
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = objUID;
tableInfo->isAudited = 1;
tableInfo->validDef = 1;
tableInfo->objOwnerID = objectOwner;
tableInfo->schemaOwnerID = schemaOwner;
tableInfo->hbaseCreateOptions = NULL;
tableInfo->objectFlags = objectFlags;
tableInfo->rowFormat = COM_UNKNOWN_FORMAT_TYPE;
}
return 0;
label_error_return:
switchBackCompiler();
return -1;
}
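// Returns a desc_struct for a hardcoded metadata table definition.
// The primary key is returned as a unique constraint named <table>_PK.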
desc_struct * CmpSeabaseDDL::getSeabaseMDTableDesc(
const NAString &catName,
const NAString &schName,
const NAString &objName,
const ComObjectType objType)
{
Lng32 cliRC = 0;
desc_struct * tableDesc = NULL;
NAString schNameL = "\"";
schNameL += schName;
schNameL += "\"";
ComObjectName coName(catName, schNameL, objName);
NAString extTableName = coName.getExternalName(TRUE);
ComTdbVirtTableTableInfo * tableInfo = NULL;
Lng32 colInfoSize = 0;
const ComTdbVirtTableColumnInfo * colInfo = NULL;
Lng32 keyInfoSize = 0;
const ComTdbVirtTableKeyInfo * keyInfo = NULL;
Lng32 uniqueInfoSize = 0;
ComTdbVirtTableConstraintInfo * constrInfo = NULL;
Lng32 indexInfoSize = 0;
const ComTdbVirtTableIndexInfo * indexInfo = NULL;
if (NOT CmpSeabaseMDupgrade::getMDtableInfo(coName,
tableInfo,
colInfoSize, colInfo,
keyInfoSize, keyInfo,
indexInfoSize, indexInfo,
objType))
return NULL;
// Setup the primary key information as a unique constraint
uniqueInfoSize = 1;
constrInfo = new(STMTHEAP) ComTdbVirtTableConstraintInfo[uniqueInfoSize];
constrInfo->baseTableName = (char*)extTableName.data();
// The primary key constraint name is the name of the object appended
// with "_PK";
NAString constrName = extTableName;
constrName += "_PK";
constrInfo->constrName = (char*)constrName.data();
constrInfo->constrType = 3; // pkey_constr
constrInfo->colCount = keyInfoSize;
constrInfo->keyInfoArray = (ComTdbVirtTableKeyInfo *)keyInfo;
constrInfo->numRingConstr = 0;
constrInfo->ringConstrArray = NULL;
constrInfo->numRefdConstr = 0;
constrInfo->refdConstrArray = NULL;
constrInfo->checkConstrLen = 0;
constrInfo->checkConstrText = NULL;
tableDesc =
Generator::createVirtualTableDesc
((char*)extTableName.data(),
colInfoSize,
(ComTdbVirtTableColumnInfo*)colInfo,
keyInfoSize,
(ComTdbVirtTableKeyInfo*)keyInfo,
uniqueInfoSize, constrInfo,
indexInfoSize,
(ComTdbVirtTableIndexInfo *)indexInfo,
0, NULL,
tableInfo);
return tableDesc;
}
desc_struct * CmpSeabaseDDL::getSeabaseHistTableDesc(const NAString &catName,
const NAString &schName,
const NAString &objName)
{
Lng32 cliRC = 0;
desc_struct * tableDesc = NULL;
NAString schNameL = "\"";
schNameL += schName;
schNameL += "\""; // transforms internal format schName to external format
ComObjectName coName(catName, schNameL, objName);
NAString extTableName = coName.getExternalName(TRUE);
Lng32 numCols = 0;
ComTdbVirtTableColumnInfo * colInfo = NULL;
Lng32 numKeys;
ComTdbVirtTableKeyInfo * keyInfo;
ComTdbVirtTableIndexInfo * indexInfo;
Parser parser(CmpCommon::context());
ComTdbVirtTableConstraintInfo * constrInfo =
new(STMTHEAP) ComTdbVirtTableConstraintInfo[1];
NAString constrName;
if (objName == HBASE_HIST_NAME)
{
if (processDDLandCreateDescs(parser,
seabaseHistogramsDDL, sizeof(seabaseHistogramsDDL),
FALSE,
0, NULL, 0, NULL,
numCols, colInfo,
numKeys, keyInfo,
indexInfo))
return NULL;
constrName = HBASE_HIST_PK;
}
else if (objName == HBASE_HISTINT_NAME)
{
if (processDDLandCreateDescs(parser,
seabaseHistogramIntervalsDDL, sizeof(seabaseHistogramIntervalsDDL),
FALSE,
0, NULL, 0, NULL,
numCols, colInfo,
numKeys, keyInfo,
indexInfo))
return NULL;
constrName = HBASE_HISTINT_PK;
}
else
return NULL;
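// Describe the primary key of the histogram table as a unique constraint.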
ComObjectName coConstrName(catName, schNameL, constrName);
NAString * extConstrName =
new(STMTHEAP) NAString(coConstrName.getExternalName(TRUE));
constrInfo->baseTableName = (char*)extTableName.data();
constrInfo->constrName = (char*)extConstrName->data();
constrInfo->constrType = 3; // pkey_constr
constrInfo->colCount = numKeys;
constrInfo->keyInfoArray = keyInfo;
constrInfo->numRingConstr = 0;
constrInfo->ringConstrArray = NULL;
constrInfo->numRefdConstr = 0;
constrInfo->refdConstrArray = NULL;
constrInfo->checkConstrLen = 0;
constrInfo->checkConstrText = NULL;
ComTdbVirtTableTableInfo * tableInfo = NULL;
if (getSpecialTableInfo(STMTHEAP, catName, schName, objName,
extTableName, COM_BASE_TABLE_OBJECT, tableInfo))
return NULL;
tableDesc =
Generator::createVirtualTableDesc
((char*)extTableName.data(),
numCols,
colInfo,
numKeys,
keyInfo,
1, constrInfo,
0, NULL,
0, NULL,
tableInfo);
return tableDesc;
}
Lng32 CmpSeabaseDDL::getSeabaseColumnInfo(ExeCliInterface *cliInterface,
Int64 objUID,
const NAString &catName,
const NAString &schName,
const NAString &objName,
char *direction,
NABoolean *isTableSalted,
Lng32 *identityColPos,
Lng32 *numCols,
ComTdbVirtTableColumnInfo **outColInfoArray)
{
char query[3000];
Lng32 cliRC;
if (identityColPos)
*identityColPos = -1;
Queue * tableColInfo = NULL;
str_sprintf(query, "select column_name, column_number, column_class, "
"fs_data_type, column_size, column_precision, column_scale, "
"datetime_start_field, datetime_end_field, trim(is_upshifted), column_flags, "
"nullable, trim(character_set), default_class, default_value, "
"trim(column_heading), hbase_col_family, hbase_col_qualifier, direction, "
"is_optional, flags from %s.\"%s\".%s "
"where object_uid = %Ld and direction in (%s)"
"order by 2 for read committed access",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID,
direction);
cliRC = cliInterface->fetchAllRows(tableColInfo, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
*numCols = tableColInfo->numEntries();
ComTdbVirtTableColumnInfo *colInfoArray =
new(STMTHEAP) ComTdbVirtTableColumnInfo[*numCols];
NABoolean tableIsSalted = FALSE;
tableColInfo->position();
for (Lng32 idx = 0; idx < *numCols; idx++)
{
OutputInfo * oi = (OutputInfo*)tableColInfo->getNext();
ComTdbVirtTableColumnInfo &colInfo = colInfoArray[idx];
char * data = NULL;
Lng32 len = 0;
// get the column name
oi->get(0, data, len);
colInfo.colName = new(STMTHEAP) char[len + 1];
strcpy((char*)colInfo.colName, data);
colInfo.colNumber = *(Lng32*)oi->get(1);
char *colClass = (char*)oi->get(2);
if (strcmp(colClass,COM_USER_COLUMN_LIT) == 0)
colInfo.columnClass = COM_USER_COLUMN;
else if (strcmp(colClass,COM_SYSTEM_COLUMN_LIT) == 0)
colInfo.columnClass = COM_SYSTEM_COLUMN;
else if (strcmp(colClass,COM_ADDED_USER_COLUMN_LIT) == 0)
colInfo.columnClass = COM_ADDED_USER_COLUMN;
else if (strcmp(colClass,COM_MV_SYSTEM_ADDED_COLUMN_LIT) == 0)
colInfo.columnClass = COM_MV_SYSTEM_ADDED_COLUMN;
else
CMPASSERT(0);
colInfo.datatype = *(Lng32*)oi->get(3);
colInfo.length = *(Lng32*)oi->get(4);
colInfo.precision = *(Lng32*)oi->get(5);
colInfo.scale = *(Lng32*)oi->get(6);
colInfo.dtStart = *(Lng32 *)oi->get(7);
colInfo.dtEnd = *(Lng32 *)oi->get(8);
if (strcmp((char*)oi->get(9), "Y") == 0)
colInfo.upshifted = -1;
else
colInfo.upshifted = 0;
colInfo.hbaseColFlags = *(ULng32 *)oi->get(10);
colInfo.nullable = *(Lng32 *)oi->get(11);
colInfo.charset =
(SQLCHARSET_CODE)CharInfo::getCharSetEnum((char*)oi->get(12));
colInfo.defaultClass = (ComColumnDefaultClass)*(Lng32 *)oi->get(13);
NAString tempDefVal;
data = NULL;
if (colInfo.defaultClass == COM_USER_DEFINED_DEFAULT ||
colInfo.defaultClass == COM_ALWAYS_COMPUTE_COMPUTED_COLUMN_DEFAULT ||
colInfo.defaultClass == COM_ALWAYS_DEFAULT_COMPUTED_COLUMN_DEFAULT)
{
oi->get(14, data, len);
if (colInfo.defaultClass != COM_USER_DEFINED_DEFAULT)
{
// get computed column definition from text table, but note
// that for older tables the definition may be stored in
// COLUMNS.DEFAULT_VALUE instead (that's returned in "data")
cliRC = getTextFromMD(cliInterface,
objUID,
COM_COMPUTED_COL_TEXT,
colInfo.colNumber,
tempDefVal);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
if (strcmp(colInfo.colName,
ElemDDLSaltOptionsClause::getSaltSysColName()) == 0)
tableIsSalted = TRUE;
}
}
else if (colInfo.defaultClass == COM_NULL_DEFAULT)
{
tempDefVal = "NULL";
}
else if (colInfo.defaultClass == COM_USER_FUNCTION_DEFAULT)
{
tempDefVal = "USER";
}
else if (colInfo.defaultClass == COM_CURRENT_DEFAULT)
{
tempDefVal = "CURRENT_TIMESTAMP";
}
else if ((colInfo.defaultClass == COM_IDENTITY_GENERATED_BY_DEFAULT) ||
(colInfo.defaultClass == COM_IDENTITY_GENERATED_ALWAYS))
{
NAString userFunc("SEQNUM(");
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catName, schName, objName, colInfo.colName,
seqName);
NAString fullyQSeq = catName + "." + schName + "." + "\"" + seqName + "\"";
tempDefVal = userFunc + fullyQSeq + ")";
if (identityColPos)
*identityColPos = idx;
}
if (! tempDefVal.isNull())
{
data = (char*)tempDefVal.data();
len = tempDefVal.length();
}
if (colInfo.defaultClass != COM_NO_DEFAULT)
{
colInfo.defVal = new(STMTHEAP) char[len + 2];
str_cpy_all((char*)colInfo.defVal, data, len);
char * c = (char*)colInfo.defVal;
c[len] = 0;
c[len+1] = 0;
}
else
colInfo.defVal = NULL;
oi->get(15, data, len);
if (len > 0)
{
colInfo.colHeading = new(STMTHEAP) char[len + 1];
strcpy((char*)colInfo.colHeading, data);
}
else
colInfo.colHeading = NULL;
oi->get(16, data, len);
colInfo.hbaseColFam = new(STMTHEAP) char[len + 1];
strcpy((char*)colInfo.hbaseColFam, data);
oi->get(17, data, len);
colInfo.hbaseColQual = new(STMTHEAP) char[len + 1];
strcpy((char*)colInfo.hbaseColQual, data);
strcpy(colInfo.paramDirection, (char*)oi->get(18));
if (*((char*)oi->get(19)) == 'Y')
colInfo.isOptional = 1;
else
colInfo.isOptional = 0;
colInfo.colFlags = *(Int64 *)oi->get(20);
// temporary code, until we have updated flags to have the salt
// flag set for all tables, even those created before end of November
// 2014, when the flag was added during Trafodion R1.0 development
if (colInfo.defaultClass == COM_ALWAYS_COMPUTE_COMPUTED_COLUMN_DEFAULT &&
strcmp(colInfo.colName,
ElemDDLSaltOptionsClause::getSaltSysColName()) == 0)
colInfo.colFlags |= SEABASE_COLUMN_IS_SALT;
}
if (isTableSalted != NULL)
*isTableSalted = tableIsSalted;
*outColInfoArray = colInfoArray;
return *numCols;
}
ComTdbVirtTableSequenceInfo * CmpSeabaseDDL::getSeabaseSequenceInfo(
const NAString &catName,
const NAString &schName,
const NAString &seqName,
NAString &extSeqName,
Int32 & objectOwner,
Int32 & schemaOwner,
Int64 & seqUID)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
NAString schNameL = "\"";
schNameL += schName;
schNameL += "\"";
NAString seqNameL = "\"";
seqNameL += seqName;
seqNameL += "\"";
ComObjectName coName(catName, schNameL, seqNameL);
extSeqName = coName.getExternalName(TRUE);
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
objectOwner = NA_UserIdDefault;
seqUID = -1;
schemaOwner = NA_UserIdDefault;
Int64 objectFlags = 0 ;
seqUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(), seqName.data(),
COM_SEQUENCE_GENERATOR_OBJECT,
objectOwner,schemaOwner,objectFlags,TRUE/*report error*/);
if (seqUID == -1 || objectOwner == 0)
{
// There may not be an error in the diags area, if not, add an error
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("getting object UID and owners for get sequence command");
return NULL;
}
char buf[4000];
str_sprintf(buf, "select fs_data_type, start_value, increment, max_value, min_value, cycle_option, cache_size, next_value, seq_type, redef_ts from %s.\"%s\".%s where seq_uid = %Ld",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_SEQ_GEN,
seqUID);
Queue * seqQueue = NULL;
cliRC = cliInterface.fetchAllRows(seqQueue, buf, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return NULL;
}
if ((seqQueue->numEntries() == 0) ||
(seqQueue->numEntries() > 1))
{
*CmpCommon::diags() << DgSqlCode(-4082)
<< DgTableName(extSeqName);
return NULL;
}
ComTdbVirtTableSequenceInfo *seqInfo =
new (STMTHEAP) ComTdbVirtTableSequenceInfo();
seqQueue->position();
OutputInfo * vi = (OutputInfo*)seqQueue->getNext();
seqInfo->datatype = *(Lng32*)vi->get(0);
seqInfo->startValue = *(Int64*)vi->get(1);
seqInfo->increment = *(Int64*)vi->get(2);
seqInfo->maxValue = *(Int64*)vi->get(3);
seqInfo->minValue = *(Int64*)vi->get(4);
seqInfo->cycleOption = (memcmp(vi->get(5), COM_YES_LIT, 1) == 0 ? 1 : 0);
seqInfo->cache = *(Int64*)vi->get(6);
seqInfo->nextValue = *(Int64*)vi->get(7);
seqInfo->seqType = (memcmp(vi->get(8), "E", 1) == 0 ? COM_EXTERNAL_SG : COM_INTERNAL_SG);
seqInfo->seqUID = seqUID;
seqInfo->redefTime = *(Int64*)vi->get(9);
return seqInfo;
}
desc_struct * CmpSeabaseDDL::getSeabaseLibraryDesc(
const NAString &catName,
const NAString &schName,
const NAString &libraryName)
{
desc_struct * tableDesc = NULL;
NAString extLibName;
Int32 objectOwner = 0;
Int32 schemaOwner = 0;
Int64 objectFlags = 0 ;
char query[4000];
char buf[4000];
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
if (switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
return NULL;
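// Look up the library object's UID and owners in the OBJECTS metadata table.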
Int64 libUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(),
libraryName.data(),
COM_LIBRARY_OBJECT,
objectOwner, schemaOwner,objectFlags);
if (libUID == -1)
{
switchBackCompiler();
return NULL;
}
str_sprintf(buf, "SELECT library_filename, version "
"FROM %s.\"%s\".%s "
"WHERE library_uid = %Ld "
"FOR READ COMMITTED ACCESS",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_LIBRARIES,libUID);
Int32 cliRC = cliInterface.fetchRowsPrologue(buf, TRUE/*no exec*/);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
switchBackCompiler();
return NULL;
}
cliRC = cliInterface.clearExecFetchClose(NULL, 0);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
switchBackCompiler();
return NULL;
}
if (cliRC == 100) // did not find the row
{
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(libraryName);
switchBackCompiler();
return NULL;
}
switchBackCompiler();
char * ptr = NULL;
Lng32 len = 0;
ComTdbVirtTableLibraryInfo *libraryInfo = new (STMTHEAP) ComTdbVirtTableLibraryInfo();
if (libraryInfo == NULL)
return NULL;
libraryInfo->library_name = libraryName.data();
cliInterface.getPtrAndLen(1, ptr, len);
libraryInfo->library_filename = new (STMTHEAP) char[len + 1];
str_cpy_and_null((char *)libraryInfo->library_filename, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(2, ptr, len);
libraryInfo->library_version = *(Int32 *)ptr;
libraryInfo->object_owner_id = objectOwner;
libraryInfo->schema_owner_id = schemaOwner;
libraryInfo->library_UID = libUID;
desc_struct *library_desc = Generator::createVirtualLibraryDesc(
libraryName.data(),
libraryInfo);
processReturn();
return library_desc;
}
desc_struct * CmpSeabaseDDL::getSeabaseSequenceDesc(const NAString &catName,
const NAString &schName,
const NAString &seqName)
{
desc_struct * tableDesc = NULL;
NAString extSeqName;
Int32 objectOwner = 0;
Int32 schemaOwner = 0;
Int64 seqUID = -1;
ComTdbVirtTableSequenceInfo * seqInfo =
getSeabaseSequenceInfo(catName, schName, seqName, extSeqName,
objectOwner, schemaOwner, seqUID);
if (! seqInfo)
{
return NULL;
}
ComTdbVirtTableTableInfo * tableInfo =
new(STMTHEAP) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = extSeqName.data();
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = seqUID;
tableInfo->isAudited = 0;
tableInfo->validDef = 1;
tableInfo->objOwnerID = objectOwner;
tableInfo->schemaOwnerID = schemaOwner;
tableInfo->hbaseCreateOptions = NULL;
tableInfo->objectFlags = 0;
tableDesc =
Generator::createVirtualTableDesc
((char*)extSeqName.data(),
0, NULL, // colInfo
0, NULL, // keyInfo
0, NULL,
0, NULL, //indexInfo
0, NULL, // viewInfo
tableInfo,
seqInfo);
return tableDesc;
}
void populateRegionDescForEndKey(char* buf, Int32 len, struct desc_struct* target)
{
target->body.hbase_region_desc.beginKey = NULL;
target->body.hbase_region_desc.beginKeyLen = 0;
target->body.hbase_region_desc.endKey = buf;
target->body.hbase_region_desc.endKeyLen = len;
}
void populateRegionDescAsRANGE(char* buf, Int32 len, struct desc_struct* target, NAMemory*)
{
target->header.nodetype = DESC_HBASE_RANGE_REGION_TYPE;
populateRegionDescForEndKey(buf, len, target);
}
desc_struct * CmpSeabaseDDL::getSeabaseUserTableDesc(const NAString &catName,
const NAString &schName,
const NAString &objName,
const ComObjectType objType,
NABoolean includeInvalidDefs,
Int32 ctlFlags)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
char query[4000];
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
desc_struct * tableDesc = NULL;
Int32 objectOwner = 0 ;
Int32 schemaOwner = 0 ;
Int64 objUID = -1 ;
Int64 objectFlags = 0 ;
//
// For performance reasons, whenever possible, we want to issue only one
// "select" to the OBJECTS metadata table to determine both the existence
// of the specified table and the objUID for the table. Since it is more
// likely that a user query refers to tables (directly or indirectly) that
// are already in existence, this optimization can save the cost of the
// existence check for all such user objects. In the less likely case that
// an object does not exist we must drop back and re-issue the metadata
// query for the existence check in order to ensure we get the proper error
// reported.
//
if ( objType ) // Must have objType
{
objUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(), objName.data(),
objType, objectOwner, schemaOwner,objectFlags, FALSE /*no error now */,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL) ? FALSE
: (objType != COM_INDEX_OBJECT ? TRUE : FALSE)));
}
// If we didn't call getObjectInfo() above OR if it gave an error, then:
if ( objUID < 0 )
{
cliRC = existsInSeabaseMDTable(&cliInterface,
catName.data(), schName.data(), objName.data(),
COM_UNKNOWN_OBJECT,
(Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL) ? FALSE
: (objType != COM_INDEX_OBJECT ? TRUE : FALSE)),
TRUE, TRUE);
if (cliRC < 0)
{
processReturn();
return NULL;
}
if (cliRC == 0) // doesn't exist
{
processReturn();
return NULL;
}
}
if (objUID < 0)
{
if (objType != COM_BASE_TABLE_OBJECT)
{
processReturn();
return NULL;
}
else
{
// Object type passed in was for a table. Could not find it, but
// this could be a view. Look for that.
CmpCommon::diags()->clear();
objUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(), objName.data(), COM_VIEW_OBJECT,
objectOwner,schemaOwner,objectFlags);
if (objUID < 0)
{
processReturn();
return NULL;
}
}
}
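// Read table-level attributes (audit flag, salt partitions, row format) and
// any HBase create options or column family text stored for this object.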
str_sprintf(query, "select is_audited, num_salt_partns, row_format from %s.\"%s\".%s where table_uid = %Ld for read committed access",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLES,
objUID);
Queue * tableAttrQueue = NULL;
cliRC = cliInterface.fetchAllRows(tableAttrQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
NABoolean isAudited = TRUE;
Lng32 numSaltPartns = 0;
NABoolean alignedFormat = FALSE;
NAString * hbaseCreateOptions = new(STMTHEAP) NAString();
NAString colFamStr;
if (cliRC == 0) // read some rows
{
if (tableAttrQueue->entries() != 1) // only one row should be returned
{
processReturn();
return NULL;
}
tableAttrQueue->position();
OutputInfo * vi = (OutputInfo*)tableAttrQueue->getNext();
char * audit = vi->get(0);
isAudited = (memcmp(audit, COM_YES_LIT, 1) == 0);
numSaltPartns = *(Lng32*)vi->get(1);
char * format = vi->get(2);
alignedFormat = (memcmp(format, COM_ALIGNED_FORMAT_LIT, 2) == 0);
if (getTextFromMD(&cliInterface, objUID, COM_HBASE_OPTIONS_TEXT, 0,
*hbaseCreateOptions))
{
processReturn();
return NULL;
}
if (getTextFromMD(&cliInterface, objUID, COM_HBASE_COL_FAMILY_TEXT, 0,
colFamStr))
{
processReturn();
return NULL;
}
}
Lng32 numCols;
ComTdbVirtTableColumnInfo * colInfoArray;
NABoolean tableIsSalted = FALSE;
char direction[20];
str_sprintf(direction, "'%s'", COM_UNKNOWN_PARAM_DIRECTION_LIT);
Lng32 identityColPos = -1;
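// Read the column metadata for this object; this also reports whether the
// table is salted and the position of an identity column, if any.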
if (getSeabaseColumnInfo(&cliInterface,
objUID,
catName, schName, objName,
(char *)direction,
&tableIsSalted,
&identityColPos,
&numCols,
&colInfoArray) <= 0)
{
processReturn();
return NULL;
}
if (objType == COM_INDEX_OBJECT)
{
str_sprintf(query, "select k.column_name, c.column_number, k.keyseq_number, ordering, cast(0 as int not null) from %s.\"%s\".%s k, %s.\"%s\".%s c where k.column_name = c.column_name and k.object_uid = c.object_uid and k.object_uid = %Ld and k.nonkeycol = 0 for read committed access order by keyseq_number",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
objUID);
}
else
{
str_sprintf(query, "select column_name, column_number, keyseq_number, ordering, cast(0 as int not null) from %s.\"%s\".%s where object_uid = %Ld and nonkeycol = 0 for read committed access order by keyseq_number",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
objUID);
}
Queue * tableKeyInfo = NULL;
cliRC = cliInterface.fetchAllRows(tableKeyInfo, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableKeyInfo * keyInfoArray = NULL;
if (tableKeyInfo->numEntries() > 0)
{
keyInfoArray =
new(STMTHEAP) ComTdbVirtTableKeyInfo[tableKeyInfo->numEntries()];
}
tableKeyInfo->position();
for (int idx = 0; idx < tableKeyInfo->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)tableKeyInfo->getNext();
populateKeyInfo(keyInfoArray[idx], vi);
}
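// Collect metadata for every index defined on this table.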
str_sprintf(query, "select O.catalog_name, O.schema_name, O.object_name, I.keytag, I.is_unique, I.is_explicit, I.key_colcount, I.nonkey_colcount, T.num_salt_partns, T.row_format from %s.\"%s\".%s I, %s.\"%s\".%s O , %s.\"%s\".%s T where I.base_table_uid = %Ld and I.index_uid = O.object_uid %s and I.index_uid = T.table_uid for read committed access order by 1,2,3",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_INDEXES,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLES,
objUID,
(includeInvalidDefs ? " " : " and O.valid_def = 'Y' "));
//Turn off CQDs MERGE_JOINS and HASH_JOINS to avoid a full table scan of
//SEABASE_OBJECTS table. Full table scan of SEABASE_OBJECTS table causes
//simultaneous DDL operations to run into conflict.
//Make sure to restore the CQDs after this query including error paths.
cliInterface.holdAndSetCQD("MERGE_JOINS", "OFF");
cliInterface.holdAndSetCQD("HASH_JOINS", "OFF");
Queue * indexInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(indexInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
}
//restore CQDs.
cliInterface.restoreCQD("MERGE_JOINS");
cliInterface.restoreCQD("HASH_JOINS");
if (cliRC < 0)
return NULL;
ComTdbVirtTableIndexInfo * indexInfoArray = NULL;
if (indexInfoQueue->numEntries() > 0)
{
indexInfoArray =
new(STMTHEAP) ComTdbVirtTableIndexInfo[indexInfoQueue->numEntries()];
}
NAString qCatName = "\"";
qCatName += catName;
qCatName += "\"";
NAString qSchName = "\"";
qSchName += schName;
qSchName += "\"";
NAString qObjName = "\"";
qObjName += objName;
qObjName += "\"";
ComObjectName coName(qCatName, qSchName, qObjName);
NAString * extTableName =
new(STMTHEAP) NAString(coName.getExternalName(TRUE));
const NAString extNameForHbase = catName + "." + schName + "." + objName;
indexInfoQueue->position();
for (int idx = 0; idx < indexInfoQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)indexInfoQueue->getNext();
char * idxCatName = (char*)vi->get(0);
char * idxSchName = (char*)vi->get(1);
char * idxObjName = (char*)vi->get(2);
Lng32 keyTag = *(Lng32*)vi->get(3);
Lng32 isUnique = *(Lng32*)vi->get(4);
Lng32 isExplicit = *(Lng32*)vi->get(5);
Lng32 keyColCount = *(Lng32*)vi->get(6);
Lng32 nonKeyColCount = *(Lng32*)vi->get(7);
Lng32 idxNumSaltPartns = *(Lng32*)vi->get(8);
char * format = vi->get(9);
ComRowFormat idxRowFormat;
if (memcmp(format, COM_ALIGNED_FORMAT_LIT, 2) == 0)
idxRowFormat = COM_ALIGNED_FORMAT_TYPE;
else
if (memcmp(format, COM_PACKED_FORMAT_LIT, 2) == 0)
idxRowFormat = COM_PACKED_FORMAT_TYPE;
else
if (memcmp(format, COM_HBASE_FORMAT_LIT, 2) == 0)
idxRowFormat = COM_HBASE_FORMAT_TYPE;
else
idxRowFormat = COM_UNKNOWN_FORMAT_TYPE;
Int64 idxUID = getObjectUID(&cliInterface,
idxCatName, idxSchName, idxObjName,
COM_INDEX_OBJECT_LIT);
if (idxUID < 0)
{
processReturn();
return NULL;
}
NAString * idxHbaseCreateOptions = new(STMTHEAP) NAString();
if (getTextFromMD(&cliInterface, idxUID, COM_HBASE_OPTIONS_TEXT, 0,
*idxHbaseCreateOptions))
{
processReturn();
return NULL;
}
indexInfoArray[idx].baseTableName = (char*)extTableName->data();
NAString qIdxCatName = "\"";
qIdxCatName += idxCatName;
qIdxCatName += "\"";
NAString qIdxSchName = "\"";
qIdxSchName += idxSchName;
qIdxSchName += "\"";
NAString qIdxObjName = "\"";
qIdxObjName += idxObjName;
qIdxObjName += "\"";
ComObjectName coIdxName(qIdxCatName, qIdxSchName, qIdxObjName);
NAString * extIndexName =
new(STMTHEAP) NAString(coIdxName.getExternalName(TRUE));
indexInfoArray[idx].indexName = (char*)extIndexName->data();
indexInfoArray[idx].keytag = keyTag;
indexInfoArray[idx].isUnique = isUnique;
indexInfoArray[idx].isExplicit = isExplicit;
indexInfoArray[idx].keyColCount = keyColCount;
indexInfoArray[idx].nonKeyColCount = nonKeyColCount;
indexInfoArray[idx].hbaseCreateOptions =
(idxHbaseCreateOptions->isNull() ? NULL : idxHbaseCreateOptions->data());
indexInfoArray[idx].numSaltPartns = idxNumSaltPartns;
indexInfoArray[idx].rowFormat = idxRowFormat;
Queue * keyInfoQueue = NULL;
str_sprintf(query, "select column_name, column_number, keyseq_number, ordering, nonkeycol from %s.\"%s\".%s where object_uid = %Ld for read committed access order by keyseq_number",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
idxUID);
cliRC = cliInterface.initializeInfoList(keyInfoQueue, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
cliRC = cliInterface.fetchAllRows(keyInfoQueue, query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
if (keyInfoQueue->numEntries() == 0)
{
*CmpCommon::diags() << DgSqlCode(-4400);
processReturn();
return NULL;
}
ComTdbVirtTableKeyInfo * keyInfoArray =
new(STMTHEAP) ComTdbVirtTableKeyInfo[keyColCount];
ComTdbVirtTableKeyInfo * nonKeyInfoArray = NULL;
if (nonKeyColCount > 0)
{
nonKeyInfoArray =
new(STMTHEAP) ComTdbVirtTableKeyInfo[nonKeyColCount];
}
keyInfoQueue->position();
Lng32 jk = 0;
Lng32 jnk = 0;
for (Lng32 j = 0; j < keyInfoQueue->numEntries(); j++)
{
OutputInfo * vi = (OutputInfo*)keyInfoQueue->getNext();
Lng32 nonKeyCol = *(Lng32*)vi->get(4);
if (nonKeyCol == 0)
{
populateKeyInfo(keyInfoArray[jk], vi, TRUE);
jk++;
}
else
{
if (nonKeyInfoArray)
{
populateKeyInfo(nonKeyInfoArray[jnk], vi, TRUE);
jnk++;
}
}
}
indexInfoArray[idx].keyInfoArray = keyInfoArray;
indexInfoArray[idx].nonKeyInfoArray = nonKeyInfoArray;
} // for
// get constraint info
str_sprintf(query, "select O.object_name, C.constraint_type, C.col_count, C.constraint_uid, C.enforced from %s.\"%s\".%s O, %s.\"%s\".%s C where O.catalog_name = '%s' and O.schema_name = '%s' and C.table_uid = %Ld and O.object_uid = C.constraint_uid order by 1",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
catName.data(), schName.data(),
objUID);
Queue * constrInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(constrInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableConstraintInfo * constrInfoArray = NULL;
if (constrInfoQueue->numEntries() > 0)
{
constrInfoArray =
new(STMTHEAP) ComTdbVirtTableConstraintInfo[constrInfoQueue->numEntries()];
}
NAString tableCatName = "\"";
tableCatName += catName;
tableCatName += "\"";
NAString tableSchName = "\"";
tableSchName += schName;
tableSchName += "\"";
NAString tableObjName = "\"";
tableObjName += objName;
tableObjName += "\"";
ComObjectName coTableName(tableCatName, tableSchName, tableObjName);
extTableName =
new(STMTHEAP) NAString(coTableName.getExternalName(TRUE));
constrInfoQueue->position();
for (int idx = 0; idx < constrInfoQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)constrInfoQueue->getNext();
char * constrName = (char*)vi->get(0);
char * constrType = (char*)vi->get(1);
Lng32 colCount = *(Lng32*)vi->get(2);
Int64 constrUID = *(Int64*)vi->get(3);
char * enforced = (char*)vi->get(4);
constrInfoArray[idx].baseTableName = (char*)extTableName->data();
NAString cnNas = "\"";
cnNas += constrName;
cnNas += "\"";
ComObjectName coConstrName(tableCatName, tableSchName, cnNas);
NAString * extConstrName =
new(STMTHEAP) NAString(coConstrName.getExternalName(TRUE));
constrInfoArray[idx].constrName = (char*)extConstrName->data();
constrInfoArray[idx].colCount = colCount;
if (strcmp(constrType, COM_UNIQUE_CONSTRAINT_LIT) == 0)
constrInfoArray[idx].constrType = 0; // unique_constr
else if (strcmp(constrType, COM_FOREIGN_KEY_CONSTRAINT_LIT) == 0)
constrInfoArray[idx].constrType = 1; // ref_constr
else if (strcmp(constrType, COM_CHECK_CONSTRAINT_LIT) == 0)
constrInfoArray[idx].constrType = 2; // check_constr
else if (strcmp(constrType, COM_PRIMARY_KEY_CONSTRAINT_LIT) == 0)
constrInfoArray[idx].constrType = 3; // pkey_constr
if (strcmp(enforced, COM_YES_LIT) == 0)
constrInfoArray[idx].isEnforced = 1;
else
constrInfoArray[idx].isEnforced = 0;
Queue * keyInfoQueue = NULL;
str_sprintf(query, "select column_name, column_number, keyseq_number, ordering , cast(0 as int not null) from %s.\"%s\".%s where object_uid = %Ld for read committed access order by keyseq_number",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_KEYS,
constrUID);
cliRC = cliInterface.initializeInfoList(keyInfoQueue, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
cliRC = cliInterface.fetchAllRows(keyInfoQueue, query);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableKeyInfo * keyInfoArray = NULL;
if (colCount > 0)
{
keyInfoArray =
new(STMTHEAP) ComTdbVirtTableKeyInfo[colCount];
keyInfoQueue->position();
Lng32 jk = 0;
for (Lng32 j = 0; j < keyInfoQueue->numEntries(); j++)
{
OutputInfo * vi = (OutputInfo*)keyInfoQueue->getNext();
populateKeyInfo(keyInfoArray[jk], vi, TRUE);
jk++;
}
}
constrInfoArray[idx].keyInfoArray = keyInfoArray;
constrInfoArray[idx].numRingConstr = 0;
constrInfoArray[idx].ringConstrArray = NULL;
constrInfoArray[idx].numRefdConstr = 0;
constrInfoArray[idx].refdConstrArray = NULL;
constrInfoArray[idx].checkConstrLen = 0;
constrInfoArray[idx].checkConstrText = NULL;
// attach all the referencing constraints
if ((strcmp(constrType, COM_UNIQUE_CONSTRAINT_LIT) == 0) ||
(strcmp(constrType, COM_PRIMARY_KEY_CONSTRAINT_LIT) == 0))
{
str_sprintf(query, "select trim(O.catalog_name || '.' || '\"' || O.schema_name || '\"' || '.' || '\"' || O.object_name || '\"' ) constr_name, trim(O2.catalog_name || '.' || '\"' || O2.schema_name || '\"' || '.' || '\"' || O2.object_name || '\"' ) table_name from %s.\"%s\".%s U, %s.\"%s\".%s O, %s.\"%s\".%s O2, %s.\"%s\".%s T where O.object_uid = U.foreign_constraint_uid and O2.object_uid = T.table_uid and T.constraint_uid = U.foreign_constraint_uid and U.unique_constraint_uid = %Ld order by 2, 1",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_UNIQUE_REF_CONSTR_USAGE,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
constrUID
);
Queue * ringInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(ringInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableRefConstraints * ringInfoArray = NULL;
if (ringInfoQueue->numEntries() > 0)
{
ringInfoArray =
new(STMTHEAP) ComTdbVirtTableRefConstraints[ringInfoQueue->numEntries()];
}
ringInfoQueue->position();
for (Lng32 i = 0; i < ringInfoQueue->numEntries(); i++)
{
OutputInfo * vi = (OutputInfo*)ringInfoQueue->getNext();
ringInfoArray[i].constrName = (char*)vi->get(0);
ringInfoArray[i].baseTableName = (char*)vi->get(1);
}
constrInfoArray[idx].numRingConstr = ringInfoQueue->numEntries();
constrInfoArray[idx].ringConstrArray = ringInfoArray;
}
// attach all the referenced constraints
if (strcmp(constrType, COM_FOREIGN_KEY_CONSTRAINT_LIT) == 0)
{
str_sprintf(query, "select trim(O.catalog_name || '.' || '\"' || O.schema_name || '\"' || '.' || '\"' || O.object_name || '\"' ) constr_name, trim(O2.catalog_name || '.' || '\"' || O2.schema_name || '\"' || '.' || '\"' || O2.object_name || '\"' ) table_name from %s.\"%s\".%s R, %s.\"%s\".%s O, %s.\"%s\".%s O2, %s.\"%s\".%s T where O.object_uid = R.unique_constraint_uid and O2.object_uid = T.table_uid and T.constraint_uid = R.unique_constraint_uid and R.ref_constraint_uid = %Ld order by 2,1",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_REF_CONSTRAINTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_TABLE_CONSTRAINTS,
constrUID
);
Queue * refdInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(refdInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableRefConstraints * refdInfoArray = NULL;
if (refdInfoQueue->numEntries() > 0)
{
refdInfoArray =
new(STMTHEAP) ComTdbVirtTableRefConstraints[refdInfoQueue->numEntries()];
}
refdInfoQueue->position();
for (Lng32 i = 0; i < refdInfoQueue->numEntries(); i++)
{
OutputInfo * vi = (OutputInfo*)refdInfoQueue->getNext();
refdInfoArray[i].constrName = (char*)vi->get(0);
refdInfoArray[i].baseTableName = (char*)vi->get(1);
}
constrInfoArray[idx].numRefdConstr = refdInfoQueue->numEntries();
constrInfoArray[idx].refdConstrArray = refdInfoArray;
}
if (strcmp(constrType, COM_CHECK_CONSTRAINT_LIT) == 0)
{
NAString constrText;
if (getTextFromMD(&cliInterface, constrUID, COM_CHECK_CONSTR_TEXT, 0,
constrText))
{
processReturn();
return NULL;
}
char * ct = new(STMTHEAP) char[constrText.length()+1];
memcpy(ct, constrText.data(), constrText.length());
ct[constrText.length()] = 0;
constrInfoArray[idx].checkConstrLen = constrText.length();
constrInfoArray[idx].checkConstrText = ct;
}
} // for
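// If this object is a view, read its attributes and view text.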
str_sprintf(query, "select check_option, is_updatable, is_insertable from %s.\"%s\".%s where view_uid = %Ld for read committed access ",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_VIEWS,
objUID);
Queue * viewInfoQueue = NULL;
cliRC = cliInterface.fetchAllRows(viewInfoQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
processReturn();
return NULL;
}
ComTdbVirtTableViewInfo * viewInfoArray = NULL;
if (viewInfoQueue->numEntries() > 0)
{
// must have only one entry
if (viewInfoQueue->numEntries() > 1)
{
processReturn();
return NULL;
}
viewInfoArray = new(STMTHEAP) ComTdbVirtTableViewInfo[1];
viewInfoQueue->position();
OutputInfo * vi = (OutputInfo*)viewInfoQueue->getNext();
char * checkOption = (char*)vi->get(0);
Lng32 isUpdatable = *(Lng32*)vi->get(1);
Lng32 isInsertable = *(Lng32*)vi->get(2);
viewInfoArray[0].viewName = (char*)extTableName->data();
if (NAString(checkOption) != COM_NONE_CHECK_OPTION_LIT)
{
viewInfoArray[0].viewCheckText = new(STMTHEAP) char[strlen(checkOption) + 1];
strcpy(viewInfoArray[0].viewCheckText, checkOption);
}
else
viewInfoArray[0].viewCheckText = NULL;
viewInfoArray[0].isUpdatable = isUpdatable;
viewInfoArray[0].isInsertable = isInsertable;
// get view text from TEXT table
NAString viewText;
if (getTextFromMD(&cliInterface, objUID, COM_VIEW_TEXT, 0, viewText))
{
processReturn();
return NULL;
}
viewInfoArray[0].viewText = new(STMTHEAP) char[viewText.length() + 1];
strcpy(viewInfoArray[0].viewText, viewText.data());
}
ComTdbVirtTableSequenceInfo * seqInfo = NULL;
if (identityColPos >= 0)
{
NAString seqName;
SequenceGeneratorAttributes::genSequenceName
(catName, schName, objName, colInfoArray[identityColPos].colName,
seqName);
NAString extSeqName;
Int32 objectOwner;
Int64 seqUID;
seqInfo = getSeabaseSequenceInfo(catName, schName, seqName,
extSeqName, objectOwner, schemaOwner, seqUID);
}
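// Assemble the table-level attributes gathered above into the tableInfo struct.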
ComTdbVirtTableTableInfo * tableInfo = new(STMTHEAP) ComTdbVirtTableTableInfo[1];
tableInfo->tableName = extTableName->data();
tableInfo->createTime = 0;
tableInfo->redefTime = 0;
tableInfo->objUID = objUID;
tableInfo->isAudited = (isAudited ? -1 : 0);
tableInfo->validDef = 1;
tableInfo->objOwnerID = objectOwner;
tableInfo->schemaOwnerID = schemaOwner;
tableInfo->numSaltPartns = numSaltPartns;
tableInfo->hbaseCreateOptions =
(hbaseCreateOptions->isNull() ? NULL : hbaseCreateOptions->data());
tableInfo->rowFormat = (alignedFormat ? COM_ALIGNED_FORMAT_TYPE : COM_HBASE_FORMAT_TYPE);
if (NOT colFamStr.isNull())
{
char colFamBuf[1000];
char * colFamBufPtr = colFamBuf;
strcpy(colFamBufPtr, colFamStr.data());
strsep(&colFamBufPtr, " ");
tableInfo->defaultColFam = colFamBuf;
tableInfo->allColFams = colFamBufPtr;
}
else
{
tableInfo->defaultColFam = SEABASE_DEFAULT_COL_FAMILY;
tableInfo->allColFams = NULL;
}
tableInfo->objectFlags = objectFlags;
tableDesc =
Generator::createVirtualTableDesc
(
extTableName->data(), //objName,
numCols,
colInfoArray,
tableKeyInfo->numEntries(), //keyIndex,
keyInfoArray,
constrInfoQueue->numEntries(),
constrInfoArray,
indexInfoQueue->numEntries(),
indexInfoArray,
viewInfoQueue->numEntries(),
viewInfoArray,
tableInfo,
seqInfo);
// reset the SMD table flag
tableDesc->body.table_desc.issystemtablecode = 0;
if ( tableDesc ) {
// request the default
ExpHbaseInterface* ehi =CmpSeabaseDDL::allocEHI();
ByteArrayList* bal = ehi->getRegionEndKeys(extNameForHbase);
// create a list of region descriptors
((table_desc_struct*)tableDesc)->hbase_regionkey_desc =
assembleDescs(bal, populateRegionDescAsRANGE, STMTHEAP);
delete bal;
// if this is base table or index and hbase object doesn't exist, then this object
// is corrupted.
if (!(objectFlags & SEABASE_OBJECT_IS_EXTERNAL_HIVE) &&
!(objectFlags & SEABASE_OBJECT_IS_EXTERNAL_HBASE))
{
if ((tableDesc->body.table_desc.objectType == COM_BASE_TABLE_OBJECT) &&
(existsInHbase(extNameForHbase, ehi) == 0))
{
*CmpCommon::diags() << DgSqlCode(-4254)
<< DgString0(*extTableName);
tableDesc = NULL;
return NULL;
}
}
if (ctlFlags & GET_SNAPSHOTS)
{
char * snapName = NULL;
Lng32 retcode = ehi->getLatestSnapshot(extNameForHbase.data(), snapName, STMTHEAP);
if (retcode < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::getLatestSnapshot()")
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr().data());
delete ehi;
}
if (snapName != NULL)
{
tableDesc->body.table_desc.snapshotName=snapName;
}
}
//test return code
CmpSeabaseDDL::deallocEHI(ehi);
}
if (! tableDesc)
processReturn();
return tableDesc;
}
desc_struct * CmpSeabaseDDL::getSeabaseTableDesc(const NAString &catName,
const NAString &schName,
const NAString &objName,
const ComObjectType objType,
NABoolean includeInvalidDefs)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
if ((CmpCommon::context()->isUninitializedSeabase()) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
if (CmpCommon::context()->uninitializedSeabaseErrNum() == -1398)
*CmpCommon::diags() << DgSqlCode(CmpCommon::context()->uninitializedSeabaseErrNum())
<< DgInt0(CmpCommon::context()->hbaseErrNum())
<< DgString0(CmpCommon::context()->hbaseErrStr());
else
*CmpCommon::diags() << DgSqlCode(CmpCommon::context()->uninitializedSeabaseErrNum());
return NULL;
}
desc_struct *tDesc = NULL;
NABoolean isMDTable = (isSeabaseMD(catName, schName, objName) ||
isSeabasePrivMgrMD(catName, schName));
if (isMDTable)
{
if (! CmpCommon::context()->getTrafMDDescsInfo())
{
*CmpCommon::diags() << DgSqlCode(-1428);
return NULL;
}
tDesc = getSeabaseMDTableDesc(catName, schName, objName, objType);
// Could not find this metadata object in the static predefined structs.
// It could be a metadata view or other objects created in MD schema.
// Look for it as a regular object.
}
else if ((objName == HBASE_HIST_NAME) ||
(objName == HBASE_HISTINT_NAME))
{
NAString tabName = catName;
tabName += ".";
tabName += schName;
tabName += ".";
tabName += objName;
if (existsInHbase(tabName))
{
tDesc = getSeabaseHistTableDesc(catName, schName, objName);
}
return tDesc;
}
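// If no descriptor was produced above, read the object's definition from
// the metadata tables, switching to the META compiler context first.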
if (! tDesc)
{
if ((CmpCommon::context()->isUninitializedSeabase()) &&
(!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
if (CmpCommon::context()->uninitializedSeabaseErrNum() == -1398)
*CmpCommon::diags() << DgSqlCode(CmpCommon::context()->uninitializedSeabaseErrNum())
<< DgInt0(CmpCommon::context()->hbaseErrNum())
<< DgString0(CmpCommon::context()->hbaseErrStr());
else
*CmpCommon::diags() << DgSqlCode(CmpCommon::context()->uninitializedSeabaseErrNum());
}
else
{
if (switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
return NULL;
switch (objType)
{
case COM_SEQUENCE_GENERATOR_OBJECT:
tDesc = getSeabaseSequenceDesc(catName, schName, objName);
break;
case COM_LIBRARY_OBJECT:
tDesc = getSeabaseLibraryDesc(catName, schName, objName);
break;
default:
tDesc = getSeabaseUserTableDesc(catName, schName, objName,
objType, includeInvalidDefs,
GET_SNAPSHOTS /* get snapshot */);
}
switchBackCompiler();
}
}
return tDesc;
}
//
// Produce a list of desc_struct objects. In each object, the body_struct
// field points at hbase_region_desc. The order of the keyinfo, obtained from
// org.apache.hadoop.hbase.client.HTable.getEndKey(), is preserved.
//
// Allocate space from STMTHEAP, per the call of this function
// in CmpSeabaseDDL::getSeabaseTableDesc() and the
// Generator::createVirtualTableDesc() call made before this one that
// uses STMTHEAP throughout.
//
desc_struct* assembleDescs(ByteArrayList* bal, populateFuncT func, NAMemory* heap)
{
if ( !bal )
return NULL;
desc_struct *result = NULL;
Int32 entries = bal->getSize();
Int32 len = 0;
char* buf = NULL;
for (Int32 i=entries-1; i>=0; i-- ) {
// call JNI interface
len = bal->getEntrySize(i);
if ( len > 0 ) {
buf = new (heap) char[len];
Int32 datalen;
if ( !bal->getEntry(i, buf, len, datalen) || datalen != len ) {
return NULL;
}
} else
buf = NULL;
desc_struct* wrapper = NULL;
wrapper = new (heap) desc_struct();
wrapper->header.OSV = 0; // TBD
wrapper->header.OFV = 0; // TBD
(*func)(buf, len, wrapper, heap);
wrapper->header.next = result;
result = wrapper;
}
return result;
}
// a wrapper method to getSeabaseRoutineDescInternal so
// CmpContext context switching can take place.
// getSeabaseRoutineDescInternal prepares and executes
// several queries on metadata tables
desc_struct *CmpSeabaseDDL::getSeabaseRoutineDesc(const NAString &catName,
const NAString &schName,
const NAString &objName)
{
desc_struct *result = NULL;
if (switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
return NULL;
result = getSeabaseRoutineDescInternal(catName, schName, objName);
switchBackCompiler();
return result;
}
desc_struct *CmpSeabaseDDL::getSeabaseRoutineDescInternal(const NAString &catName,
const NAString &schName,
const NAString &objName)
{
Lng32 retcode = 0;
Lng32 cliRC = 0;
desc_struct *result;
char query[4000];
char buf[4000];
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Int64 objectUID = 0;
Int32 objectOwnerID = 0;
Int32 schemaOwnerID = 0;
Int64 objectFlags = 0 ;
ComObjectType objectType = COM_USER_DEFINED_ROUTINE_OBJECT;
objectUID = getObjectInfo(&cliInterface,
catName.data(), schName.data(),
objName.data(), objectType,
objectOwnerID,schemaOwnerID,objectFlags);
if (objectUID == -1 || objectOwnerID == 0)
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
SEABASEDDL_INTERNAL_ERROR("getting object UID and owners for routine desc request");
processReturn();
return NULL;
}
str_sprintf(buf, "select udr_type, language_type, deterministic_bool,"
" sql_access, call_on_null, isolate_bool, param_style,"
" transaction_attributes, max_results, state_area_size, external_name,"
" parallelism, user_version, external_security, execution_mode,"
" library_filename, version, signature, catalog_name, schema_name,"
" object_name"
" from %s.\"%s\".%s r, %s.\"%s\".%s l, %s.\"%s\".%s o "
" where r.udr_uid = %Ld and r.library_uid = l.library_uid "
" and l.library_uid = o.object_uid for read committed access",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_ROUTINES,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_LIBRARIES,
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
objectUID);
cliRC = cliInterface.fetchRowsPrologue(buf, TRUE/*no exec*/);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return NULL;
}
cliRC = cliInterface.clearExecFetchClose(NULL, 0);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return NULL;
}
if (cliRC == 100) // did not find the row
{
*CmpCommon::diags() << DgSqlCode(-CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
<< DgString0(objName);
return NULL;
}
char * ptr = NULL;
Lng32 len = 0;
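// Copy the routine attributes from the fetched metadata row into routineInfo.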
ComTdbVirtTableRoutineInfo *routineInfo = new (STMTHEAP) ComTdbVirtTableRoutineInfo();
routineInfo->object_uid = objectUID;
routineInfo->object_owner_id = objectOwnerID;
routineInfo->schema_owner_id = schemaOwnerID;
routineInfo->routine_name = objName.data();
cliInterface.getPtrAndLen(1, ptr, len);
str_cpy_all(routineInfo->UDR_type, ptr, len);
routineInfo->UDR_type[len] = '\0';
cliInterface.getPtrAndLen(2, ptr, len);
str_cpy_all(routineInfo->language_type, ptr, len);
routineInfo->language_type[len] = '\0';
cliInterface.getPtrAndLen(3, ptr, len);
if (*ptr == 'Y')
routineInfo->deterministic = 1;
else
routineInfo->deterministic = 0;
cliInterface.getPtrAndLen(4, ptr, len);
str_cpy_all(routineInfo->sql_access, ptr, len);
routineInfo->sql_access[len] = '\0';
cliInterface.getPtrAndLen(5, ptr, len);
if (*ptr == 'Y')
routineInfo->call_on_null = 1;
else
routineInfo->call_on_null = 0;
cliInterface.getPtrAndLen(6, ptr, len);
if (*ptr == 'Y')
routineInfo->isolate = 1;
else
routineInfo->isolate = 0;
cliInterface.getPtrAndLen(7, ptr, len);
str_cpy_all(routineInfo->param_style, ptr, len);
routineInfo->param_style[len] = '\0';
cliInterface.getPtrAndLen(8, ptr, len);
str_cpy_all(routineInfo->transaction_attributes, ptr, len);
routineInfo->transaction_attributes[len] = '\0';
cliInterface.getPtrAndLen(9, ptr, len);
routineInfo->max_results = *(Int32 *)ptr;
cliInterface.getPtrAndLen(10, ptr, len);
routineInfo->state_area_size = *(Int32 *)ptr;
cliInterface.getPtrAndLen(11, ptr, len);
routineInfo->external_name = new (STMTHEAP) char[len+1];
str_cpy_and_null((char *)routineInfo->external_name, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(12, ptr, len);
str_cpy_all(routineInfo->parallelism, ptr, len);
routineInfo->parallelism[len] = '\0';
cliInterface.getPtrAndLen(13, ptr, len);
str_cpy_all(routineInfo->user_version, ptr, len);
routineInfo->user_version[len] = '\0';
cliInterface.getPtrAndLen(14, ptr, len);
str_cpy_all(routineInfo->external_security, ptr, len);
routineInfo->external_security[len] = '\0';
cliInterface.getPtrAndLen(15, ptr, len);
str_cpy_all(routineInfo->execution_mode, ptr, len);
routineInfo->execution_mode[len] = '\0';
cliInterface.getPtrAndLen(16, ptr, len);
routineInfo->library_filename = new (STMTHEAP) char[len+1];
str_cpy_and_null((char *)routineInfo->library_filename, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(17, ptr, len);
routineInfo->library_version = *(Int32 *)ptr;
cliInterface.getPtrAndLen(18, ptr, len);
routineInfo->signature = new (STMTHEAP) char[len+1];
str_cpy_and_null((char *)routineInfo->signature, ptr, len, '\0', ' ', TRUE);
// library SQL name, in three parts
cliInterface.getPtrAndLen(19, ptr, len);
char *libCat = new (STMTHEAP) char[len+1];
str_cpy_and_null(libCat, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(20, ptr, len);
char *libSch = new (STMTHEAP) char[len+1];
str_cpy_and_null(libSch, ptr, len, '\0', ' ', TRUE);
cliInterface.getPtrAndLen(21, ptr, len);
char *libObj = new (STMTHEAP) char[len+1];
str_cpy_and_null(libObj, ptr, len, '\0', ' ', TRUE);
ComObjectName libSQLName(libCat, libSch, libObj,
COM_UNKNOWN_NAME,
ComAnsiNamePart::INTERNAL_FORMAT,
STMTHEAP);
NAString libSQLExtName = libSQLName.getExternalName();
routineInfo->library_sqlname = new (STMTHEAP) char[libSQLExtName.length()+1];
str_cpy_and_null((char *)routineInfo->library_sqlname,
libSQLExtName.data(),
libSQLExtName.length(),
'\0', ' ', TRUE);
ComTdbVirtTableColumnInfo *paramsArray;
Lng32 numParams;
char direction[50];
str_sprintf(direction, "'%s', '%s', '%s'",
COM_INPUT_PARAM_LIT, COM_OUTPUT_PARAM_LIT,
COM_INOUT_PARAM_LIT);
// Params
if (getSeabaseColumnInfo(&cliInterface,
objectUID,
catName, schName, objName,
(char *)direction,
NULL,
NULL,
&numParams,
¶msArray) < 0)
{
processReturn();
return NULL;
}
desc_struct *routine_desc = NULL;
routine_desc = Generator::createVirtualRoutineDesc(
objName.data(),
routineInfo,
numParams,
paramsArray);
if (routine_desc == NULL)
processReturn();
return routine_desc;
}
// *****************************************************************************
// * *
// * Function: checkSpecifiedPrivs *
// * *
// * Processes the privilege specification and returns the lists of object *
// * and column privileges. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <privActsArray> ElemDDLPrivActArray & In *
// * is a reference to the parsed list of privileges to be granted or *
// * revoked. *
// * *
// * <externalObjectName> const char * In *
// * is the fully qualified name of the object that privileges are being *
// * granted or revoked on. *
// * *
// * <objectType> ComObjectType In *
// * is the type of the object that privileges are being granted or *
// * revoked on. *
// * *
// * <naTable> NATable * In *
// * if the object type is a table or view, the cache for the metadata *
// * related to the object, otherwise NULL. *
// * *
// * <objectPrivs> std::vector<PrivType> & Out *
// * passes back a list of the object privileges to be granted or revoked. *
// * *
// * <colPrivs> std::vector<ColPrivSpec> & Out *
// * passes back a list of the column privileges and the specific columns *
// * on which the privileges are to be granted or revoked. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Privileges processed successfully. Lists of object and column *
// * privileges were returned. *
// * false: Error processing privileges. The error is in the diags area. *
// * *
// *****************************************************************************
static bool checkSpecifiedPrivs(
ElemDDLPrivActArray & privActsArray,
const char * externalObjectName,
ComObjectType objectType,
NATable * naTable,
std::vector<PrivType> & objectPrivs,
std::vector<ColPrivSpec> & colPrivs)
{
for (Lng32 i = 0; i < privActsArray.entries(); i++)
{
// Currently only DML privileges are supported.
PrivType privType;
if (!ElmPrivToPrivType(privActsArray[i]->getOperatorType(),privType) ||
!isDMLPrivType(privType))
{
*CmpCommon::diags() << DgSqlCode(-CAT_INVALID_PRIV_FOR_OBJECT)
<< DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str())
<< DgString1(externalObjectName);
return false;
}
//
// The same privilege cannot be specified twice in one grant or revoke
// statement. This includes granting or revoking the same privilege at
// the object-level and the column-level.
if (hasValue(objectPrivs,privType) || hasValue(colPrivs,privType))
{
*CmpCommon::diags() << DgSqlCode(-CAT_DUPLICATE_PRIVILEGES);
return false;
}
if (!isValidPrivTypeForObject(objectType,privType) && privType != PrivType::ALL_DML)
{
*CmpCommon::diags() << DgSqlCode(-CAT_PRIVILEGE_NOT_ALLOWED_FOR_THIS_OBJECT_TYPE)
<< DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str());
return false;
}
// For some DML privileges the user may be granting either column
// or object privileges. If it is not a privilege that can be granted
// at the column level, it is an object-level privilege.
if (!isColumnPrivType(privType))
{
objectPrivs.push_back(privType);
continue;
}
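// This privilege can also be granted at the column level; check whether
// specific columns were named.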
ElemDDLPrivActWithColumns * privActWithColumns = dynamic_cast<ElemDDLPrivActWithColumns *>(privActsArray[i]);
ElemDDLColNameArray colNameArray = privActWithColumns->getColumnNameArray();
// If no columns were specified, this is an object-level privilege.
if (colNameArray.entries() == 0)
{
objectPrivs.push_back(privType);
continue;
}
// Column-level privileges can only be specified for tables and views.
if (objectType != COM_BASE_TABLE_OBJECT && objectType != COM_VIEW_OBJECT)
{
*CmpCommon::diags() << DgSqlCode(-CAT_INCORRECT_OBJECT_TYPE)
<< DgTableName(externalObjectName);
return false;
}
// It's a table or view, validate the column. Get the list of
// columns and verify the list contains the specified column(s).
const NAColumnArray &nacolArr = naTable->getNAColumnArray();
for (size_t c = 0; c < colNameArray.entries(); c++)
{
const NAColumn * naCol = nacolArr.getColumn(colNameArray[c]->getColumnName());
if (naCol == NULL)
{
*CmpCommon::diags() << DgSqlCode(-CAT_COLUMN_DOES_NOT_EXIST_ERROR)
<< DgColumnName(colNameArray[c]->getColumnName());
return false;
}
// Specified column was found.
ColPrivSpec colPrivEntry;
colPrivEntry.privType = privType;
colPrivEntry.columnOrdinal = naCol->getPosition();
colPrivs.push_back(colPrivEntry);
}
}
return true;
}
//************************ End of checkSpecifiedPrivs **************************
// *****************************************************************************
// * *
// * Function: ElmPrivToPrivType *
// * *
// * This function maps a parser privilege enum (ELM_PRIV_ACT) to a Privilege *
// * Manager PrivType. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <elmPriv> OperatorTypeEnum In *
// * is a parser privilege enum. *
// * *
// * <privType> PrivType & Out *
// * passes back the CatPrivBitmap privilege enum. *
// * *
// * <forRevoke> bool [In] *
// * is true if this is part of a revoke command, otherwise false. Default *
// * to true. Currently unused, placeholder for schema and DDL privileges. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Privilege converted *
// * false: Privilege not recognized. *
// * *
// *****************************************************************************
static bool ElmPrivToPrivType(
OperatorTypeEnum elmPriv,
PrivType & privType,
bool forRevoke)
{
switch (elmPriv)
{
case ELM_PRIV_ACT_DELETE_ELEM:
privType = PrivType::DELETE_PRIV;
break;
case ELM_PRIV_ACT_EXECUTE_ELEM:
privType = PrivType::EXECUTE_PRIV;
break;
case ELM_PRIV_ACT_INSERT_ELEM:
privType = PrivType::INSERT_PRIV;
break;
case ELM_PRIV_ACT_REFERENCES_ELEM:
privType = PrivType::REFERENCES_PRIV;
break;
case ELM_PRIV_ACT_SELECT_ELEM:
privType = PrivType::SELECT_PRIV;
break;
case ELM_PRIV_ACT_UPDATE_ELEM:
privType = PrivType::UPDATE_PRIV;
break;
case ELM_PRIV_ACT_USAGE_ELEM:
privType = PrivType::USAGE_PRIV;
break;
case ELM_PRIV_ACT_ALTER_ELEM:
// if (forRevoke)
// privType = PrivType::ALL_ALTER;
// else
privType = PrivType::ALTER_PRIV;
break;
case ELM_PRIV_ACT_CREATE_ELEM:
// if (forRevoke)
// privType = PrivType::ALL_CREATE;
// else
privType = PrivType::CREATE_PRIV;
break;
case ELM_PRIV_ACT_DROP_ELEM:
// if (forRevoke)
// privType = PrivType::ALL_DROP;
// else
privType = PrivType::DROP_PRIV;
break;
case ELM_PRIV_ACT_ALL_DDL_ELEM:
privType = PrivType::ALL_DDL;
break;
case ELM_PRIV_ACT_ALL_DML_ELEM:
privType = PrivType::ALL_DML;
break;
case ELM_PRIV_ACT_ALL_OTHER_ELEM:
privType = PrivType::ALL_PRIVS;
break;
default:
return false;
}
return true;
}
//************************* End of ElmPrivToPrivType ***************************
// *****************************************************************************
// * *
// * Function: hasValue *
// * *
// * This function determines if a ColPrivSpec vector contains a PrivType *
// * value. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <container> std::vector<ColPrivSpec> In *
// * is the vector of ColPrivSpec values. *
// * *
// * <value> PrivType In *
// * is the value to be compared against existing values in the vector. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Vector contains the value. *
// * false: Vector does not contain the value. *
// * *
// *****************************************************************************
static bool hasValue(
const std::vector<ColPrivSpec> & container,
PrivType value)
{
for (size_t index = 0; index < container.size(); index++)
if (container[index].privType == value)
return true;
return false;
}
//***************************** End of hasValue ********************************
// *****************************************************************************
// * *
// * Function: hasValue *
// * *
// * This function determines if a PrivType vector contains a PrivType value.*
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <container> std::vector<PrivType> In *
// * is the vector of 32-bit values. *
// * *
// * <value> PrivType In *
// * is the value to be compared against existing values in the vector. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Vector contains the value. *
// * false: Vector does not contain the value. *
// * *
// *****************************************************************************
static bool hasValue(
const std::vector<PrivType> & container,
PrivType value)
{
for (size_t index = 0; index < container.size(); index++)
if (container[index] == value)
return true;
return false;
}
//***************************** End of hasValue ********************************
// *****************************************************************************
// * *
// * Function: isMDGrantRevokeOK *
// * *
// * This function determines if a grant or revoke a privilege to/from a *
// * metadata table should be allowed. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <objectPrivs> const std::vector<PrivType> & In *
// * is a vector of object-level privileges. *
// * *
// * <colPrivs> const std::vector<ColPrivSpec> & In *
// * is a vector of column-level privileges. *
// * *
// * <isGrant> bool In *
// * is a true if this is a grant operation, false if revoke. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Grant/revoke is OK. *
// * false: Grant/revoke should be rejected. *
// * *
// *****************************************************************************
static bool isMDGrantRevokeOK(
const std::vector<PrivType> & objectPrivs,
const std::vector<ColPrivSpec> & colPrivs,
bool isGrant)
{
// Can only grant or revoke privileges on MD tables if only granting select,
// or only revoking all privileges. Only valid combination is no object
// privileges and 1 or more column privileges (all SELECT), or no column
// privilege and exactly one object privilege. In the latter case, the
// privilege must either be SELECT, or if a REVOKE operation, either
// ALL_PRIVS or ALL_DML.
// First check if no column privileges.
if (colPrivs.size() == 0)
{
// Should never get this far with both vectors being empty, but check
// just in case.
if (objectPrivs.size() == 0)
return false;
if (objectPrivs.size() > 1)
return false;
if (objectPrivs[0] == SELECT_PRIV)
return true;
if (isGrant)
return false;
if (objectPrivs[0] == ALL_PRIVS || objectPrivs[0] == ALL_DML)
return true;
return false;
}
// Have column privs
if (objectPrivs.size() > 0)
return false;
for (size_t i = 0; i < colPrivs.size(); i++)
if (colPrivs[i].privType != SELECT_PRIV)
return false;
return true;
}
//************************* End of isMDGrantRevokeOK ***************************
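// Illustrative sketch only, not part of the original source: how the rules
// described above play out for a few inputs (assuming the PrivType and
// ColPrivSpec definitions used elsewhere in this file).
//
//   std::vector<PrivType> objPrivs = {SELECT_PRIV};
//   std::vector<ColPrivSpec> colPrivs;
//   isMDGrantRevokeOK(objPrivs, colPrivs, true);   // true: granting SELECT only
//
//   objPrivs = {ALL_DML};
//   isMDGrantRevokeOK(objPrivs, colPrivs, true);   // false: ALL_DML allowed only on revoke
//   isMDGrantRevokeOK(objPrivs, colPrivs, false);  // true
//
//   objPrivs = {SELECT_PRIV};
//   colPrivs.push_back(ColPrivSpec());             // any column priv entry
//   isMDGrantRevokeOK(objPrivs, colPrivs, true);   // false: object and column privs mixed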
// *****************************************************************************
// * *
// * Function: isValidPrivTypeForObject *
// * *
// * This function determines if a priv type is valid for an object. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <objectType> ComObjectType In *
// * is the type of the object. *
// * *
// * <privType> PrivType In *
// * is the type of the privilege. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Priv type is valid for object. *
// * false: Priv type is not valid for object. *
// * *
// *****************************************************************************
static bool isValidPrivTypeForObject(
ComObjectType objectType,
PrivType privType)
{
switch (objectType)
{
case COM_LIBRARY_OBJECT:
return isLibraryPrivType(privType);
case COM_STORED_PROCEDURE_OBJECT:
case COM_USER_DEFINED_ROUTINE_OBJECT:
return isUDRPrivType(privType);
case COM_SEQUENCE_GENERATOR_OBJECT:
return isSequenceGeneratorPrivType(privType);
case COM_BASE_TABLE_OBJECT:
case COM_VIEW_OBJECT:
return isTablePrivType(privType);
default:
return false;
}
return false;
}
//********************* End of isValidPrivTypeForObject ************************
| 1 | 10,334 | Just wondering whether it's safe to send all CQDs to the child compiler. As this missing CQD is causing trouble, maybe some other user CQDs that now get sent could also cause the statement to fail? Also, if I have a table with a nullable unique column, should I have to set this CQD to create another table like it, or should the CREATE TABLE LIKE work without the CQD? | apache-trafodion | cpp |
@@ -1,11 +1,16 @@
import os
+import packaging.version
+import requests
+
from ..defines import GCP_CREDS_LOCAL_FILE
from ..module_build_spec import ModuleBuildSpec
from ..utils import connect_sibling_docker_container, network_buildkite_container
from .test_images import publish_test_images, test_image_depends_fn
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
+DAGSTER_FROM_SOURCE = "current_branch"
+DAGSTER_EARLIEST_RELEASE = "0.12.4"
def integration_suite_extra_cmds_fn(version): | 1 | import os
from ..defines import GCP_CREDS_LOCAL_FILE
from ..module_build_spec import ModuleBuildSpec
from ..utils import connect_sibling_docker_container, network_buildkite_container
from .test_images import publish_test_images, test_image_depends_fn
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
def integration_suite_extra_cmds_fn(version):
return [
'export AIRFLOW_HOME="/airflow"',
"mkdir -p $${AIRFLOW_HOME}",
"export DAGSTER_DOCKER_IMAGE_TAG=$${BUILDKITE_BUILD_ID}-" + version,
'export DAGSTER_DOCKER_REPOSITORY="$${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com"',
"aws ecr get-login --no-include-email --region us-west-2 | sh",
r"aws s3 cp s3://\${BUILDKITE_SECRETS_BUCKET}/gcp-key-elementl-dev.json "
+ GCP_CREDS_LOCAL_FILE,
"export GOOGLE_APPLICATION_CREDENTIALS=" + GCP_CREDS_LOCAL_FILE,
"pushd python_modules/libraries/dagster-celery",
# Run the rabbitmq db. We are in docker running docker
# so this will be a sibling container.
"docker-compose up -d --remove-orphans", # clean up in hooks/pre-exit,
# Can't use host networking on buildkite and communicate via localhost
# between these sibling containers, so pass along the ip.
network_buildkite_container("rabbitmq"),
connect_sibling_docker_container("rabbitmq", "test-rabbitmq", "DAGSTER_CELERY_BROKER_HOST"),
"popd",
]
def integration_steps():
tests = []
tests += publish_test_images()
tests += ModuleBuildSpec(
os.path.join("integration_tests", "python_modules", "dagster-k8s-test-infra"),
upload_coverage=True,
).get_tox_build_steps()
integration_suites_root = os.path.join(
SCRIPT_PATH, "..", "..", "..", "..", "integration_tests", "test_suites"
)
integration_suites = [
os.path.join("integration_tests", "test_suites", suite)
for suite in os.listdir(integration_suites_root)
]
for integration_suite in integration_suites:
tox_env_suffixes = None
upload_coverage = False
if integration_suite == os.path.join(
"integration_tests", "test_suites", "k8s-integration-test-suite"
):
tox_env_suffixes = ["-default"]
upload_coverage = True
elif integration_suite == os.path.join(
"integration_tests", "test_suites", "celery-k8s-integration-test-suite"
):
tox_env_suffixes = [
"-default",
"-markusercodedeployment",
"-markusercodedeploymentsubchart",
"-markdaemon",
]
upload_coverage = True
tests += ModuleBuildSpec(
integration_suite,
env_vars=[
"AIRFLOW_HOME",
"AWS_ACCOUNT_ID",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"BUILDKITE_SECRETS_BUCKET",
"GOOGLE_APPLICATION_CREDENTIALS",
],
upload_coverage=upload_coverage,
extra_cmds_fn=integration_suite_extra_cmds_fn,
depends_on_fn=test_image_depends_fn,
tox_env_suffixes=tox_env_suffixes,
retries=2,
).get_tox_build_steps()
return tests
| 1 | 15,106 | EARLIEST_TESTED_RELEASE might be slightly more clear | dagster-io-dagster | py |
@@ -81,7 +81,7 @@ MSG_STATE_CONFIDENCE = 2
# Allow stopping after the first semicolon/hash encountered,
# so that an option can be continued with the reasons
# why it is active or disabled.
-OPTION_RGX = re.compile(r"\s*#.*\bpylint:\s*([^;#]+)[;#]{0,1}")
+OPTION_RGX = re.compile(r'.*#\s*pylint:\s*([^;#]+)[;#]?.*')
# The line/node distinction does not apply to fatal errors and reports.
_SCOPE_EXEMPT = "FR" | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Vincent
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2014-2018 Claudiu Popa <[email protected]>
# Copyright (c) 2014-2015 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 LCD 47 <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2014 Damien Nozay <[email protected]>
# Copyright (c) 2015 Aru Sahni <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Simu Toni <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Ashley Whetter <[email protected]>
# Copyright (c) 2016 xmo-odoo <[email protected]>
# Copyright (c) 2017-2018 hippo91 <[email protected]>
# Copyright (c) 2017 Pierre Sassoulas <[email protected]>
# Copyright (c) 2017 Bryce Guinta <[email protected]>
# Copyright (c) 2017 Chris Lamb <[email protected]>
# Copyright (c) 2017 Anthony Sottile <[email protected]>
# Copyright (c) 2017 Thomas Hisch <[email protected]>
# Copyright (c) 2017 Mikhail Fesenko <[email protected]>
# Copyright (c) 2017 Craig Citro <[email protected]>
# Copyright (c) 2017 Ville Skyttä <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Pierre Sassoulas <[email protected]>
# Copyright (c) 2018 Reverb C <[email protected]>
# Copyright (c) 2018 Nick Drozd <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""some various utilities and helper classes, most of them used in the
main pylint class
"""
from __future__ import print_function
import codecs
import collections
from inspect import cleandoc
import os
from os.path import dirname, basename, splitext, exists, isdir, join, normpath
import re
import sys
import tokenize
import warnings
import textwrap
from astroid import nodes, Module
from astroid import modutils
from pylint.interfaces import IRawChecker, ITokenChecker, UNDEFINED, implements
from pylint.reporters.ureports.nodes import Section
from pylint.exceptions import InvalidMessageError, UnknownMessageError, EmptyReportError
MSG_TYPES = {
"I": "info",
"C": "convention",
"R": "refactor",
"W": "warning",
"E": "error",
"F": "fatal",
}
MSG_TYPES_LONG = {v: k for k, v in MSG_TYPES.items()}
MSG_TYPES_STATUS = {"I": 0, "C": 16, "R": 8, "W": 4, "E": 2, "F": 1}
_MSG_ORDER = "EWRCIF"
MSG_STATE_SCOPE_CONFIG = 0
MSG_STATE_SCOPE_MODULE = 1
MSG_STATE_CONFIDENCE = 2
# Allow stopping after the first semicolon/hash encountered,
# so that an option can be continued with the reasons
# why it is active or disabled.
OPTION_RGX = re.compile(r"\s*#.*\bpylint:\s*([^;#]+)[;#]{0,1}")
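# Illustration only (not part of the original module): with the pattern above,
# a trailing explanation after the first ";" or "#" is left out of the captured
# option text, e.g.
#   "# pylint: disable=invalid-name; short names are fine here"
#   "x = 1  # pylint: disable=missing-docstring"
# both match the pattern and capture just the "disable=..." part in group 1.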
# The line/node distinction does not apply to fatal errors and reports.
_SCOPE_EXEMPT = "FR"
class WarningScope:
LINE = "line-based-msg"
NODE = "node-based-msg"
_MsgBase = collections.namedtuple(
"_MsgBase",
[
"msg_id",
"symbol",
"msg",
"C",
"category",
"confidence",
"abspath",
"path",
"module",
"obj",
"line",
"column",
],
)
class Message(_MsgBase):
"""This class represent a message to be issued by the reporters"""
def __new__(cls, msg_id, symbol, location, msg, confidence):
return _MsgBase.__new__(
cls,
msg_id,
symbol,
msg,
msg_id[0],
MSG_TYPES[msg_id[0]],
confidence,
*location
)
def format(self, template):
"""Format the message according to the given template.
The template format is the one of the format method :
cf. http://docs.python.org/2/library/string.html#formatstrings
"""
# For some reason, _asdict on derived namedtuples does not work with
# Python 3.4. Needs some investigation.
return template.format(**dict(zip(self._fields, self)))
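# Illustrative usage, not part of the original module: given a Message built by
# the linter, a template such as
#   "{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"
# expands each named field of the Message tuple, e.g.
#   "pkg/mod.py:12: [C0103(invalid-name), MyClass.attr] ..."
# (the field names are exactly those listed in _MsgBase above; the values shown
# here are hypothetical).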
def get_module_and_frameid(node):
"""return the module name and the frame id in the module"""
frame = node.frame()
module, obj = "", []
while frame:
if isinstance(frame, Module):
module = frame.name
else:
obj.append(getattr(frame, "name", "<lambda>"))
try:
frame = frame.parent.frame()
except AttributeError:
frame = None
obj.reverse()
return module, ".".join(obj)
def category_id(cid):
cid = cid.upper()
if cid in MSG_TYPES:
return cid
return MSG_TYPES_LONG.get(cid)
def safe_decode(line, encoding, *args, **kwargs):
"""return decoded line from encoding or decode with default encoding"""
try:
return line.decode(encoding or sys.getdefaultencoding(), *args, **kwargs)
except LookupError:
return line.decode(sys.getdefaultencoding(), *args, **kwargs)
def decoding_stream(stream, encoding, errors="strict"):
try:
reader_cls = codecs.getreader(encoding or sys.getdefaultencoding())
except LookupError:
reader_cls = codecs.getreader(sys.getdefaultencoding())
return reader_cls(stream, errors)
def tokenize_module(module):
with module.stream() as stream:
readline = stream.readline
return list(tokenize.tokenize(readline))
def build_message_def(checker, msgid, msg_tuple):
if implements(checker, (IRawChecker, ITokenChecker)):
default_scope = WarningScope.LINE
else:
default_scope = WarningScope.NODE
options = {}
if len(msg_tuple) > 3:
(msg, symbol, descr, options) = msg_tuple
elif len(msg_tuple) > 2:
(msg, symbol, descr) = msg_tuple
else:
# messages should have a symbol, but for backward compatibility
# they may not.
(msg, descr) = msg_tuple
warnings.warn(
"[pylint 0.26] description of message %s doesn't include "
"a symbolic name" % msgid,
DeprecationWarning,
)
symbol = None
options.setdefault("scope", default_scope)
return MessageDefinition(checker, msgid, msg, descr, symbol, **options)
class MessageDefinition:
def __init__(
self,
checker,
msgid,
msg,
descr,
symbol,
scope,
minversion=None,
maxversion=None,
old_names=None,
):
self.checker = checker
if len(msgid) != 5:
raise InvalidMessageError("Invalid message id %r" % msgid)
if not msgid[0] in MSG_TYPES:
raise InvalidMessageError("Bad message type %s in %r" % (msgid[0], msgid))
self.msgid = msgid
self.msg = msg
self.descr = descr
self.symbol = symbol
self.scope = scope
self.minversion = minversion
self.maxversion = maxversion
self.old_names = old_names or []
def __repr__(self):
return "MessageDefinition:{}".format(self.__dict__)
def may_be_emitted(self):
"""return True if message may be emitted using the current interpreter"""
if self.minversion is not None and self.minversion > sys.version_info:
return False
if self.maxversion is not None and self.maxversion <= sys.version_info:
return False
return True
def format_help(self, checkerref=False):
"""return the help string for the given message id"""
desc = self.descr
if checkerref:
desc += " This message belongs to the %s checker." % self.checker.name
title = self.msg
if self.symbol:
msgid = "%s (%s)" % (self.symbol, self.msgid)
else:
msgid = self.msgid
if self.minversion or self.maxversion:
restr = []
if self.minversion:
restr.append("< %s" % ".".join([str(n) for n in self.minversion]))
if self.maxversion:
restr.append(">= %s" % ".".join([str(n) for n in self.maxversion]))
restr = " or ".join(restr)
if checkerref:
desc += " It can't be emitted when using Python %s." % restr
else:
desc += " This message can't be emitted when using Python %s." % restr
desc = _normalize_text(" ".join(desc.split()), indent=" ")
if title != "%s":
title = title.splitlines()[0]
return ":%s: *%s*\n%s" % (msgid, title.rstrip(" "), desc)
return ":%s:\n%s" % (msgid, desc)
class MessagesHandlerMixIn:
"""a mix-in class containing all the messages related methods for the main
lint class
"""
__by_id_managed_msgs = [] # type: ignore
def __init__(self):
self._msgs_state = {}
self.msg_status = 0
def _checker_messages(self, checker):
for known_checker in self._checkers[checker.lower()]:
for msgid in known_checker.msgs:
yield msgid
@classmethod
def clear_by_id_managed_msgs(cls):
cls.__by_id_managed_msgs.clear()
@classmethod
def get_by_id_managed_msgs(cls):
return cls.__by_id_managed_msgs
def _register_by_id_managed_msg(self, msgid, line, is_disabled=True):
"""If the msgid is a numeric one, then register it to inform the user
it could furnish instead a symbolic msgid."""
try:
msg = self.msgs_store.get_message_definition(msgid)
if msgid == msg.msgid:
MessagesHandlerMixIn.__by_id_managed_msgs.append(
(self.current_name, msg.msgid, msg.symbol, line, is_disabled)
)
except UnknownMessageError:
pass
def disable(self, msgid, scope="package", line=None, ignore_unknown=False):
"""don't output message of the given id"""
self._set_msg_status(
msgid, enable=False, scope=scope, line=line, ignore_unknown=ignore_unknown
)
self._register_by_id_managed_msg(msgid, line)
def enable(self, msgid, scope="package", line=None, ignore_unknown=False):
"""reenable message of the given id"""
self._set_msg_status(
msgid, enable=True, scope=scope, line=line, ignore_unknown=ignore_unknown
)
self._register_by_id_managed_msg(msgid, line, is_disabled=False)
def _set_msg_status(
self, msgid, enable, scope="package", line=None, ignore_unknown=False
):
assert scope in ("package", "module")
if msgid == "all":
for _msgid in MSG_TYPES:
self._set_msg_status(_msgid, enable, scope, line, ignore_unknown)
if enable and not self._python3_porting_mode:
# Don't activate the python 3 porting checker if it wasn't activated explicitly.
self.disable("python3")
return
# msgid is a category?
catid = category_id(msgid)
if catid is not None:
for _msgid in self.msgs_store._msgs_by_category.get(catid):
self._set_msg_status(_msgid, enable, scope, line)
return
# msgid is a checker name?
if msgid.lower() in self._checkers:
msgs_store = self.msgs_store
for checker in self._checkers[msgid.lower()]:
for _msgid in checker.msgs:
if _msgid in msgs_store._alternative_names:
self._set_msg_status(_msgid, enable, scope, line)
return
# msgid is report id?
if msgid.lower().startswith("rp"):
if enable:
self.enable_report(msgid)
else:
self.disable_report(msgid)
return
try:
# msgid is a symbolic or numeric msgid.
msg = self.msgs_store.get_message_definition(msgid)
except UnknownMessageError:
if ignore_unknown:
return
raise
if scope == "module":
self.file_state.set_msg_status(msg, line, enable)
if not enable and msg.symbol != "locally-disabled":
self.add_message(
"locally-disabled", line=line, args=(msg.symbol, msg.msgid)
)
else:
msgs = self._msgs_state
msgs[msg.msgid] = enable
# sync configuration object
self.config.enable = [
self._message_symbol(mid) for mid, val in sorted(msgs.items()) if val
]
self.config.disable = [
self._message_symbol(mid)
for mid, val in sorted(msgs.items())
if not val
]
def _message_symbol(self, msgid):
"""Get the message symbol of the given message id
Return the original message id if the message does not
exist.
"""
try:
return self.msgs_store.get_message_definition(msgid).symbol
except UnknownMessageError:
return msgid
def get_message_state_scope(self, msgid, line=None, confidence=UNDEFINED):
"""Returns the scope at which a message was enabled/disabled."""
if self.config.confidence and confidence.name not in self.config.confidence:
return MSG_STATE_CONFIDENCE
try:
if line in self.file_state._module_msgs_state[msgid]:
return MSG_STATE_SCOPE_MODULE
except (KeyError, TypeError):
return MSG_STATE_SCOPE_CONFIG
return None
def is_message_enabled(self, msg_descr, line=None, confidence=None):
"""return true if the message associated to the given message id is
enabled
msgid may be either a numeric or symbolic message id.
"""
if self.config.confidence and confidence:
if confidence.name not in self.config.confidence:
return False
try:
msgid = self.msgs_store.get_message_definition(msg_descr).msgid
except UnknownMessageError:
# The linter checks for messages that are not registered
# due to version mismatch, just treat them as message IDs
# for now.
msgid = msg_descr
if line is None:
return self._msgs_state.get(msgid, True)
try:
return self.file_state._module_msgs_state[msgid][line]
except KeyError:
# Check if the message's line is after the maximum line existing in ast tree.
# This line won't appear in the ast tree and won't be referred in
# self.file_state._module_msgs_state
# This happens for example with a commented line at the end of a module.
max_line_number = self.file_state.get_effective_max_line_number()
if max_line_number and line > max_line_number:
fallback = True
lines = self.file_state._raw_module_msgs_state.get(msgid, {})
# Doesn't consider scopes, as a disable can be in a different scope
# than that of the current line.
closest_lines = reversed(
[
(message_line, enable)
for message_line, enable in lines.items()
if message_line <= line
]
)
last_line, is_enabled = next(closest_lines, (None, None))
if last_line is not None:
fallback = is_enabled
return self._msgs_state.get(msgid, fallback)
return self._msgs_state.get(msgid, True)
def add_message(
self,
msg_descr,
line=None,
node=None,
args=None,
confidence=UNDEFINED,
col_offset=None,
):
"""Adds a message given by ID or name.
If provided, the message string is expanded using args.
AST checkers must provide the node argument (but may optionally
provide line if the line number is different), raw and token checkers
must provide the line argument.
"""
msg_info = self.msgs_store.get_message_definition(msg_descr)
msgid = msg_info.msgid
# backward compatibility, message may not have a symbol
symbol = msg_info.symbol or msgid
# Fatal messages and reports are special, the node/scope distinction
# does not apply to them.
if msgid[0] not in _SCOPE_EXEMPT:
if msg_info.scope == WarningScope.LINE:
if line is None:
raise InvalidMessageError(
"Message %s must provide line, got None" % msgid
)
if node is not None:
raise InvalidMessageError(
"Message %s must only provide line, "
"got line=%s, node=%s" % (msgid, line, node)
)
elif msg_info.scope == WarningScope.NODE:
# Node-based warnings may provide an override line.
if node is None:
raise InvalidMessageError(
"Message %s must provide Node, got None" % msgid
)
if line is None and node is not None:
line = node.fromlineno
if col_offset is None and hasattr(node, "col_offset"):
col_offset = (
node.col_offset
) # XXX measured in bytes for utf-8, divide by two for chars?
# should this message be displayed
if not self.is_message_enabled(msgid, line, confidence):
self.file_state.handle_ignored_message(
self.get_message_state_scope(msgid, line, confidence),
msgid,
line,
node,
args,
confidence,
)
return
# update stats
msg_cat = MSG_TYPES[msgid[0]]
self.msg_status |= MSG_TYPES_STATUS[msgid[0]]
self.stats[msg_cat] += 1
self.stats["by_module"][self.current_name][msg_cat] += 1
try:
self.stats["by_msg"][symbol] += 1
except KeyError:
self.stats["by_msg"][symbol] = 1
# expand message ?
msg = msg_info.msg
if args:
msg %= args
# get module and object
if node is None:
module, obj = self.current_name, ""
abspath = self.current_file
else:
module, obj = get_module_and_frameid(node)
abspath = node.root().file
path = abspath.replace(self.reporter.path_strip_prefix, "", 1)
# add the message
self.reporter.handle_message(
Message(
msgid,
symbol,
(abspath, path, module, obj, line or 1, col_offset or 0),
msg,
confidence,
)
)
def print_full_documentation(self, stream=None):
"""output a full documentation in ReST format"""
if not stream:
stream = sys.stdout
print("Pylint global options and switches", file=stream)
print("----------------------------------", file=stream)
print("", file=stream)
print("Pylint provides global options and switches.", file=stream)
print("", file=stream)
by_checker = {}
for checker in self.get_checkers():
if checker.name == "master":
if checker.options:
for section, options in checker.options_by_section():
if section is None:
title = "General options"
else:
title = "%s options" % section.capitalize()
print(title, file=stream)
print("~" * len(title), file=stream)
_rest_format_section(stream, None, options)
print("", file=stream)
else:
name = checker.name
try:
by_checker[name]["options"] += checker.options_and_values()
by_checker[name]["msgs"].update(checker.msgs)
by_checker[name]["reports"] += checker.reports
except KeyError:
by_checker[name] = {
"options": list(checker.options_and_values()),
"msgs": dict(checker.msgs),
"reports": list(checker.reports),
}
print("Pylint checkers' options and switches", file=stream)
print("-------------------------------------", file=stream)
print("", file=stream)
print("Pylint checkers can provide three set of features:", file=stream)
print("", file=stream)
print("* options that control their execution,", file=stream)
print("* messages that they can raise,", file=stream)
print("* reports that they can generate.", file=stream)
print("", file=stream)
print("Below is a list of all checkers and their features.", file=stream)
print("", file=stream)
for checker, info in sorted(by_checker.items()):
self._print_checker_doc(checker, info, stream=stream)
@staticmethod
def _print_checker_doc(checker_name, info, stream=None):
"""Helper method for print_full_documentation.
Also used by doc/exts/pylint_extensions.py.
"""
if not stream:
stream = sys.stdout
doc = info.get("doc")
module = info.get("module")
msgs = info.get("msgs")
options = info.get("options")
reports = info.get("reports")
checker_title = "%s checker" % (checker_name.replace("_", " ").title())
if module:
# Provide anchor to link against
print(".. _%s:\n" % module, file=stream)
print(checker_title, file=stream)
print("~" * len(checker_title), file=stream)
print("", file=stream)
if module:
print("This checker is provided by ``%s``." % module, file=stream)
print("Verbatim name of the checker is ``%s``." % checker_name, file=stream)
print("", file=stream)
if doc:
# Provide anchor to link against
title = "{} Documentation".format(checker_title)
print(title, file=stream)
print("^" * len(title), file=stream)
print(cleandoc(doc), file=stream)
print("", file=stream)
if options:
title = "{} Options".format(checker_title)
print(title, file=stream)
print("^" * len(title), file=stream)
_rest_format_section(stream, None, options)
print("", file=stream)
if msgs:
title = "{} Messages".format(checker_title)
print(title, file=stream)
print("^" * len(title), file=stream)
for msgid, msg in sorted(
msgs.items(), key=lambda kv: (_MSG_ORDER.index(kv[0][0]), kv[1])
):
msg = build_message_def(checker_name, msgid, msg)
print(msg.format_help(checkerref=False), file=stream)
print("", file=stream)
if reports:
title = "{} Reports".format(checker_title)
print(title, file=stream)
print("^" * len(title), file=stream)
for report in reports:
print(":%s: %s" % report[:2], file=stream)
print("", file=stream)
print("", file=stream)
class FileState:
"""Hold internal state specific to the currently analyzed file"""
def __init__(self, modname=None):
self.base_name = modname
self._module_msgs_state = {}
self._raw_module_msgs_state = {}
self._ignored_msgs = collections.defaultdict(set)
self._suppression_mapping = {}
self._effective_max_line_number = None
def collect_block_lines(self, msgs_store, module_node):
"""Walk the AST to collect block level options line numbers."""
for msg, lines in self._module_msgs_state.items():
self._raw_module_msgs_state[msg] = lines.copy()
orig_state = self._module_msgs_state.copy()
self._module_msgs_state = {}
self._suppression_mapping = {}
self._effective_max_line_number = module_node.tolineno
self._collect_block_lines(msgs_store, module_node, orig_state)
def _collect_block_lines(self, msgs_store, node, msg_state):
"""Recursively walk (depth first) AST to collect block level options
line numbers.
"""
for child in node.get_children():
self._collect_block_lines(msgs_store, child, msg_state)
first = node.fromlineno
last = node.tolineno
# first child line number used to distinguish between disable
# which are the first child of scoped node with those defined later.
# For instance in the code below:
#
# 1. def meth8(self):
# 2. """test late disabling"""
# 3. # pylint: disable=E1102
# 4. print self.blip
# 5. # pylint: disable=E1101
# 6. print self.bla
#
# E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
#
# this is necessary to disable locally messages applying to class /
# function using their fromlineno
if (
isinstance(node, (nodes.Module, nodes.ClassDef, nodes.FunctionDef))
and node.body
):
firstchildlineno = node.body[0].fromlineno
else:
firstchildlineno = last
for msgid, lines in msg_state.items():
for lineno, state in list(lines.items()):
original_lineno = lineno
if first > lineno or last < lineno:
continue
# Set state for all lines for this block, if the
# warning is applied to nodes.
if msgs_store.get_message_definition(msgid).scope == WarningScope.NODE:
if lineno > firstchildlineno:
state = True
first_, last_ = node.block_range(lineno)
else:
first_ = lineno
last_ = last
for line in range(first_, last_ + 1):
# do not override existing entries
if line in self._module_msgs_state.get(msgid, ()):
continue
if line in lines: # state change in the same block
state = lines[line]
original_lineno = line
if not state:
self._suppression_mapping[(msgid, line)] = original_lineno
try:
self._module_msgs_state[msgid][line] = state
except KeyError:
self._module_msgs_state[msgid] = {line: state}
del lines[lineno]
def set_msg_status(self, msg, line, status):
"""Set status (enabled/disable) for a given message at a given line"""
assert line > 0
try:
self._module_msgs_state[msg.msgid][line] = status
except KeyError:
self._module_msgs_state[msg.msgid] = {line: status}
def handle_ignored_message(
self, state_scope, msgid, line, node, args, confidence
): # pylint: disable=unused-argument
"""Report an ignored message.
state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
depending on whether the message was disabled locally in the module,
or globally. The other arguments are the same as for add_message.
"""
if state_scope == MSG_STATE_SCOPE_MODULE:
try:
orig_line = self._suppression_mapping[(msgid, line)]
self._ignored_msgs[(msgid, orig_line)].add(line)
except KeyError:
pass
def iter_spurious_suppression_messages(self, msgs_store):
for warning, lines in self._raw_module_msgs_state.items():
for line, enable in lines.items():
if not enable and (warning, line) not in self._ignored_msgs:
yield "useless-suppression", line, (
msgs_store.get_msg_display_string(warning),
)
# don't use iteritems here, _ignored_msgs may be modified by add_message
for (warning, from_), lines in list(self._ignored_msgs.items()):
for line in lines:
yield "suppressed-message", line, (
msgs_store.get_msg_display_string(warning),
from_,
)
def get_effective_max_line_number(self):
return self._effective_max_line_number
class MessagesStore:
"""The messages store knows information about every possible message but has
no particular state during analysis.
"""
def __init__(self):
# Primary registry for all active messages (i.e. all messages
# that can be emitted by pylint for the underlying Python
# version). It contains the 1:1 mapping from symbolic names
# to message definition objects.
# Keys are msg ids, values are a 2-uple with the msg type and the
# msg itself
self._messages = {}
# Maps alternative names (numeric IDs, deprecated names) to
# message definitions. May contain several names for each definition
# object.
self._alternative_names = {}
self._msgs_by_category = collections.defaultdict(list)
@property
def messages(self):
"""The list of all active messages."""
return self._messages.values()
def add_renamed_message(self, old_id, old_symbol, new_symbol):
"""Register the old ID and symbol for a warning that was renamed.
This allows users to keep using the old ID/symbol in suppressions.
"""
msg = self.get_message_definition(new_symbol)
msg.old_names.append((old_id, old_symbol))
self._register_alternative_name(msg, old_id, old_symbol)
@staticmethod
def get_checker_message_definitions(checker):
"""Return the list of messages definitions for a checker.
:param BaseChecker checker:
:rtype: list
:return: A list of MessageDefinition.
"""
message_definitions = []
for msgid, msg_tuple in sorted(checker.msgs.items()):
message = build_message_def(checker, msgid, msg_tuple)
message_definitions.append(message)
return message_definitions
def register_messages(self, checker):
"""Register messages from a checker.
:param BaseChecker checker:
"""
messages = self.get_checker_message_definitions(checker)
self._check_checker_consistency(messages)
for message in messages:
self.register_message(message)
def register_message(self, message):
"""Register a MessageDefinition with consistency in mind.
:param MessageDefinition message: The message definition being added.
"""
self._check_id_and_symbol_consistency(message.msgid, message.symbol)
self._check_symbol(message.msgid, message.symbol)
self._check_msgid(message.msgid, message.symbol)
for old_name in message.old_names:
self._check_symbol(message.msgid, old_name[1])
self._messages[message.symbol] = message
self._register_alternative_name(message, message.msgid, message.symbol)
for old_id, old_symbol in message.old_names:
self._register_alternative_name(message, old_id, old_symbol)
self._msgs_by_category[message.msgid[0]].append(message.msgid)
@staticmethod
def _check_checker_consistency(messages):
"""Check the msgid consistency in a list of messages definitions.
msg ids for a checker should be a string of len 4, where the first two
characters are the checker id and the last two the msg id in this
checker.
:param list messages: List of MessageDefinition.
:raises InvalidMessageError: If the checker id in the messages are not
always the same
"""
checker_id = None
existing_ids = []
for message in messages:
if checker_id is not None and checker_id != message.msgid[1:3]:
error_msg = "Inconsistent checker part in message id "
error_msg += "'{}' (expected 'x{checker_id}xx' ".format(
message.msgid, checker_id=checker_id
)
error_msg += "because we already had {existing_ids}).".format(
existing_ids=existing_ids
)
raise InvalidMessageError(error_msg)
checker_id = message.msgid[1:3]
existing_ids.append(message.msgid)
def _register_alternative_name(self, msg, msgid, symbol):
"""helper for register_message()"""
self._check_id_and_symbol_consistency(msgid, symbol)
self._alternative_names[msgid] = msg
self._alternative_names[symbol] = msg
def _check_symbol(self, msgid, symbol):
"""Check that a symbol is not already used. """
other_message = self._messages.get(symbol)
if other_message:
self._raise_duplicate_msg_id(symbol, msgid, other_message.msgid)
else:
alternative_msgid = None
alternative_message = self._alternative_names.get(symbol)
if alternative_message:
if alternative_message.symbol == symbol:
alternative_msgid = alternative_message.msgid
else:
for old_msgid, old_symbol in alternative_message.old_names:
if old_symbol == symbol:
alternative_msgid = old_msgid
break
if msgid != alternative_msgid:
self._raise_duplicate_msg_id(symbol, msgid, alternative_msgid)
def _check_msgid(self, msgid, symbol):
for message in self._messages.values():
if message.msgid == msgid:
self._raise_duplicate_symbol(msgid, symbol, message.symbol)
def _check_id_and_symbol_consistency(self, msgid, symbol):
try:
alternative = self._alternative_names[msgid]
except KeyError:
alternative = False
try:
if not alternative:
alternative = self._alternative_names[symbol]
except KeyError:
# There is no alternative names concerning this msgid/symbol.
# So nothing to check
return None
old_symbolic_name = None
old_symbolic_id = None
for alternate_msgid, alternate_symbol in alternative.old_names:
if alternate_msgid == msgid or alternate_symbol == symbol:
old_symbolic_id = alternate_msgid
old_symbolic_name = alternate_symbol
if symbol not in (alternative.symbol, old_symbolic_name):
if msgid == old_symbolic_id:
self._raise_duplicate_symbol(msgid, symbol, old_symbolic_name)
else:
self._raise_duplicate_symbol(msgid, symbol, alternative.symbol)
return None
@staticmethod
def _raise_duplicate_symbol(msgid, symbol, other_symbol):
"""Raise an error when a symbol is duplicated.
:param str msgid: The msgid corresponding to the symbols
:param str symbol: Offending symbol
:param str other_symbol: Other offending symbol
:raises InvalidMessageError: when a symbol is duplicated.
"""
error_message = "Message id '{msgid}' cannot have both ".format(msgid=msgid)
error_message += "'{other_symbol}' and '{symbol}' as symbolic name.".format(
other_symbol=other_symbol, symbol=symbol
)
raise InvalidMessageError(error_message)
@staticmethod
def _raise_duplicate_msg_id(symbol, msgid, other_msgid):
"""Raise an error when a msgid is duplicated.
:param str symbol: The symbol corresponding to the msgids
:param str msgid: Offending msgid
:param str other_msgid: Other offending msgid
:raises InvalidMessageError: when a msgid is duplicated.
"""
error_message = "Message symbol '{symbol}' cannot be used for ".format(
symbol=symbol
)
error_message += "'{other_msgid}' and '{msgid}' at the same time.".format(
other_msgid=other_msgid, msgid=msgid
)
raise InvalidMessageError(error_message)
def get_message_definition(self, msgid_or_symbol):
"""Returns the Message object for this message.
:param str msgid_or_symbol: msgid_or_symbol may be either a numeric or symbolic id.
:raises UnknownMessageError: if the message id is not defined.
:rtype: MessageDefinition
:return: A message definition corresponding to msgid_or_symbol
"""
if msgid_or_symbol[1:].isdigit():
msgid_or_symbol = msgid_or_symbol.upper()
for source in (self._alternative_names, self._messages):
try:
return source[msgid_or_symbol]
except KeyError:
pass
raise UnknownMessageError(
"No such message id {msgid_or_symbol}".format(
msgid_or_symbol=msgid_or_symbol
)
)
def get_msg_display_string(self, msgid):
"""Generates a user-consumable representation of a message.
Can be just the message ID or the ID and the symbol.
"""
return repr(self.get_message_definition(msgid).symbol)
def help_message(self, msgids):
"""Display help messages for the given message identifiers"""
for msgid in msgids:
try:
print(self.get_message_definition(msgid).format_help(checkerref=True))
print("")
except UnknownMessageError as ex:
print(ex)
print("")
continue
def list_messages(self):
"""Output full messages list documentation in ReST format. """
messages = sorted(self._messages.values(), key=lambda m: m.msgid)
for message in messages:
if not message.may_be_emitted():
continue
print(message.format_help(checkerref=False))
print("")
class ReportsHandlerMixIn:
"""a mix-in class containing all the reports and stats manipulation
related methods for the main lint class
"""
def __init__(self):
self._reports = collections.defaultdict(list)
self._reports_state = {}
def report_order(self):
""" Return a list of reports, sorted in the order
in which they must be called.
"""
return list(self._reports)
def register_report(self, reportid, r_title, r_cb, checker):
"""register a report
reportid is the unique identifier for the report
r_title the report's title
r_cb the method to call to make the report
checker is the checker defining the report
"""
reportid = reportid.upper()
self._reports[checker].append((reportid, r_title, r_cb))
def enable_report(self, reportid):
"""disable the report of the given id"""
reportid = reportid.upper()
self._reports_state[reportid] = True
def disable_report(self, reportid):
"""disable the report of the given id"""
reportid = reportid.upper()
self._reports_state[reportid] = False
def report_is_enabled(self, reportid):
"""return true if the report associated to the given identifier is
enabled
"""
return self._reports_state.get(reportid, True)
def make_reports(self, stats, old_stats):
"""render registered reports"""
sect = Section("Report", "%s statements analysed." % (self.stats["statement"]))
for checker in self.report_order():
for reportid, r_title, r_cb in self._reports[checker]:
if not self.report_is_enabled(reportid):
continue
report_sect = Section(r_title)
try:
r_cb(report_sect, stats, old_stats)
except EmptyReportError:
continue
report_sect.report_id = reportid
sect.append(report_sect)
return sect
def add_stats(self, **kwargs):
"""add some stats entries to the statistic dictionary
raise an AssertionError if there is a key conflict
"""
for key, value in kwargs.items():
if key[-1] == "_":
key = key[:-1]
assert key not in self.stats
self.stats[key] = value
return self.stats
def _basename_in_blacklist_re(base_name, black_list_re):
"""Determines if the basename is matched in a regex blacklist
:param str base_name: The basename of the file
:param list black_list_re: A collection of regex patterns to match against.
Successful matches are blacklisted.
:returns: `True` if the basename is blacklisted, `False` otherwise.
:rtype: bool
"""
for file_pattern in black_list_re:
if file_pattern.match(base_name):
return True
return False
def _modpath_from_file(filename, is_namespace):
def _is_package_cb(path, parts):
return modutils.check_modpath_has_init(path, parts) or is_namespace
return modutils.modpath_from_file_with_callback(
filename, is_package_cb=_is_package_cb
)
def expand_modules(files_or_modules, black_list, black_list_re):
"""take a list of files/modules/packages and return the list of tuple
(file, module name) which have to be actually checked
"""
result = []
errors = []
for something in files_or_modules:
if os.path.basename(something) in black_list:
continue
if _basename_in_blacklist_re(os.path.basename(something), black_list_re):
continue
if exists(something):
# this is a file or a directory
try:
modname = ".".join(modutils.modpath_from_file(something))
except ImportError:
modname = splitext(basename(something))[0]
if isdir(something):
filepath = join(something, "__init__.py")
else:
filepath = something
else:
# suppose it's a module or package
modname = something
try:
filepath = modutils.file_from_modpath(modname.split("."))
if filepath is None:
continue
except (ImportError, SyntaxError) as ex:
# FIXME p3k : the SyntaxError is a Python bug and should be
# removed as soon as possible http://bugs.python.org/issue10588
errors.append({"key": "fatal", "mod": modname, "ex": ex})
continue
filepath = normpath(filepath)
modparts = (modname or something).split(".")
try:
spec = modutils.file_info_from_modpath(modparts, path=sys.path)
except ImportError:
# Might not be acceptable, don't crash.
is_namespace = False
is_directory = isdir(something)
else:
is_namespace = modutils.is_namespace(spec)
is_directory = modutils.is_directory(spec)
if not is_namespace:
result.append(
{
"path": filepath,
"name": modname,
"isarg": True,
"basepath": filepath,
"basename": modname,
}
)
has_init = (
not (modname.endswith(".__init__") or modname == "__init__")
and basename(filepath) == "__init__.py"
)
if has_init or is_namespace or is_directory:
for subfilepath in modutils.get_module_files(
dirname(filepath), black_list, list_all=is_namespace
):
if filepath == subfilepath:
continue
if _basename_in_blacklist_re(basename(subfilepath), black_list_re):
continue
modpath = _modpath_from_file(subfilepath, is_namespace)
submodname = ".".join(modpath)
result.append(
{
"path": subfilepath,
"name": submodname,
"isarg": False,
"basepath": filepath,
"basename": modname,
}
)
return result, errors
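# Illustration only (not part of the original module): each entry of `result`
# is a dict shaped like
#   {"path": "pylint/utils.py", "name": "pylint.utils", "isarg": True,
#    "basepath": "pylint/utils.py", "basename": "pylint.utils"}
# with one extra entry (isarg=False) per sub-module file discovered under a
# package passed on the command line; the exact paths here are hypothetical.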
class PyLintASTWalker:
def __init__(self, linter):
# callbacks per node types
self.nbstatements = 0
self.visit_events = collections.defaultdict(list)
self.leave_events = collections.defaultdict(list)
self.linter = linter
def _is_method_enabled(self, method):
if not hasattr(method, "checks_msgs"):
return True
for msg_desc in method.checks_msgs:
if self.linter.is_message_enabled(msg_desc):
return True
return False
def add_checker(self, checker):
"""walk to the checker's dir and collect visit and leave methods"""
# XXX : should be possible to merge needed_checkers and add_checker
vcids = set()
lcids = set()
visits = self.visit_events
leaves = self.leave_events
for member in dir(checker):
cid = member[6:]
if cid == "default":
continue
if member.startswith("visit_"):
v_meth = getattr(checker, member)
# don't use visit_methods with no activated message:
if self._is_method_enabled(v_meth):
visits[cid].append(v_meth)
vcids.add(cid)
elif member.startswith("leave_"):
l_meth = getattr(checker, member)
# don't use leave_methods with no activated message:
if self._is_method_enabled(l_meth):
leaves[cid].append(l_meth)
lcids.add(cid)
visit_default = getattr(checker, "visit_default", None)
if visit_default:
for cls in nodes.ALL_NODE_CLASSES:
cid = cls.__name__.lower()
if cid not in vcids:
visits[cid].append(visit_default)
# for now we have no "leave_default" method in Pylint
def walk(self, astroid):
"""call visit events of astroid checkers for the given node, recurse on
its children, then leave events.
"""
cid = astroid.__class__.__name__.lower()
# Detect if the node is a new name for a deprecated alias.
# In this case, favour the methods for the deprecated
# alias if any, in order to maintain backwards
# compatibility.
visit_events = self.visit_events.get(cid, ())
leave_events = self.leave_events.get(cid, ())
if astroid.is_statement:
self.nbstatements += 1
# generate events for this node on each checker
for cb in visit_events or ():
cb(astroid)
# recurse on children
for child in astroid.get_children():
self.walk(child)
for cb in leave_events or ():
cb(astroid)
PY_EXTS = (".py", ".pyc", ".pyo", ".pyw", ".so", ".dll")
def register_plugins(linter, directory):
"""load all module and package in the given directory, looking for a
'register' function in each one, used to register pylint checkers
"""
imported = {}
for filename in os.listdir(directory):
base, extension = splitext(filename)
if base in imported or base == "__pycache__":
continue
if (
extension in PY_EXTS
and base != "__init__"
or (not extension and isdir(join(directory, base)))
):
try:
module = modutils.load_module_from_file(join(directory, filename))
except ValueError:
# empty module name (usually emacs auto-save files)
continue
except ImportError as exc:
print(
"Problem importing module %s: %s" % (filename, exc), file=sys.stderr
)
else:
if hasattr(module, "register"):
module.register(linter)
imported[base] = 1
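# Illustrative sketch, not part of the original module: the smallest plugin file
# register_plugins() can pick up from `directory` is a module exposing a
# `register` callable, e.g.
#
#   # my_plugin.py (hypothetical name)
#   def register(linter):
#       """Called with the PyLinter instance; register custom checkers here."""
#       pass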
def get_global_option(checker, option, default=None):
""" Retrieve an option defined by the given *checker* or
by all known option providers.
It will look in the list of all options providers
until the given *option* is found.
If the option wasn't found, the *default* value will be returned.
"""
# First, try in the given checker's config.
# After that, look in the options providers.
try:
return getattr(checker.config, option.replace("-", "_"))
except AttributeError:
pass
for provider in checker.linter.options_providers:
for options in provider.options:
if options[0] == option:
return getattr(provider.config, option.replace("-", "_"))
return default
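# Illustration only (not part of the original module): from inside a checker,
#   get_global_option(self, "ignore-mixin-members", default=True)
# first looks at self.config, then at every registered options provider, and
# finally falls back to the given default (option names use "-", attribute
# lookups use "_").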
def deprecated_option(
shortname=None, opt_type=None, help_msg=None, deprecation_msg=None
):
def _warn_deprecated(option, optname, *args): # pylint: disable=unused-argument
if deprecation_msg:
sys.stderr.write(deprecation_msg % (optname,))
option = {
"help": help_msg,
"hide": True,
"type": opt_type,
"action": "callback",
"callback": _warn_deprecated,
"deprecated": True,
}
if shortname:
option["shortname"] = shortname
return option
def _splitstrip(string, sep=","):
"""return a list of stripped string by splitting the string given as
argument on `sep` (',' by default). Empty string are discarded.
>>> _splitstrip('a, b, c , 4,,')
['a', 'b', 'c', '4']
>>> _splitstrip('a')
['a']
>>> _splitstrip('a,\nb,\nc,')
['a', 'b', 'c']
:type string: str or unicode
:param string: a csv line
:type sep: str or unicode
:param sep: field separator, default to the comma (',')
:rtype: list of str or unicode
:return: the list of stripped substrings (the input split on `sep`)
"""
return [word.strip() for word in string.split(sep) if word.strip()]
def _unquote(string):
"""remove optional quotes (simple or double) from the string
:type string: str or unicode
:param string: an optionally quoted string
:rtype: str or unicode
:return: the unquoted string (or the input string if it wasn't quoted)
"""
if not string:
return string
if string[0] in "\"'":
string = string[1:]
if string[-1] in "\"'":
string = string[:-1]
return string
def _normalize_text(text, line_len=80, indent=""):
"""Wrap the text on the given line length."""
return "\n".join(
textwrap.wrap(
text, width=line_len, initial_indent=indent, subsequent_indent=indent
)
)
def _check_csv(value):
if isinstance(value, (list, tuple)):
return value
return _splitstrip(value)
def _comment(string):
"""return string as a comment"""
lines = [line.strip() for line in string.splitlines()]
return "# " + ("%s# " % os.linesep).join(lines)
def _format_option_value(optdict, value):
"""return the user input's value from a 'compiled' value"""
if isinstance(value, (list, tuple)):
value = ",".join(_format_option_value(optdict, item) for item in value)
elif isinstance(value, dict):
value = ",".join("%s:%s" % (k, v) for k, v in value.items())
elif hasattr(value, "match"): # optdict.get('type') == 'regexp'
# compiled regexp
value = value.pattern
elif optdict.get("type") == "yn":
value = "yes" if value else "no"
elif isinstance(value, str) and value.isspace():
value = "'%s'" % value
return value
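# Illustration only (not part of the original module): a few conversions the
# helper above performs,
#   _format_option_value({}, ["a", "b"])             -> "a,b"
#   _format_option_value({"type": "yn"}, True)       -> "yes"
#   _format_option_value({}, re.compile(r"[a-z]+"))  -> "[a-z]+"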
def _ini_format_section(stream, section, options, doc=None):
"""format an options section using the INI format"""
if doc:
print(_comment(doc), file=stream)
print("[%s]" % section, file=stream)
_ini_format(stream, options)
def _ini_format(stream, options):
"""format options using the INI format"""
for optname, optdict, value in options:
value = _format_option_value(optdict, value)
help_opt = optdict.get("help")
if help_opt:
help_opt = _normalize_text(help_opt, line_len=79, indent="# ")
print(file=stream)
print(help_opt, file=stream)
else:
print(file=stream)
if value is None:
print("#%s=" % optname, file=stream)
else:
value = str(value).strip()
if re.match(r"^([\w-]+,)+[\w-]+$", str(value)):
separator = "\n " + " " * len(optname)
value = separator.join(x + "," for x in str(value).split(","))
# remove trailing ',' from last element of the list
value = value[:-1]
print("%s=%s" % (optname, value), file=stream)
format_section = _ini_format_section
def _rest_format_section(stream, section, options, doc=None):
"""format an options section using as ReST formatted output"""
if section:
print("%s\n%s" % (section, "'" * len(section)), file=stream)
if doc:
print(_normalize_text(doc, line_len=79, indent=""), file=stream)
print(file=stream)
for optname, optdict, value in options:
help_opt = optdict.get("help")
print(":%s:" % optname, file=stream)
if help_opt:
help_opt = _normalize_text(help_opt, line_len=79, indent=" ")
print(help_opt, file=stream)
if value:
value = str(_format_option_value(optdict, value))
print(file=stream)
print(" Default: ``%s``" % value.replace("`` ", "```` ``"), file=stream)
| 1 | 10,754 | I think replacing `.*` with `\s*` from after the hash makes sense, but not the rest of the changes. | PyCQA-pylint | py |
@@ -220,6 +220,8 @@ type NetworkPolicyRule struct {
// action “nil” defaults to Allow action, which would be the case for rules created for
// K8s Network Policy.
Action *secv1alpha1.RuleAction `json:"action,omitempty" protobuf:"bytes,6,opt,name=action,casttype=github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1.RuleAction"`
+ // EnableLogging indicates whether or not to generate logs when rules are matched. Default to false.
+ EnableLogging bool `json:"enableLogging" protobuf:"varint,7,opt,name=enableLogging"`
}
// Protocol defines network protocols supported for things like container ports. | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
statsv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/stats/v1alpha1"
)
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroup is the message format of antrea/pkg/controller/types.AppliedToGroup in an API response.
type AppliedToGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Pods is a list of Pods selected by this group.
Pods []GroupMemberPod `json:"pods,omitempty" protobuf:"bytes,2,rep,name=pods"`
// GroupMembers is a list of resources selected by this group. This will eventually replace Pods.
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,3,rep,name=groupMembers"`
}
// PodReference represents a Pod Reference.
type PodReference struct {
// The name of this pod.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The namespace of this pod.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// NamedPort represents a Port with a name on Pod.
type NamedPort struct {
// Port represents the Port number.
Port int32 `json:"port,omitempty" protobuf:"varint,1,opt,name=port"`
// Name represents the associated name with this Port number.
Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
// Protocol for port. Must be UDP, TCP, or SCTP.
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol"`
}
// GroupMemberPod represents a GroupMember related to Pods.
type GroupMemberPod struct {
// Pod maintains the reference to the Pod.
Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,1,opt,name=pod"`
// IP maintains the IPAddress associated with the Pod.
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,2,opt,name=ip"`
// Ports maintain the named port mapping of this Pod.
Ports []NamedPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// ExternalEntityReference represents a ExternalEntity Reference.
type ExternalEntityReference struct {
// The name of this ExternalEntity.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The namespace of this ExternalEntity.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// Endpoint represents an external endpoint.
type Endpoint struct {
// IP is the IP address of the Endpoint.
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
// Ports is the list of NamedPorts of the Endpoint.
Ports []NamedPort `json:"ports,omitempty" protobuf:"bytes,2,rep,name=ports"`
}
// GroupMember represents a resource member to be populated in Groups.
// This supersedes GroupMemberPod, and will eventually replace it.
type GroupMember struct {
// Pod maintains the reference to the Pod.
Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,1,opt,name=pod"`
// ExternalEntity maintains the reference to the ExternalEntity.
ExternalEntity *ExternalEntityReference `json:"externalEntity,omitempty" protobuf:"bytes,2,opt,name=externalEntity"`
// Endpoints maintains a list of EndPoints associated with this groupMember.
Endpoints []Endpoint `json:"endpoints,omitempty" protobuf:"bytes,3,rep,name=endpoints"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupPatch describes the incremental update of an AppliedToGroup.
type AppliedToGroupPatch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
AddedPods []GroupMemberPod `json:"addedPods,omitempty" protobuf:"bytes,2,rep,name=addedPods"`
RemovedPods []GroupMemberPod `json:"removedPods,omitempty" protobuf:"bytes,3,rep,name=removedPods"`
AddedGroupMembers []GroupMember `json:"addedGroupMembers,omitempty" protobuf:"bytes,4,rep,name=addedGroupMembers"`
RemovedGroupMembers []GroupMember `json:"removedGroupMembers,omitempty" protobuf:"bytes,5,rep,name=removedGroupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupList is a list of AppliedToGroup objects.
type AppliedToGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []AppliedToGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroup is the message format of antrea/pkg/controller/types.AddressGroup in an API response.
type AddressGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Pods []GroupMemberPod `json:"pods,omitempty" protobuf:"bytes,2,rep,name=pods"`
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,3,rep,name=groupMembers"`
}
// IPAddress describes a single IP address. Either an IPv4 or IPv6 address must be set.
type IPAddress []byte
// IPNet describes an IP network.
type IPNet struct {
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
PrefixLength int32 `json:"prefixLength,omitempty" protobuf:"varint,2,opt,name=prefixLength"`
}
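// Illustration only (not part of the original file): a CIDR such as 10.10.0.0/24
// would be carried as
//   IPNet{IP: IPAddress(net.ParseIP("10.10.0.0").To4()), PrefixLength: 24}
// assuming the standard library net package; an IPv6 network uses the 16-byte
// form of IPAddress the same way.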
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupPatch describes the incremental update of an AddressGroup.
type AddressGroupPatch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
AddedPods []GroupMemberPod `json:"addedPods,omitempty" protobuf:"bytes,2,rep,name=addedPods"`
RemovedPods []GroupMemberPod `json:"removedPods,omitempty" protobuf:"bytes,3,rep,name=removedPods"`
AddedGroupMembers []GroupMember `json:"addedGroupMembers,omitempty" protobuf:"bytes,4,rep,name=addedGroupMembers"`
RemovedGroupMembers []GroupMember `json:"removedGroupMembers,omitempty" protobuf:"bytes,5,rep,name=removedGroupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupList is a list of AddressGroup objects.
type AddressGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []AddressGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
type NetworkPolicyType string
const (
K8sNetworkPolicy NetworkPolicyType = "K8sNetworkPolicy"
AntreaClusterNetworkPolicy NetworkPolicyType = "AntreaClusterNetworkPolicy"
AntreaNetworkPolicy NetworkPolicyType = "AntreaNetworkPolicy"
)
type NetworkPolicyReference struct {
// Type of the NetworkPolicy.
Type NetworkPolicyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=NetworkPolicyType"`
// Namespace of the NetworkPolicy. It's empty for Antrea ClusterNetworkPolicy.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
// Name of the NetworkPolicy.
Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
// UID of the NetworkPolicy.
UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// +genclient
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicy is the message format of antrea/pkg/controller/types.NetworkPolicy in an API response.
type NetworkPolicy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Rules is a list of rules to be applied to the selected Pods.
Rules []NetworkPolicyRule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"`
// AppliedToGroups is a list of names of AppliedToGroups to which this policy applies.
AppliedToGroups []string `json:"appliedToGroups,omitempty" protobuf:"bytes,3,rep,name=appliedToGroups"`
// Priority represents the relative priority of this Network Policy as compared to
// other Network Policies. Priority will be unset (nil) for K8s Network Policy.
Priority *float64 `json:"priority,omitempty" protobuf:"fixed64,4,opt,name=priority"`
// TierPriority represents the priority of the Tier associated with this Network
// Policy. The TierPriority will remain nil for K8s NetworkPolicy.
TierPriority *int32 `json:"tierPriority,omitempty" protobuf:"varint,5,opt,name=tierPriority"`
// Reference to the original NetworkPolicy that the internal NetworkPolicy is created for.
SourceRef *NetworkPolicyReference `json:"sourceRef,omitempty" protobuf:"bytes,6,opt,name=sourceRef"`
}
// Direction defines traffic direction of NetworkPolicyRule.
type Direction string
const (
DirectionIn Direction = "In"
DirectionOut Direction = "Out"
)
// NetworkPolicyRule describes a particular set of traffic that is allowed.
type NetworkPolicyRule struct {
// The direction of this rule.
// If it's set to In, From must be set and To must not be set.
// If it's set to Out, To must be set and From must not be set.
Direction Direction `json:"direction,omitempty" protobuf:"bytes,1,opt,name=direction"`
// From represents sources which should be able to access the pods selected by the policy.
From NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
// To represents destinations which should be able to be accessed by the pods selected by the policy.
To NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,3,opt,name=to"`
// Services is a list of services which should be matched.
Services []Service `json:"services,omitempty" protobuf:"bytes,4,rep,name=services"`
// Priority defines the priority of the Rule as compared to other rules in the
// NetworkPolicy.
Priority int32 `json:"priority,omitempty" protobuf:"varint,5,opt,name=priority"`
	// Action specifies the action to be applied to the rule, i.e. Allow or Drop. An empty
	// (nil) action defaults to Allow, which is the case for rules created for
	// K8s NetworkPolicies.
Action *secv1alpha1.RuleAction `json:"action,omitempty" protobuf:"bytes,6,opt,name=action,casttype=github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1.RuleAction"`
}
// Protocol defines network protocols supported for things like container ports.
type Protocol string
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
// ProtocolSCTP is the SCTP protocol.
ProtocolSCTP Protocol = "SCTP"
)
// Service describes a port to allow traffic on.
type Service struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
// field defaults to TCP.
// +optional
Protocol *Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol"`
// The port name or number on the given protocol. If not specified, this matches all port numbers.
// +optional
Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
}
// NetworkPolicyPeer describes a peer of NetworkPolicyRules.
// It could be a list of names of AddressGroups and/or a list of IPBlock.
type NetworkPolicyPeer struct {
// A list of names of AddressGroups.
AddressGroups []string `json:"addressGroups,omitempty" protobuf:"bytes,1,rep,name=addressGroups"`
// A list of IPBlock.
IPBlocks []IPBlock `json:"ipBlocks,omitempty" protobuf:"bytes,2,rep,name=ipBlocks"`
}
// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24"). The except entry describes CIDRs that should
// not be included within this rule.
type IPBlock struct {
	// CIDR is an IPNet that represents the IP Block.
CIDR IPNet `json:"cidr" protobuf:"bytes,1,name=cidr"`
// Except is a slice of IPNets that should not be included within an IP Block.
// Except values will be rejected if they are outside the CIDR range.
// +optional
Except []IPNet `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicyList is a list of NetworkPolicy objects.
type NetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeStatsSummary contains stats produced on a Node. It's used by the antrea-agents to report stats to the antrea-controller.
type NodeStatsSummary struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The TrafficStats of K8s NetworkPolicies collected from the Node.
NetworkPolicies []NetworkPolicyStats `json:"networkPolicies,omitempty" protobuf:"bytes,2,rep,name=networkPolicies"`
// The TrafficStats of Antrea ClusterNetworkPolicies collected from the Node.
AntreaClusterNetworkPolicies []NetworkPolicyStats `json:"antreaClusterNetworkPolicies,omitempty" protobuf:"bytes,3,rep,name=antreaClusterNetworkPolicies"`
// The TrafficStats of Antrea NetworkPolicies collected from the Node.
AntreaNetworkPolicies []NetworkPolicyStats `json:"antreaNetworkPolicies,omitempty" protobuf:"bytes,4,rep,name=antreaNetworkPolicies"`
}
// NetworkPolicyStats contains the information and traffic stats of a NetworkPolicy.
type NetworkPolicyStats struct {
// The reference of the NetworkPolicy.
NetworkPolicy NetworkPolicyReference `json:"networkPolicy,omitempty" protobuf:"bytes,1,opt,name=networkPolicy"`
// The stats of the NetworkPolicy.
TrafficStats statsv1alpha1.TrafficStats `json:"trafficStats,omitempty" protobuf:"bytes,2,opt,name=trafficStats"`
}
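
// exampleIngressRule is an illustrative sketch and not part of the original
// file: it shows how the types above compose into a single rule admitting TCP
// traffic on port 80 from 10.0.0.0/8. The function name and all field values
// are assumptions chosen purely for demonstration.
func exampleIngressRule() NetworkPolicyRule {
	tcp := ProtocolTCP
	port := intstr.FromInt(80)
	return NetworkPolicyRule{
		// DirectionIn means From must be set and To must be left empty.
		Direction: DirectionIn,
		From: NetworkPolicyPeer{
			IPBlocks: []IPBlock{{
				CIDR: IPNet{IP: IPAddress{10, 0, 0, 0}, PrefixLength: 8},
			}},
		},
		Services: []Service{{Protocol: &tcp, Port: &port}},
	}
}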
| 1 | 24,522 | interesting .. did not know varint tag covered bool as well | antrea-io-antrea | go |
@@ -35,6 +35,11 @@ struct st_duration_stats_t {
struct gkc_summary *process_time;
struct gkc_summary *response_time;
struct gkc_summary *total_time;
+
+ /**
+ * average event loop latency per worker thread
+ */
+ H2O_VECTOR(uint64_t) evloop_latency_ms;
};
struct st_duration_agg_stats_t { | 1 | /*
* Copyright (c) 2016 Fastly
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "h2o.h"
#include "gkc.h"
#include <inttypes.h>
#include <pthread.h>
#define GK_EPSILON 0.01
struct st_duration_stats_t {
struct gkc_summary *connect_time;
struct gkc_summary *header_time;
struct gkc_summary *body_time;
struct gkc_summary *request_total_time;
struct gkc_summary *process_time;
struct gkc_summary *response_time;
struct gkc_summary *total_time;
};
struct st_duration_agg_stats_t {
struct st_duration_stats_t stats;
pthread_mutex_t mutex;
};
static h2o_logger_t *durations_logger;
static void durations_status_per_thread(void *priv, h2o_context_t *ctx)
{
struct st_duration_agg_stats_t *agg_stats = priv;
if (durations_logger) {
struct st_duration_stats_t *ctx_stats = h2o_context_get_logger_context(ctx, durations_logger);
pthread_mutex_lock(&agg_stats->mutex);
#define ADD_DURATION(x) \
do { \
struct gkc_summary *tmp; \
tmp = gkc_combine(agg_stats->stats.x, ctx_stats->x); \
gkc_summary_free(agg_stats->stats.x); \
agg_stats->stats.x = tmp; \
} while (0)
ADD_DURATION(connect_time);
ADD_DURATION(header_time);
ADD_DURATION(body_time);
ADD_DURATION(request_total_time);
ADD_DURATION(process_time);
ADD_DURATION(response_time);
ADD_DURATION(total_time);
#undef ADD_DURATION
pthread_mutex_unlock(&agg_stats->mutex);
}
}
static void duration_stats_init(struct st_duration_stats_t *stats)
{
stats->connect_time = gkc_summary_alloc(GK_EPSILON);
stats->header_time = gkc_summary_alloc(GK_EPSILON);
stats->body_time = gkc_summary_alloc(GK_EPSILON);
stats->request_total_time = gkc_summary_alloc(GK_EPSILON);
stats->process_time = gkc_summary_alloc(GK_EPSILON);
stats->response_time = gkc_summary_alloc(GK_EPSILON);
stats->total_time = gkc_summary_alloc(GK_EPSILON);
}
static void *durations_status_init(void)
{
struct st_duration_agg_stats_t *agg_stats;
agg_stats = h2o_mem_alloc(sizeof(*agg_stats));
duration_stats_init(&agg_stats->stats);
pthread_mutex_init(&agg_stats->mutex, NULL);
return agg_stats;
}
static void duration_stats_free(struct st_duration_stats_t *stats)
{
gkc_summary_free(stats->connect_time);
gkc_summary_free(stats->header_time);
gkc_summary_free(stats->body_time);
gkc_summary_free(stats->request_total_time);
gkc_summary_free(stats->process_time);
gkc_summary_free(stats->response_time);
gkc_summary_free(stats->total_time);
}
static h2o_iovec_t durations_status_final(void *priv, h2o_globalconf_t *gconf, h2o_req_t *req)
{
struct st_duration_agg_stats_t *agg_stats = priv;
h2o_iovec_t ret;
#define BUFSIZE 16384
#define DURATION_FMT(x) \
" \"" x "-0\": %lu,\n" \
" \"" x "-25\": %lu,\n" \
" \"" x "-50\": %lu,\n" \
" \"" x "-75\": %lu,\n" \
" \"" x "-99\": %lu\n"
#define DURATION_VALS(x) \
gkc_query(agg_stats->stats.x, 0), gkc_query(agg_stats->stats.x, 0.25), gkc_query(agg_stats->stats.x, 0.5), \
gkc_query(agg_stats->stats.x, 0.75), gkc_query(agg_stats->stats.x, 0.99)
ret.base = h2o_mem_alloc_pool(&req->pool, char, BUFSIZE);
ret.len = snprintf(
ret.base, BUFSIZE,
",\n" DURATION_FMT("connect-time") "," DURATION_FMT("header-time") "," DURATION_FMT("body-time") "," DURATION_FMT(
"request-total-time") "," DURATION_FMT("process-time") "," DURATION_FMT("response-time") "," DURATION_FMT("duration"),
DURATION_VALS(connect_time), DURATION_VALS(header_time), DURATION_VALS(body_time), DURATION_VALS(request_total_time),
DURATION_VALS(process_time), DURATION_VALS(response_time), DURATION_VALS(total_time));
#undef BUFSIZE
#undef DURATION_FMT
#undef DURATION_VALS
duration_stats_free(&agg_stats->stats);
pthread_mutex_destroy(&agg_stats->mutex);
free(agg_stats);
return ret;
}
static void stat_access(h2o_logger_t *_self, h2o_req_t *req)
{
struct st_duration_stats_t *ctx_stats = h2o_context_get_logger_context(req->conn->ctx, _self);
#define ADD_OBSERVATION(x, from, until) \
do { \
int64_t dur; \
if (h2o_time_compute_##x(req, &dur)) { \
gkc_insert_value(ctx_stats->x, dur); \
} \
} while (0)
ADD_OBSERVATION(connect_time, &req->conn->connected_at, &req->timestamps.request_begin_at);
ADD_OBSERVATION(header_time, &req->timestamps.request_begin_at,
h2o_timeval_is_null(&req->timestamps.request_body_begin_at) ? &req->processed_at.at
: &req->timestamps.request_body_begin_at);
ADD_OBSERVATION(body_time,
h2o_timeval_is_null(&req->timestamps.request_body_begin_at) ? &req->processed_at.at
: &req->timestamps.request_body_begin_at,
&req->processed_at.at);
ADD_OBSERVATION(request_total_time, &req->timestamps.request_begin_at, &req->processed_at.at);
ADD_OBSERVATION(process_time, &req->processed_at.at, &req->timestamps.response_start_at);
ADD_OBSERVATION(response_time, &req->timestamps.response_start_at, &req->timestamps.response_end_at);
ADD_OBSERVATION(total_time, &req->timestamps.request_begin_at, &req->timestamps.response_end_at);
#undef ADD_OBSERVATION
}
void on_context_init(struct st_h2o_logger_t *self, h2o_context_t *ctx)
{
struct st_duration_stats_t *duration_stats = h2o_mem_alloc(sizeof(struct st_duration_stats_t));
duration_stats_init(duration_stats);
h2o_context_set_logger_context(ctx, self, duration_stats);
}
void on_context_dispose(struct st_h2o_logger_t *self, h2o_context_t *ctx)
{
struct st_duration_stats_t *duration_stats;
duration_stats = h2o_context_get_logger_context(ctx, self);
duration_stats_free(duration_stats);
}
void h2o_duration_stats_register(h2o_globalconf_t *conf)
{
int i, k;
h2o_logger_t *logger;
h2o_hostconf_t *hconf;
durations_logger = logger = h2o_mem_alloc(sizeof(*logger));
memset(logger, 0, sizeof(*logger));
logger->_config_slot = conf->_num_config_slots++;
logger->log_access = stat_access;
logger->on_context_init = on_context_init;
logger->on_context_dispose = on_context_dispose;
for (k = 0; conf->hosts[k]; k++) {
hconf = conf->hosts[k];
for (i = 0; i < hconf->paths.size; i++) {
int j;
for (j = 0; j < hconf->paths.entries[i].handlers.size; j++) {
h2o_pathconf_t *pathconf = &hconf->paths.entries[i];
h2o_vector_reserve(NULL, &pathconf->_loggers, pathconf->_loggers.size + 1);
pathconf->_loggers.entries[pathconf->_loggers.size++] = (void *)logger;
}
}
}
}
h2o_status_handler_t h2o_durations_status_handler = {
{H2O_STRLIT("durations")}, durations_status_final, durations_status_init, durations_status_per_thread};
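
/* Illustrative sketch, not part of the original file: the patch above adds a
 * per-thread event-loop latency vector; if that latency were instead collected
 * as a histogram, it could reuse the gkc_summary lifecycle already used
 * throughout this file. The helper below is hypothetical and the sample values
 * are placeholders; only the gkc_* calls mirror APIs used above.
 */
static void gkc_usage_sketch(void)
{
    struct gkc_summary *s = gkc_summary_alloc(GK_EPSILON); /* epsilon-approximate quantiles */
    gkc_insert_value(s, 3); /* record observations, e.g. latencies in milliseconds */
    gkc_insert_value(s, 7);
    (void)gkc_query(s, 0.99); /* read back the 99th percentile */
    gkc_summary_free(s);
}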
| 1 | 13,700 | Could you please elaborate the reason for not using `gkc_summary` like the values above? I ask this, because I assume what we want to collect is a histogram of event loop latency, and we use `gkc_summary` for collecting histograms. | h2o-h2o | c |
@@ -187,7 +187,7 @@ Block.prototype.getTransactionHashes = function getTransactionHashes() {
return [Block.Values.NULL_HASH];
}
for (var t = 0; t < this.txs.length; t++) {
- hashes.push(this.txs[t].hash());
+ hashes.push(this.txs[t]._getHash());
}
return hashes;
}; | 1 | 'use strict';
var _ = require('lodash');
var BlockHeader = require('./blockheader');
var BN = require('./crypto/bn');
var bu = require('./util/buffer');
var BufferReader = require('./encoding/bufferreader');
var BufferWriter = require('./encoding/bufferwriter');
var Hash = require('./crypto/hash');
var ju = require('./util/js');
var Transaction = require('./transaction');
var Varint = require('./encoding/varint');
/**
* Instantiate a Block from a Buffer, JSON object, or Object with
* the properties of the Block
*
* @param {*} - A Buffer, JSON string, or Object
* @returns {Block} - An instance of Block
* @constructor
*/
var Block = function Block(arg) {
if (!(this instanceof Block)) {
return new Block(arg);
}
_.extend(this, Block._from(arg));
return this;
};
/**
* @param {*} - A Buffer, JSON string or Object
* @returns {Object} - An object representing block data
* @throws {TypeError} - If the argument was not recognized
* @private
*/
Block._from = function _from(arg) {
var info = {};
if (bu.isBuffer(arg)) {
info = Block._fromBufferReader(BufferReader(arg));
} else if (ju.isValidJson(arg)) {
info = Block._fromJSON(arg);
} else if (_.isObject(arg)) {
info = {
magicnum: arg.magicnum,
blocksize: arg.blocksize,
blockheader: arg.blockheader,
txsvi: arg.txsvi,
txs: arg.txs
};
} else {
throw new TypeError('Unrecognized argument for Block');
}
return info;
};
/**
* @param {String|Object} - A JSON string or object
* @returns {Object} - An object representing block data
* @private
*/
Block._fromJSON = function _fromJSON(data) {
if (ju.isValidJson(data)) {
data = JSON.parse(data);
}
var txs = [];
data.txs.forEach(function(tx) {
txs.push(Transaction().fromJSON(tx));
});
var info = {
magicnum: data.magicnum,
blocksize: data.blocksize,
blockheader: BlockHeader.fromJSON(data.blockheader),
txsvi: Varint().fromJSON(data.txsvi),
txs: txs
};
return info;
};
/**
* @param {String|Object} - A JSON string or object
* @returns {Block} - An instance of block
*/
Block.fromJSON = function fromJSON(json) {
var info = Block._fromJSON(json);
return new Block(info);
};
/**
* @param {BufferReader} - Block data
* @returns {Object} - An object representing the block data
* @private
*/
Block._fromBufferReader = function _fromBufferReader(br) {
var info = {};
info.magicnum = br.readUInt32LE();
info.blocksize = br.readUInt32LE();
info.blockheader = BlockHeader.fromBufferReader(br);
info.txsvi = Varint(br.readVarintBuf());
var txslen = info.txsvi.toNumber();
info.txs = [];
for (var i = 0; i < txslen; i++) {
info.txs.push(Transaction().fromBufferReader(br));
}
return info;
};
/**
* @param {BufferReader} - A buffer reader of the block
* @returns {Block} - An instance of block
*/
Block.fromBufferReader = function fromBufferReader(br) {
var info = Block._fromBufferReader(br);
return new Block(info);
};
/**
* @param {Buffer} - A buffer of the block
* @returns {Block} - An instance of block
*/
Block.fromBuffer = function fromBuffer(buf) {
return Block.fromBufferReader(BufferReader(buf));
};
/**
 * @param {Buffer|String} - Raw block binary data or buffer
* @returns {Block} - An instance of block
*/
Block.fromRawBlock = function fromRawBlock(data) {
if (!bu.isBuffer(data)) {
data = new Buffer(data, 'binary');
}
var br = BufferReader(data);
var info = Block._fromBufferReader(br);
return new Block(info);
};
/**
* @returns {Object} - A JSON object with the block properties
*/
Block.prototype.toJSON = function toJSON() {
var txs = [];
this.txs.forEach(function(tx) {
txs.push(tx.toJSON());
});
return {
magicnum: this.magicnum,
blocksize: this.blocksize,
blockheader: this.blockheader.toJSON(),
txsvi: this.txsvi.toJSON(),
txs: txs
};
};
/**
* @returns {Buffer} - A buffer of the block
*/
Block.prototype.toBuffer = function toBuffer() {
return this.toBufferWriter().concat();
};
/**
* @param {BufferWriter} - An existing instance of BufferWriter
* @returns {BufferWriter} - An instance of BufferWriter representation of the Block
*/
Block.prototype.toBufferWriter = function toBufferWriter(bw) {
if (!bw) {
bw = new BufferWriter();
}
bw.writeUInt32LE(this.magicnum);
bw.writeUInt32LE(this.blocksize);
bw.write(this.blockheader.toBuffer());
bw.write(this.txsvi.buf);
var txslen = this.txsvi.toNumber();
for (var i = 0; i < txslen; i++) {
this.txs[i].toBufferWriter(bw);
}
return bw;
};
/**
* Will iterate through each transaction and return an array of hashes
* @returns {Array} - An array with transaction hashes
*/
Block.prototype.getTransactionHashes = function getTransactionHashes() {
var hashes = [];
if (this.txs.length === 0) {
return [Block.Values.NULL_HASH];
}
for (var t = 0; t < this.txs.length; t++) {
hashes.push(this.txs[t].hash());
}
return hashes;
};
/**
* Will build a merkle tree of all the transactions, ultimately arriving at
* a single point, the merkle root.
* @link https://en.bitcoin.it/wiki/Protocol_specification#Merkle_Trees
* @returns {Array} - An array with each level of the tree after the other.
*/
Block.prototype.getMerkleTree = function getMerkleTree() {
var tree = this.getTransactionHashes();
var j = 0;
for (var size = this.txs.length; size > 1; size = Math.floor((size + 1) / 2)) {
for (var i = 0; i < size; i += 2) {
var i2 = Math.min(i + 1, size - 1);
var buf = Buffer.concat([tree[j + i], tree[j + i2]]);
tree.push(Hash.sha256sha256(buf));
}
j += size;
}
return tree;
};
/**
* Calculates the merkleRoot from the transactions.
* @returns {Buffer} - A buffer of the merkle root hash
*/
Block.prototype.getMerkleRoot = function getMerkleRoot() {
var tree = this.getMerkleTree();
return tree[tree.length - 1];
};
/**
* Verifies that the transactions in the block match the blockheader merkle root
* @returns {Boolean} - If the merkle roots match
*/
Block.prototype.validMerkleRoot = function validMerkleRoot() {
var h = new BN(this.blockheader.merklerootbuf.toString('hex'), 'hex');
var c = new BN(this.getMerkleRoot().toString('hex'), 'hex');
if (h.cmp(c) !== 0) {
return false;
}
return true;
};
/**
* @returns {Buffer} - The little endian hash buffer of the header
*/
Block.prototype.hash = function hash() {
return this.blockheader.hash();
};
/**
* @returns {Buffer} - The big endian hash buffer of the header
*/
Block.prototype.id = function id() {
return this.blockheader.id();
};
/**
 * @returns {String} - A string formatted for the console
*/
Block.prototype.inspect = function inspect() {
return '<Block ' + this.id().toString('hex') + '>';
};
Block.Values = {
NULL_HASH: new Buffer('0000000000000000000000000000000000000000000000000000000000000000', 'hex')
};
module.exports = Block;
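
/* Illustrative usage sketch, not part of the original file. `rawBlockBuffer`
 * is a placeholder assumption standing in for real serialized block bytes.
 *
 *   var block = Block.fromBuffer(rawBlockBuffer);
 *   block.validMerkleRoot();      //=> true when the header matches the txs
 *   block.getTransactionHashes(); //=> array of transaction hash buffers
 *   block.id().toString('hex');   //=> big-endian block hash as hex
 *   block.toBuffer();             //=> re-serialized block bytes
 */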
| 1 | 13,259 | getHash should be a public and without an `_` if used here. | bitpay-bitcore | js |
@@ -71,6 +71,10 @@ func (dv *DefaultBlockValidator) ValidateSemantic(ctx context.Context, child *ty
// ValidateSyntax validates a single block is correctly formed.
func (dv *DefaultBlockValidator) ValidateSyntax(ctx context.Context, blk *types.Block) error {
+ // hack validate genesisblock
+ if blk.Height == 0 {
+ return nil
+ }
now := uint64(dv.Now().Unix())
if uint64(blk.Timestamp) > now {
return fmt.Errorf("block %s with timestamp %d generate in future at time %d", blk.Cid().String(), blk.Timestamp, now) | 1 | package consensus
import (
"context"
"fmt"
"time"
"github.com/filecoin-project/go-filecoin/clock"
"github.com/filecoin-project/go-filecoin/types"
)
// BlockValidator defines an interface used to validate a blocks syntax and
// semantics.
type BlockValidator interface {
BlockSemanticValidator
BlockSyntaxValidator
}
// BlockSemanticValidator defines an interface used to validate a blocks
// semantics.
type BlockSemanticValidator interface {
ValidateSemantic(ctx context.Context, child *types.Block, parents *types.TipSet) error
}
// BlockSyntaxValidator defines an interface used to validate a blocks
// syntax.
type BlockSyntaxValidator interface {
ValidateSyntax(ctx context.Context, blk *types.Block) error
}
// DefaultBlockValidator implements the BlockValidator interface.
type DefaultBlockValidator struct {
clock.Clock
blockTime time.Duration
}
// NewDefaultBlockValidator returns a new DefaultBlockValidator. It uses `blkTime`
// to validate blocks and uses the DefaultBlockValidationClock.
func NewDefaultBlockValidator(blkTime time.Duration, c clock.Clock) *DefaultBlockValidator {
return &DefaultBlockValidator{
Clock: c,
blockTime: blkTime,
}
}
// ValidateSemantic validates a block is correctly derived from its parent.
func (dv *DefaultBlockValidator) ValidateSemantic(ctx context.Context, child *types.Block, parents *types.TipSet) error {
pmin, err := parents.MinTimestamp()
if err != nil {
return err
}
ph, err := parents.Height()
if err != nil {
return err
}
if uint64(child.Height) <= ph {
return fmt.Errorf("block %s has invalid height %d", child.Cid().String(), child.Height)
}
// check that child is appropriately delayed from its parents including
// null blocks.
// TODO replace check on height when #2222 lands
limit := uint64(pmin) + uint64(dv.BlockTime().Seconds())*(uint64(child.Height)-ph)
if uint64(child.Timestamp) < limit {
return fmt.Errorf("block %s with timestamp %d generated too far past parent, expected timestamp < %d", child.Cid().String(), child.Timestamp, limit)
}
return nil
}
// ValidateSyntax validates a single block is correctly formed.
func (dv *DefaultBlockValidator) ValidateSyntax(ctx context.Context, blk *types.Block) error {
now := uint64(dv.Now().Unix())
if uint64(blk.Timestamp) > now {
return fmt.Errorf("block %s with timestamp %d generate in future at time %d", blk.Cid().String(), blk.Timestamp, now)
}
if !blk.StateRoot.Defined() {
return fmt.Errorf("block %s has nil StateRoot", blk.Cid().String())
}
if blk.Miner.Empty() {
return fmt.Errorf("block %s has nil miner address", blk.Miner.String())
}
if len(blk.Ticket) == 0 {
return fmt.Errorf("block %s has nil ticket", blk.Cid().String())
}
// TODO validate block signature: 1054
return nil
}
// BlockTime returns the block time the DefaultBlockValidator uses to validate
/// blocks against.
func (dv *DefaultBlockValidator) BlockTime() time.Duration {
return dv.blockTime
}
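
// validateIncomingBlock is an illustrative sketch and not part of the original
// file: it shows how a caller might chain the two checks above when a new
// block arrives, running syntactic validation first and then semantic
// validation against the parent tipset. The function name is an assumption.
func validateIncomingBlock(ctx context.Context, v BlockValidator, blk *types.Block, parents *types.TipSet) error {
	// Reject malformed blocks (future timestamp, undefined state root, empty
	// miner address or ticket) before any parent-relative checks.
	if err := v.ValidateSyntax(ctx, blk); err != nil {
		return err
	}
	// Check height and timestamp spacing relative to the parent tipset.
	return v.ValidateSemantic(ctx, blk, parents)
}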
| 1 | 20,247 | Unrelated to these changes -- we will probably want some special handling for genesis block validation. | filecoin-project-venus | go |
@@ -75,6 +75,10 @@ type diskBlockCacheSetter interface {
MakeDiskBlockCacheIfNotExists() error
}
+type diskBlockCacheFractionSetter interface {
+ SetDiskBlockCacheFraction(float64)
+}
+
type clockGetter interface {
Clock() Clock
} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"time"
kbname "github.com/keybase/client/go/kbun"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/chat1"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfsedits"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/tlf"
metrics "github.com/rcrowley/go-metrics"
"golang.org/x/net/context"
billy "gopkg.in/src-d/go-billy.v4"
)
type dataVersioner interface {
// DataVersion returns the data version for this block
DataVersion() DataVer
}
type logMaker interface {
MakeLogger(module string) logger.Logger
}
type blockCacher interface {
BlockCache() BlockCache
}
type keyGetterGetter interface {
keyGetter() blockKeyGetter
}
type codecGetter interface {
Codec() kbfscodec.Codec
}
type blockServerGetter interface {
BlockServer() BlockServer
}
type cryptoPureGetter interface {
cryptoPure() cryptoPure
}
type cryptoGetter interface {
Crypto() Crypto
}
type chatGetter interface {
Chat() Chat
}
type currentSessionGetterGetter interface {
CurrentSessionGetter() CurrentSessionGetter
}
type signerGetter interface {
Signer() kbfscrypto.Signer
}
type diskBlockCacheGetter interface {
DiskBlockCache() DiskBlockCache
}
type diskBlockCacheSetter interface {
MakeDiskBlockCacheIfNotExists() error
}
type clockGetter interface {
Clock() Clock
}
type diskLimiterGetter interface {
DiskLimiter() DiskLimiter
}
type syncedTlfGetterSetter interface {
IsSyncedTlf(tlfID tlf.ID) bool
SetTlfSyncState(tlfID tlf.ID, isSynced bool) error
}
type blockRetrieverGetter interface {
BlockRetriever() BlockRetriever
}
// Offset is a generic representation of an offset to an indirect
// pointer within an indirect Block.
type Offset interface {
Equals(other Offset) bool
Less(other Offset) bool
}
// Block just needs to be (de)serialized using msgpack
type Block interface {
dataVersioner
// GetEncodedSize returns the encoded size of this block, but only
// if it has been previously set; otherwise it returns 0.
GetEncodedSize() uint32
// SetEncodedSize sets the encoded size of this block, locally
// caching it. The encoded size is not serialized.
SetEncodedSize(size uint32)
// NewEmpty returns a new block of the same type as this block
NewEmpty() Block
// Set sets this block to the same value as the passed-in block
Set(other Block)
// ToCommonBlock retrieves this block as a *CommonBlock.
ToCommonBlock() *CommonBlock
// IsIndirect indicates whether this block contains indirect pointers.
IsIndirect() bool
// OffsetExceedsData returns true if `off` is greater than the
// data contained in a direct block, assuming it starts at
// `startOff`. Note that the offset of the next block isn't
// relevant; this function should only indicate whether the offset
// is greater than what currently could be stored in this block.
OffsetExceedsData(startOff, off Offset) bool
// BytesCanBeDirtied returns the number of bytes that should be
// marked as dirtied if this block is dirtied.
BytesCanBeDirtied() int64
}
// BlockWithPtrs defines methods needed for interacting with indirect
// pointers.
type BlockWithPtrs interface {
Block
// FirstOffset returns the offset of the indirect pointer that
// points to the first (left-most) block in a block tree.
FirstOffset() Offset
// NumIndirectPtrs returns the number of indirect pointers in this
// block. The behavior is undefined when called on a non-indirect
// block.
NumIndirectPtrs() int
// IndirectPtr returns the block info and offset for the indirect
// pointer at index `i`. The behavior is undefined when called on
// a non-indirect block.
IndirectPtr(i int) (BlockInfo, Offset)
// AppendNewIndirectPtr appends a new indirect pointer at the
// given offset.
AppendNewIndirectPtr(ptr BlockPointer, off Offset)
// ClearIndirectPtrSize clears the encoded size of the indirect
// pointer stored at index `i`.
ClearIndirectPtrSize(i int)
	// SetIndirectPtrType sets the type of the indirect pointer stored
// at index `i`.
SetIndirectPtrType(i int, dt BlockDirectType)
	// SetIndirectPtrOff sets the offset of the indirect pointer stored
// at index `i`.
SetIndirectPtrOff(i int, off Offset)
// SetIndirectPtrInfo sets the block info of the indirect pointer
// stored at index `i`.
SetIndirectPtrInfo(i int, info BlockInfo)
// SwapIndirectPtrs swaps the indirect ptr at `i` in this block
// with the one at `otherI` in `other`.
SwapIndirectPtrs(i int, other BlockWithPtrs, otherI int)
}
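
// collectChildInfos is an illustrative sketch and not part of the original
// file: it walks an indirect block's pointers with the accessors above and
// returns their BlockInfos in order. The function name is an assumption.
func collectChildInfos(b BlockWithPtrs) []BlockInfo {
	if !b.IsIndirect() {
		// The pointer accessors are undefined for direct blocks.
		return nil
	}
	infos := make([]BlockInfo, 0, b.NumIndirectPtrs())
	for i := 0; i < b.NumIndirectPtrs(); i++ {
		info, _ := b.IndirectPtr(i) // the offset is ignored here
		infos = append(infos, info)
	}
	return infos
}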
// NodeID is a unique but transient ID for a Node. That is, two Node
// objects in memory at the same time represent the same file or
// directory if and only if their NodeIDs are equal (by pointer).
type NodeID interface {
// ParentID returns the NodeID of the directory containing the
// pointed-to file or directory, or nil if none exists.
ParentID() NodeID
}
// Node represents a direct pointer to a file or directory in KBFS.
// It is somewhat like an inode in a regular file system. Users of
// KBFS can use Node as a handle when accessing files or directories
// they have previously looked up.
type Node interface {
// GetID returns the ID of this Node. This should be used as a
// map key instead of the Node itself.
GetID() NodeID
// GetFolderBranch returns the folder ID and branch for this Node.
GetFolderBranch() FolderBranch
// GetBasename returns the current basename of the node, or ""
// if the node has been unlinked.
GetBasename() string
// Readonly returns true if KBFS should outright reject any write
// attempts on data or directory structures of this node. Though
// note that even if it returns false, KBFS can reject writes to
// the node for other reasons, such as TLF permissions. An
// implementation that wraps another `Node` (`inner`) must return
// `inner.Readonly()` if it decides not to return `true` on its
// own.
Readonly(ctx context.Context) bool
// ShouldCreateMissedLookup is called for Nodes representing
// directories, whenever `name` is looked up but is not found in
// the directory. If the Node decides a new entry should be
// created matching this lookup, it should return `true` as well
// as a context to use for the creation, the type of the new entry
// and the symbolic link contents if the entry is a Sym; the
// caller should then create this entry. Otherwise it should
// return false. It may return the type `FakeDir` to indicate
// that the caller should pretend the entry exists, even if it
// really does not. An implementation that wraps another `Node`
// (`inner`) must return `inner.ShouldCreateMissedLookup()` if it
// decides not to return `true` on its own.
ShouldCreateMissedLookup(ctx context.Context, name string) (
shouldCreate bool, newCtx context.Context, et EntryType, sympath string)
// ShouldRetryOnDirRead is called for Nodes representing
// directories, whenever a `Lookup` or `GetDirChildren` is done on
// them. It should return true to instruct the caller that it
// should re-sync its view of the directory and retry the
// operation.
ShouldRetryOnDirRead(ctx context.Context) bool
// RemoveDir is called on a `Node` before going through the normal
// `RemoveDir` flow, to give the Node a chance to handle it in a
// custom way. If the `Node` handles it internally, it should
// return `true`.
RemoveDir(ctx context.Context, dirName string) (
removeHandled bool, err error)
// WrapChild returns a wrapped version of child, if desired, to
// add custom behavior to the child node. An implementation that
// wraps another `Node` (`inner`) must first call
// `inner.WrapChild(child)` before performing its own wrapping
// operation, to ensure that all wrapping is preserved and that it
// happens in the correct order.
WrapChild(child Node) Node
// Unwrap returns the initial, unwrapped Node that was used to
// create this Node.
Unwrap() Node
// GetFS returns a file system interface that, if non-nil, should
// be used to satisfy any directory-related calls on this Node,
// instead of the standard, block-based method of acessing data.
// The provided context will be used, if possible, for any
// subsequent calls on the file system.
GetFS(ctx context.Context) billy.Filesystem
// GetFile returns a file interface that, if non-nil, should be
// used to satisfy any file-related calls on this Node, instead of
// the standard, block-based method of accessing data. The
// provided context will be used, if possible, for any subsequent
// calls on the file.
GetFile(ctx context.Context) billy.File
}
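
// readonlyNode is an illustrative sketch and not part of the original file: it
// demonstrates the wrapping contract described above by embedding an inner
// Node, forcing Readonly to true, and preserving the inner node's own wrapping
// in WrapChild. The type name is an assumption.
type readonlyNode struct {
	Node // the embedded inner node supplies every method not overridden here
}

func (n readonlyNode) Readonly(ctx context.Context) bool {
	// Reject writes outright, regardless of what the inner node would say.
	return true
}

func (n readonlyNode) WrapChild(child Node) Node {
	// Per the interface contract, let the inner node wrap the child first,
	// then layer this node's behavior on top so all wrapping is preserved.
	return readonlyNode{n.Node.WrapChild(child)}
}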
// KBFSOps handles all file system operations. Expands all indirect
// pointers. Operations that modify the server data change all the
// block IDs along the path, and so must return a path with the new
// BlockIds so the caller can update their references.
//
// KBFSOps implementations must guarantee goroutine-safety of calls on
// a per-top-level-folder basis.
//
// There are two types of operations that could block:
// * remote-sync operations, that need to synchronously update the
// MD for the corresponding top-level folder. When these
// operations return successfully, they will have guaranteed to
// have successfully written the modification to the KBFS servers.
// * remote-access operations, that don't sync any modifications to KBFS
// servers, but may block on reading data from the servers.
//
// KBFSOps implementations are supposed to give git-like consistency
// semantics for modification operations; they will be visible to
// other clients immediately after the remote-sync operations succeed,
// if and only if there was no other intervening modification to the
// same folder. If not, the change will be sync'd to the server in a
// special per-device "unmerged" area before the operation succeeds.
// In this case, the modification will not be visible to other clients
// until the KBFS code on this device performs automatic conflict
// resolution in the background.
//
// All methods take a Context (see https://blog.golang.org/context),
// and if that context is cancelled during the operation, KBFSOps will
// abort any blocking calls and return ctx.Err(). Any notifications
// resulting from an operation will also include this ctx (or a
// Context derived from it), allowing the caller to determine whether
// the notification is a result of their own action or an external
// action.
type KBFSOps interface {
// GetFavorites returns the logged-in user's list of favorite
// top-level folders. This is a remote-access operation.
GetFavorites(ctx context.Context) ([]Favorite, error)
// RefreshCachedFavorites tells the instances to forget any cached
// favorites list and fetch a new list from the server. The
	// effects are asynchronous; if there's an error refreshing the
// favorites, the cached favorites will become empty.
RefreshCachedFavorites(ctx context.Context)
// AddFavorite adds the favorite to both the server and
// the local cache.
AddFavorite(ctx context.Context, fav Favorite) error
// DeleteFavorite deletes the favorite from both the server and
// the local cache. Idempotent, so it succeeds even if the folder
// isn't favorited.
DeleteFavorite(ctx context.Context, fav Favorite) error
// GetTLFCryptKeys gets crypt key of all generations as well as
// TLF ID for tlfHandle. The returned keys (the keys slice) are ordered by
// generation, starting with the key for FirstValidKeyGen.
GetTLFCryptKeys(ctx context.Context, tlfHandle *TlfHandle) (
keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error)
// GetTLFID gets the TLF ID for tlfHandle.
GetTLFID(ctx context.Context, tlfHandle *TlfHandle) (tlf.ID, error)
// GetTLFHandle returns the TLF handle for a given node.
GetTLFHandle(ctx context.Context, node Node) (*TlfHandle, error)
// GetOrCreateRootNode returns the root node and root entry
// info associated with the given TLF handle and branch, if
// the logged-in user has read permissions to the top-level
// folder. It creates the folder if one doesn't exist yet (and
// branch == MasterBranch), and the logged-in user has write
// permissions to the top-level folder. This is a
// remote-access operation.
GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error)
// GetRootNode is like GetOrCreateRootNode but if the root node
// does not exist it will return a nil Node and not create it.
GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error)
// GetDirChildren returns a map of children in the directory,
// mapped to their EntryInfo, if the logged-in user has read
// permission for the top-level folder. This is a remote-access
// operation.
GetDirChildren(ctx context.Context, dir Node) (map[string]EntryInfo, error)
// Lookup returns the Node and entry info associated with a
// given name in a directory, if the logged-in user has read
// permissions to the top-level folder. The returned Node is nil
// if the name is a symlink. This is a remote-access operation.
Lookup(ctx context.Context, dir Node, name string) (Node, EntryInfo, error)
// Stat returns the entry info associated with a
// given Node, if the logged-in user has read permissions to the
// top-level folder. This is a remote-access operation.
Stat(ctx context.Context, node Node) (EntryInfo, error)
// CreateDir creates a new subdirectory under the given node, if
// the logged-in user has write permission to the top-level
// folder. Returns the new Node for the created subdirectory, and
// its new entry info. This is a remote-sync operation.
CreateDir(ctx context.Context, dir Node, name string) (
Node, EntryInfo, error)
// CreateFile creates a new file under the given node, if the
// logged-in user has write permission to the top-level folder.
// Returns the new Node for the created file, and its new
// entry info. excl (when implemented) specifies whether this is an exclusive
// create. Semantically setting excl to WithExcl is like O_CREAT|O_EXCL in a
// Unix open() call.
//
// This is a remote-sync operation.
CreateFile(ctx context.Context, dir Node, name string, isExec bool, excl Excl) (
Node, EntryInfo, error)
// CreateLink creates a new symlink under the given node, if the
// logged-in user has write permission to the top-level folder.
// Returns the new entry info for the created symlink. This
// is a remote-sync operation.
CreateLink(ctx context.Context, dir Node, fromName string, toPath string) (
EntryInfo, error)
// RemoveDir removes the subdirectory represented by the given
// node, if the logged-in user has write permission to the
// top-level folder. Will return an error if the subdirectory is
// not empty. This is a remote-sync operation.
RemoveDir(ctx context.Context, dir Node, dirName string) error
// RemoveEntry removes the directory entry represented by the
// given node, if the logged-in user has write permission to the
// top-level folder. This is a remote-sync operation.
RemoveEntry(ctx context.Context, dir Node, name string) error
// Rename performs an atomic rename operation with a given
// top-level folder if the logged-in user has write permission to
// that folder, and will return an error if nodes from different
// folders are passed in. Also returns an error if the new name
// already has an entry corresponding to an existing directory
// (only non-dir types may be renamed over). This is a
// remote-sync operation.
Rename(ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) error
// Read fills in the given buffer with data from the file at the
// given node starting at the given offset, if the logged-in user
// has read permission to the top-level folder. The read data
// reflects any outstanding writes and truncates to that file that
// have been written through this KBFSOps object, even if those
// writes have not yet been sync'd. There is no guarantee that
// Read returns all of the requested data; it will return the
// number of bytes that it wrote to the dest buffer. Reads on an
// unlinked file may or may not succeed, depending on whether or
// not the data has been cached locally. If (0, nil) is returned,
// that means EOF has been reached. This is a remote-access
// operation.
Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error)
// Write modifies the file at the given node, by writing the given
// buffer at the given offset within the file, if the logged-in
// user has write permission to the top-level folder. It
// overwrites any data already there, and extends the file size as
	// necessary to accommodate the new data. It guarantees to write
// the entire buffer in one operation. Writes on an unlinked file
// may or may not succeed as no-ops, depending on whether or not
// the necessary blocks have been locally cached. This is a
// remote-access operation.
Write(ctx context.Context, file Node, data []byte, off int64) error
// Truncate modifies the file at the given node, by either
// shrinking or extending its size to match the given size, if the
// logged-in user has write permission to the top-level folder.
// If extending the file, it pads the new data with 0s. Truncates
// on an unlinked file may or may not succeed as no-ops, depending
// on whether or not the necessary blocks have been locally
// cached. This is a remote-access operation.
Truncate(ctx context.Context, file Node, size uint64) error
// SetEx turns on or off the executable bit on the file
// represented by a given node, if the logged-in user has write
// permissions to the top-level folder. This is a remote-sync
// operation.
SetEx(ctx context.Context, file Node, ex bool) error
// SetMtime sets the modification time on the file represented by
// a given node, if the logged-in user has write permissions to
// the top-level folder. If mtime is nil, it is a noop. This is
// a remote-sync operation.
SetMtime(ctx context.Context, file Node, mtime *time.Time) error
// SyncAll flushes all outstanding writes and truncates for any
// dirty files to the KBFS servers within the given folder, if the
// logged-in user has write permissions to the top-level folder.
// If done through a file system interface, this may include
// modifications done via multiple file handles. This is a
// remote-sync operation.
SyncAll(ctx context.Context, folderBranch FolderBranch) error
// FolderStatus returns the status of a particular folder/branch, along
// with a channel that will be closed when the status has been
// updated (to eliminate the need for polling this method).
FolderStatus(ctx context.Context, folderBranch FolderBranch) (
FolderBranchStatus, <-chan StatusUpdate, error)
// Status returns the status of KBFS, along with a channel that will be
// closed when the status has been updated (to eliminate the need for
// polling this method). Note that this channel only applies to
// connection status changes.
//
// KBFSStatus can be non-empty even if there is an error.
Status(ctx context.Context) (
KBFSStatus, <-chan StatusUpdate, error)
// UnstageForTesting clears out this device's staged state, if
// any, and fast-forwards to the current head of this
// folder-branch.
UnstageForTesting(ctx context.Context, folderBranch FolderBranch) error
// RequestRekey requests to rekey this folder. Note that this asynchronously
// requests a rekey, so canceling ctx doesn't cancel the rekey.
RequestRekey(ctx context.Context, id tlf.ID)
// SyncFromServer blocks until the local client has contacted the
// server and guaranteed that all known updates for the given
// top-level folder have been applied locally (and notifications
// sent out to any observers). It returns an error if this
// folder-branch is currently unmerged or dirty locally. If
// lockBeforeGet is non-nil, it blocks on idempotently taking the
// lock from server at the time it gets any metadata.
SyncFromServer(ctx context.Context,
folderBranch FolderBranch, lockBeforeGet *keybase1.LockID) error
// GetUpdateHistory returns a complete history of all the merged
// updates of the given folder, in a data structure that's
// suitable for encoding directly into JSON. This is an expensive
	// operation, and should only be used for occasional debugging.
// Note that the history does not include any unmerged changes or
// outstanding writes from the local device.
GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (
history TLFUpdateHistory, err error)
// GetEditHistory returns the edit history of the TLF, clustered
// by writer.
GetEditHistory(ctx context.Context, folderBranch FolderBranch) (
tlfHistory keybase1.FSFolderEditHistory, err error)
// GetNodeMetadata gets metadata associated with a Node.
GetNodeMetadata(ctx context.Context, node Node) (NodeMetadata, error)
// Shutdown is called to clean up any resources associated with
// this KBFSOps instance.
Shutdown(ctx context.Context) error
// PushConnectionStatusChange updates the status of a service for
// human readable connection status tracking.
PushConnectionStatusChange(service string, newStatus error)
// PushStatusChange causes Status listeners to be notified via closing
// the status channel.
PushStatusChange()
// ClearPrivateFolderMD clears any cached private folder metadata,
// e.g. on a logout.
ClearPrivateFolderMD(ctx context.Context)
// ForceFastForward forwards the nodes of all folders that have
// been previously cleared with `ClearPrivateFolderMD` to their
// newest version. It works asynchronously, so no error is
// returned.
ForceFastForward(ctx context.Context)
// InvalidateNodeAndChildren sends invalidation messages for the
// given node and all of its children that are currently in the
// NodeCache. It's useful if the caller has outside knowledge of
// data changes to that node or its children that didn't come
// through the usual MD update channels (e.g., autogit nodes need
// invalidation when the corresponding git repo is updated).
InvalidateNodeAndChildren(ctx context.Context, node Node) error
// TeamNameChanged indicates that a team has changed its name, and
// we should clean up any outstanding handle info associated with
// the team ID.
TeamNameChanged(ctx context.Context, tid keybase1.TeamID)
// TeamAbandoned indicates that a team has been abandoned, and
// shouldn't be referred to by its previous name anymore.
TeamAbandoned(ctx context.Context, tid keybase1.TeamID)
// MigrateToImplicitTeam migrates the given folder from a private-
// or public-keyed folder, to a team-keyed folder. If it's
// already a private/public team-keyed folder, nil is returned.
MigrateToImplicitTeam(ctx context.Context, id tlf.ID) error
// KickoffAllOutstandingRekeys kicks off all outstanding rekeys. It does
// nothing to folders that have not scheduled a rekey. This should be
// called when we receive an event of "paper key cached" from service.
KickoffAllOutstandingRekeys() error
// NewNotificationChannel is called to notify any existing TLF
// matching `handle` that a new kbfs-edits channel is available.
NewNotificationChannel(
ctx context.Context, handle *TlfHandle, convID chat1.ConversationID,
channelName string)
}
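
// readFileHead is an illustrative sketch and not part of the original file: it
// strings together a typical remote-access sequence against KBFSOps, resolving
// the TLF root, looking up a file by name, and reading its first bytes. Error
// handling is minimal, and the handle and name arguments are assumptions
// supplied by the caller.
func readFileHead(ctx context.Context, ops KBFSOps, h *TlfHandle, name string) ([]byte, error) {
	root, _, err := ops.GetOrCreateRootNode(ctx, h, MasterBranch)
	if err != nil {
		return nil, err
	}
	// Lookup returns a nil Node when name refers to a symlink; a fuller
	// caller would handle that case explicitly.
	file, _, err := ops.Lookup(ctx, root, name)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, 4096)
	n, err := ops.Read(ctx, file, buf, 0)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}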
type merkleRootGetter interface {
// GetCurrentMerkleRoot returns the current root of the global
// Keybase Merkle tree.
GetCurrentMerkleRoot(ctx context.Context) (
keybase1.MerkleRootV2, time.Time, error)
// VerifyMerkleRoot checks that the specified merkle root
// contains the given KBFS root; if not, it returns an error.
VerifyMerkleRoot(
ctx context.Context, root keybase1.MerkleRootV2,
kbfsRoot keybase1.KBFSRoot) error
}
type gitMetadataPutter interface {
PutGitMetadata(ctx context.Context, folder keybase1.Folder,
repoID keybase1.RepoID, metadata keybase1.GitLocalMetadata) error
}
// KeybaseService is an interface for communicating with the keybase
// service.
type KeybaseService interface {
merkleRootGetter
gitMetadataPutter
// Resolve, given an assertion, resolves it to a username/UID
// pair. The username <-> UID mapping is trusted and
// immutable, so it can be cached. If the assertion is just
// the username or a UID assertion, then the resolution can
// also be trusted. If the returned pair is equal to that of
// the current session, then it can also be
// trusted. Otherwise, Identify() needs to be called on the
// assertion before the assertion -> (username, UID) mapping
// can be trusted.
Resolve(ctx context.Context, assertion string) (
kbname.NormalizedUsername, keybase1.UserOrTeamID, error)
// Identify, given an assertion, returns a UserInfo struct
// with the user that matches that assertion, or an error
// otherwise. The reason string is displayed on any tracker
// popups spawned.
Identify(ctx context.Context, assertion, reason string) (
kbname.NormalizedUsername, keybase1.UserOrTeamID, error)
// NormalizeSocialAssertion creates a SocialAssertion from its input and
// normalizes it. The service name will be lowercased. If the service is
// case-insensitive, then the username will also be lowercased. Colon
// assertions (twitter:user) will be transformed to the user@twitter
// format. Only registered services are allowed.
NormalizeSocialAssertion(
ctx context.Context, assertion string) (keybase1.SocialAssertion, error)
// ResolveIdentifyImplicitTeam resolves, and optionally
// identifies, an implicit team. If the implicit team doesn't yet
// exist, and doIdentifies is true, one is created.
ResolveIdentifyImplicitTeam(
ctx context.Context, assertions, suffix string, tlfType tlf.Type,
doIdentifies bool, reason string) (ImplicitTeamInfo, error)
// ResolveImplicitTeamByID resolves an implicit team to a team
// name, given a team ID.
ResolveImplicitTeamByID(
ctx context.Context, teamID keybase1.TeamID) (string, error)
// CreateTeamTLF associates the given TLF ID with the team ID in
// the team's sigchain. If the team already has a TLF ID
// associated with it, this overwrites it.
CreateTeamTLF(
ctx context.Context, teamID keybase1.TeamID, tlfID tlf.ID) error
// GetTeamSettings returns the KBFS settings for the given team.
GetTeamSettings(ctx context.Context, teamID keybase1.TeamID) (
keybase1.KBFSTeamSettings, error)
// LoadUserPlusKeys returns a UserInfo struct for a
// user with the specified UID.
// If you have the UID for a user and don't require Identify to
// validate an assertion or the identity of a user, use this to
// get UserInfo structs as it is much cheaper than Identify.
//
// pollForKID, if non empty, causes `PollForKID` field to be populated, which
// causes the service to poll for the given KID. This is useful during
// provisioning where the provisioner needs to get the MD revision that the
// provisionee has set the rekey bit on.
LoadUserPlusKeys(ctx context.Context,
uid keybase1.UID, pollForKID keybase1.KID) (UserInfo, error)
// LoadTeamPlusKeys returns a TeamInfo struct for a team with the
// specified TeamID. The caller can specify `desiredKeyGen` to
// force a server check if that particular key gen isn't yet
// known; it may be set to UnspecifiedKeyGen if no server check is
// required. The caller can specify `desiredUID` and
// `desiredRole` to force a server check if that particular UID
// isn't a member of the team yet according to local caches; it
// may be set to "" if no server check is required.
LoadTeamPlusKeys(ctx context.Context, tid keybase1.TeamID,
tlfType tlf.Type, desiredKeyGen kbfsmd.KeyGen,
desiredUser keybase1.UserVersion, desiredKey kbfscrypto.VerifyingKey,
desiredRole keybase1.TeamRole) (TeamInfo, error)
// CurrentSession returns a SessionInfo struct with all the
// information for the current session, or an error otherwise.
CurrentSession(ctx context.Context, sessionID int) (SessionInfo, error)
// FavoriteAdd adds the given folder to the list of favorites.
FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
	// FavoriteDelete removes the given folder from the list of
// favorites.
FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
// FavoriteList returns the current list of favorites.
FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error)
// Notify sends a filesystem notification.
Notify(ctx context.Context, notification *keybase1.FSNotification) error
// NotifyPathUpdated sends a path updated notification.
NotifyPathUpdated(ctx context.Context, path string) error
// NotifySyncStatus sends a sync status notification.
NotifySyncStatus(ctx context.Context,
status *keybase1.FSPathSyncStatus) error
// FlushUserFromLocalCache instructs this layer to clear any
// KBFS-side, locally-cached information about the given user.
// This does NOT involve communication with the daemon, this is
// just to force future calls loading this user to fall through to
// the daemon itself, rather than being served from the cache.
FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID)
// TODO: Add CryptoClient methods, too.
// EstablishMountDir asks the service for the current mount path
// and sets it if not established.
EstablishMountDir(ctx context.Context) (string, error)
// Shutdown frees any resources associated with this
// instance. No other methods may be called after this is
// called.
Shutdown()
}
// KeybaseServiceCn defines methods needed to construct KeybaseService
// and Crypto implementations.
type KeybaseServiceCn interface {
NewKeybaseService(
config Config, params InitParams, ctx Context, log logger.Logger) (
KeybaseService, error)
NewCrypto(
config Config, params InitParams, ctx Context, log logger.Logger) (
Crypto, error)
NewChat(
config Config, params InitParams, ctx Context, log logger.Logger) (
Chat, error)
}
type resolver interface {
// Resolve, given an assertion, resolves it to a username/UID
// pair. The username <-> UID mapping is trusted and
// immutable, so it can be cached. If the assertion is just
// the username or a UID assertion, then the resolution can
// also be trusted. If the returned pair is equal to that of
// the current session, then it can also be
// trusted. Otherwise, Identify() needs to be called on the
// assertion before the assertion -> (username, UserOrTeamID) mapping
// can be trusted.
//
// TODO: some of the above assumptions on cacheability aren't
// right for subteams, which can change their name, so this may
// need updating.
Resolve(ctx context.Context, assertion string) (
kbname.NormalizedUsername, keybase1.UserOrTeamID, error)
// ResolveImplicitTeam resolves the given implicit team.
ResolveImplicitTeam(
ctx context.Context, assertions, suffix string, tlfType tlf.Type) (
ImplicitTeamInfo, error)
// ResolveImplicitTeamByID resolves the given implicit team, given
// a team ID.
ResolveImplicitTeamByID(
ctx context.Context, teamID keybase1.TeamID, tlfType tlf.Type) (
ImplicitTeamInfo, error)
// ResolveTeamTLFID returns the TLF ID associated with a given
// team ID, or tlf.NullID if no ID is yet associated with that
// team.
ResolveTeamTLFID(ctx context.Context, teamID keybase1.TeamID) (
tlf.ID, error)
// NormalizeSocialAssertion creates a SocialAssertion from its input and
// normalizes it. The service name will be lowercased. If the service is
// case-insensitive, then the username will also be lowercased. Colon
// assertions (twitter:user) will be transformed to the user@twitter
// format. Only registered services are allowed.
NormalizeSocialAssertion(
ctx context.Context, assertion string) (keybase1.SocialAssertion, error)
}
type identifier interface {
// Identify resolves an assertion (which could also be a
// username) to a UserInfo struct, spawning tracker popups if
// necessary. The reason string is displayed on any tracker
// popups spawned.
Identify(ctx context.Context, assertion, reason string) (
kbname.NormalizedUsername, keybase1.UserOrTeamID, error)
// IdentifyImplicitTeam identifies (and creates if necessary) the
// given implicit team.
IdentifyImplicitTeam(
ctx context.Context, assertions, suffix string, tlfType tlf.Type,
reason string) (ImplicitTeamInfo, error)
}
type normalizedUsernameGetter interface {
// GetNormalizedUsername returns the normalized username
// corresponding to the given UID.
GetNormalizedUsername(ctx context.Context, id keybase1.UserOrTeamID) (
kbname.NormalizedUsername, error)
}
// CurrentSessionGetter is an interface for objects that can return
// session info.
type CurrentSessionGetter interface {
// GetCurrentSession gets the current session info.
GetCurrentSession(ctx context.Context) (SessionInfo, error)
}
// teamMembershipChecker is a copy of kbfsmd.TeamMembershipChecker for
// embedding in KBPKI. Unfortunately, this is necessary since mockgen
// can't handle embedded interfaces living in other packages.
type teamMembershipChecker interface {
// IsTeamWriter is a copy of
// kbfsmd.TeamMembershipChecker.IsTeamWriter.
IsTeamWriter(ctx context.Context, tid keybase1.TeamID, uid keybase1.UID,
verifyingKey kbfscrypto.VerifyingKey) (bool, error)
// NoLongerTeamWriter returns the global Merkle root of the
// most-recent time the given user (with the given device key,
// which implies an eldest seqno) transitioned from being a writer
// to not being a writer on the given team. If the user was never
// a writer of the team, it returns an error.
NoLongerTeamWriter(
ctx context.Context, tid keybase1.TeamID, tlfType tlf.Type,
uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey) (
keybase1.MerkleRootV2, error)
// IsTeamReader is a copy of
// kbfsmd.TeamMembershipChecker.IsTeamReader.
IsTeamReader(ctx context.Context, tid keybase1.TeamID, uid keybase1.UID) (
bool, error)
}
type teamKeysGetter interface {
// GetTeamTLFCryptKeys gets all of a team's secret crypt keys, by
// generation, as well as the latest key generation number for the
// team. The caller can specify `desiredKeyGen` to force a server
// check if that particular key gen isn't yet known; it may be set
// to UnspecifiedKeyGen if no server check is required.
GetTeamTLFCryptKeys(ctx context.Context, tid keybase1.TeamID,
desiredKeyGen kbfsmd.KeyGen) (
map[kbfsmd.KeyGen]kbfscrypto.TLFCryptKey, kbfsmd.KeyGen, error)
}
type teamRootIDGetter interface {
// GetTeamRootID returns the root team ID for the given (sub)team
// ID.
GetTeamRootID(ctx context.Context, tid keybase1.TeamID) (
keybase1.TeamID, error)
}
// KBPKI interacts with the Keybase daemon to fetch user info.
type KBPKI interface {
CurrentSessionGetter
resolver
identifier
normalizedUsernameGetter
merkleRootGetter
teamMembershipChecker
teamKeysGetter
teamRootIDGetter
gitMetadataPutter
// HasVerifyingKey returns nil if the given user has the given
// VerifyingKey, and an error otherwise. If the revoked key was
// valid according to the untrusted server timestamps, a special
// error type `RevokedDeviceVerificationError` is returned, which
// includes information the caller can use to verify the key using
// the merkle tree.
HasVerifyingKey(ctx context.Context, uid keybase1.UID,
verifyingKey kbfscrypto.VerifyingKey,
atServerTime time.Time) error
// GetCryptPublicKeys gets all of a user's crypt public keys (including
// paper keys).
GetCryptPublicKeys(ctx context.Context, uid keybase1.UID) (
[]kbfscrypto.CryptPublicKey, error)
// TODO: Split the methods below off into a separate
// FavoriteOps interface.
// FavoriteAdd adds folder to the list of the logged in user's
// favorite folders. It is idempotent.
FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
// FavoriteDelete deletes folder from the list of the logged in user's
// favorite folders. It is idempotent.
FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
// FavoriteList returns the list of all favorite folders for
// the logged in user.
FavoriteList(ctx context.Context) ([]keybase1.Folder, error)
// CreateTeamTLF associates the given TLF ID with the team ID in
// the team's sigchain. If the team already has a TLF ID
// associated with it, this overwrites it.
CreateTeamTLF(
ctx context.Context, teamID keybase1.TeamID, tlfID tlf.ID) error
// Notify sends a filesystem notification.
Notify(ctx context.Context, notification *keybase1.FSNotification) error
// NotifyPathUpdated sends a path updated notification.
NotifyPathUpdated(ctx context.Context, path string) error
}
// KeyMetadata is an interface for something that holds key
// information. This is usually implemented by RootMetadata.
type KeyMetadata interface {
// TlfID returns the ID of the TLF for which this object holds
// key info.
TlfID() tlf.ID
// TypeForKeying returns the keying type for this MD.
TypeForKeying() tlf.KeyingType
// LatestKeyGeneration returns the most recent key generation
// with key data in this object, or PublicKeyGen if this TLF
// is public.
LatestKeyGeneration() kbfsmd.KeyGen
// GetTlfHandle returns the handle for the TLF. It must not
// return nil.
//
// TODO: Remove the need for this function in this interface,
// so that kbfsmd.RootMetadata can implement this interface
// fully.
GetTlfHandle() *TlfHandle
// IsWriter checks that the given user is a valid writer of the TLF
// right now.
IsWriter(
ctx context.Context, checker kbfsmd.TeamMembershipChecker,
uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey) (
bool, error)
// HasKeyForUser returns whether or not the given user has
// keys for at least one device. Returns an error if the TLF
// is public.
HasKeyForUser(user keybase1.UID) (bool, error)
// GetTLFCryptKeyParams returns all the necessary info to
// construct the TLF crypt key for the given key generation,
// user, and device (identified by its crypt public key), or
// false if not found. This returns an error if the TLF is
// public.
GetTLFCryptKeyParams(
keyGen kbfsmd.KeyGen, user keybase1.UID,
key kbfscrypto.CryptPublicKey) (
kbfscrypto.TLFEphemeralPublicKey,
kbfscrypto.EncryptedTLFCryptKeyClientHalf,
kbfscrypto.TLFCryptKeyServerHalfID, bool, error)
// StoresHistoricTLFCryptKeys returns whether or not history keys are
// symmetrically encrypted; if not, they're encrypted per-device.
StoresHistoricTLFCryptKeys() bool
// GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given
// generation using the current generation's TLFCryptKey.
GetHistoricTLFCryptKey(codec kbfscodec.Codec, keyGen kbfsmd.KeyGen,
currentKey kbfscrypto.TLFCryptKey) (
kbfscrypto.TLFCryptKey, error)
}
// KeyMetadataWithRootDirEntry is like KeyMetadata, but can also
// return the root dir entry for the associated MD update.
type KeyMetadataWithRootDirEntry interface {
KeyMetadata
// GetRootDirEntry returns the root directory entry for the
// associated MD.
GetRootDirEntry() DirEntry
}
type encryptionKeyGetter interface {
// GetTLFCryptKeyForEncryption gets the crypt key to use for
// encryption (i.e., with the latest key generation) for the
// TLF with the given metadata.
GetTLFCryptKeyForEncryption(ctx context.Context, kmd KeyMetadata) (
kbfscrypto.TLFCryptKey, error)
}
type mdDecryptionKeyGetter interface {
// GetTLFCryptKeyForMDDecryption gets the crypt key to use for the
// TLF with the given metadata to decrypt the private portion of
// the metadata. It finds the appropriate key from mdWithKeys
// (which in most cases is the same as mdToDecrypt) if it's not
// already cached.
GetTLFCryptKeyForMDDecryption(ctx context.Context,
kmdToDecrypt, kmdWithKeys KeyMetadata) (
kbfscrypto.TLFCryptKey, error)
}
type blockDecryptionKeyGetter interface {
// GetTLFCryptKeyForBlockDecryption gets the crypt key to use
// for the TLF with the given metadata to decrypt the block
// pointed to by the given pointer.
GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer) (kbfscrypto.TLFCryptKey, error)
}
type blockKeyGetter interface {
encryptionKeyGetter
blockDecryptionKeyGetter
}
// KeyManager fetches and constructs the keys needed for KBFS file
// operations.
type KeyManager interface {
blockKeyGetter
mdDecryptionKeyGetter
// GetTLFCryptKeyOfAllGenerations gets the crypt keys of all generations
// for current devices. keys contains crypt keys from all generations, in
// order, starting from FirstValidKeyGen.
GetTLFCryptKeyOfAllGenerations(ctx context.Context, kmd KeyMetadata) (
keys []kbfscrypto.TLFCryptKey, err error)
// Rekey checks the given MD object, if it is a private TLF,
// against the current set of device keys for all valid
// readers and writers. If there are any new devices, it
// updates all existing key generations to include the new
// devices. If there are devices that have been removed, it
// creates a new epoch of keys for the TLF. If there was an
// error, or the RootMetadata wasn't changed, it returns false.
// Otherwise, it returns true. If a new key generation is
// added the second return value points to this new key. This
// is to allow for caching of the TLF crypt key only after a
// successful merged write of the metadata. Otherwise we could
// prematurely pollute the key cache.
//
// If the given MD object is a public TLF, it simply updates
// the TLF's handle with any newly-resolved writers.
//
// If promptPaper is set, prompts for any unlocked paper keys.
// promptPaper shouldn't be set if md is for a public TLF.
Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) (
bool, *kbfscrypto.TLFCryptKey, error)
}
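// A minimal sketch of the Rekey caching contract described above,
// assuming `km` is a KeyManager and `md` is a *RootMetadata being
// rekeyed; illustrative only, not the actual rekey flow:
//
//	changed, newKey, err := km.Rekey(ctx, md, false /* promptPaper */)
//	if err != nil || !changed {
//		return err
//	}
//	// ... write `md` to the server as a merged update ...
//	if newKey != nil {
//		// Only after the successful merged write is it safe to add
//		// *newKey to the key cache.
//	}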
// Reporter exports events (asynchronously) to any number of sinks
type Reporter interface {
// ReportErr records that a given error happened.
ReportErr(ctx context.Context, tlfName tlf.CanonicalName, t tlf.Type,
mode ErrorModeType, err error)
// AllKnownErrors returns all errors known to this Reporter.
AllKnownErrors() []ReportedError
// Notify sends the given notification to any sink.
Notify(ctx context.Context, notification *keybase1.FSNotification)
// NotifyPathUpdated sends the given notification to any sink.
NotifyPathUpdated(ctx context.Context, path string)
// NotifySyncStatus sends the given path sync status to any sink.
NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus)
// Shutdown frees any resources allocated by a Reporter.
Shutdown()
}
// MDCache gets and puts plaintext top-level metadata into the cache.
type MDCache interface {
// Get gets the metadata object associated with the given TLF ID,
// revision number, and branch ID (kbfsmd.NullBranchID for merged MD).
Get(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID) (ImmutableRootMetadata, error)
// Put stores the metadata object, only if an MD matching that TLF
// ID, revision number, and branch ID isn't already cached. If
// there is already a matching item in the cache, we require that
// the caller manage the cache explicitly, by deleting or replacing
// the entry. This should be used when putting existing MDs
// being fetched from the server.
Put(md ImmutableRootMetadata) error
// Delete removes the given metadata object from the cache if it exists.
Delete(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID)
// Replace replaces the entry matching the md under the old branch
// ID with the new one. If the old entry doesn't exist, this is
// equivalent to a Put, except that it overrides anything else
// that's already in the cache. This should be used when putting
// new MDs created locally.
Replace(newRmd ImmutableRootMetadata, oldBID kbfsmd.BranchID) error
// MarkPutToServer sets `PutToServer` to true for the specified
// MD, if it already exists in the cache.
MarkPutToServer(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID)
// GetIDForHandle retrieves a cached, trusted TLF ID for the given
// handle, if one exists.
GetIDForHandle(handle *TlfHandle) (tlf.ID, error)
// PutIDForHandle caches a trusted TLF ID for the given handle.
PutIDForHandle(handle *TlfHandle, id tlf.ID) error
// ChangeHandleForID moves an ID to be under a new handle, if the
// ID is cached already.
ChangeHandleForID(oldHandle *TlfHandle, newHandle *TlfHandle)
}
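// Illustrative sketch (not part of the contract) of when to use Put
// versus Replace, assuming `mdcache` is an MDCache, `fetched` is an
// ImmutableRootMetadata fetched from the server, and `localRmd` was
// created locally on branch `oldBID`:
//
//	// Server-fetched MD: cache it only if no matching entry exists.
//	_ = mdcache.Put(fetched)
//
//	// Locally-created MD: override whatever is cached for the old branch.
//	_ = mdcache.Replace(localRmd, oldBID)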
// KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys.
type KeyCache interface {
// GetTLFCryptKey gets the crypt key for the given TLF.
GetTLFCryptKey(tlf.ID, kbfsmd.KeyGen) (kbfscrypto.TLFCryptKey, error)
// PutTLFCryptKey stores the crypt key for the given TLF.
PutTLFCryptKey(tlf.ID, kbfsmd.KeyGen, kbfscrypto.TLFCryptKey) error
}
// BlockCacheLifetime denotes the lifetime of an entry in BlockCache.
type BlockCacheLifetime int
func (l BlockCacheLifetime) String() string {
switch l {
case NoCacheEntry:
return "NoCacheEntry"
case TransientEntry:
return "TransientEntry"
case PermanentEntry:
return "PermanentEntry"
}
return "Unknown"
}
const (
// NoCacheEntry means that the entry will not be cached.
NoCacheEntry BlockCacheLifetime = iota
// TransientEntry means that the cache entry may be evicted at
// any time.
TransientEntry
// PermanentEntry means that the cache entry must remain until
// explicitly removed from the cache.
PermanentEntry
)
// BlockCacheSimple gets and puts plaintext dir blocks and file blocks into
// a cache. These blocks are immutable and identified by their
// content hash.
type BlockCacheSimple interface {
// Get gets the block associated with the given block ID.
Get(ptr BlockPointer) (Block, error)
// Put stores the final (content-addressable) block associated
// with the given block ID. If lifetime is TransientEntry,
// then it is assumed that the block exists on the server and
// the entry may be evicted from the cache at any time. If
// lifetime is PermanentEntry, then it is assumed that the
// block doesn't exist on the server and must remain in the
// cache until explicitly removed. As an intermediary state,
// as when a block is being sent to the server, the block may
// be put into the cache both with TransientEntry and
// PermanentEntry -- these are two separate entries. This is
// fine, since the block should be the same.
Put(ptr BlockPointer, tlf tlf.ID, block Block,
lifetime BlockCacheLifetime) error
}
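// Hedged sketch of the intermediate state described for Put, assuming
// `bcache` is a BlockCacheSimple and `ptr`, `tlfID`, and `block` describe
// a locally-created block that is about to be flushed to the server:
//
//	// Not yet on the server: it must stay cached until removed.
//	_ = bcache.Put(ptr, tlfID, block, PermanentEntry)
//	// Once the flush is underway, a second, evictable entry may coexist.
//	_ = bcache.Put(ptr, tlfID, block, TransientEntry)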
// BlockCache specifies the interface of BlockCacheSimple, and also more
// advanced and internal methods.
type BlockCache interface {
BlockCacheSimple
// CheckForKnownPtr sees whether this cache has a transient
// entry for the given file block (which must be a direct file
// block containing data). Returns the full BlockPointer
// associated with that ID, including key and data versions.
// If no ID is known, return an uninitialized BlockPointer and
// a nil error.
CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (BlockPointer, error)
// DeleteTransient removes the transient entry for the given
// pointer from the cache, as well as any cached IDs so the block
// won't be reused.
DeleteTransient(ptr BlockPointer, tlf tlf.ID) error
// DeletePermanent removes the permanent entry for the non-dirty block
// associated with the given block ID from the cache. No
// error is returned if no block exists for the given ID.
DeletePermanent(id kbfsblock.ID) error
// DeleteKnownPtr removes the cached ID for the given file
// block. It does not remove the block itself.
DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error
// GetWithPrefetch retrieves a block from the cache, along with the block's
// prefetch status.
GetWithPrefetch(ptr BlockPointer) (block Block,
prefetchStatus PrefetchStatus, lifetime BlockCacheLifetime, err error)
// PutWithPrefetch puts a block into the cache, along with whether or not
// it has triggered or finished a prefetch.
PutWithPrefetch(ptr BlockPointer, tlf tlf.ID, block Block,
lifetime BlockCacheLifetime, prefetchStatus PrefetchStatus) error
// SetCleanBytesCapacity atomically sets clean bytes capacity for block
// cache.
SetCleanBytesCapacity(capacity uint64)
// GetCleanBytesCapacity atomically gets clean bytes capacity for block
// cache.
GetCleanBytesCapacity() (capacity uint64)
}
// DirtyPermChan is a channel that gets closed when the holder has
// permission to write. We are forced to define it as a type due to a
// bug in mockgen that can't handle return values with a chan
// struct{}.
type DirtyPermChan <-chan struct{}
type isDirtyProvider interface {
// IsDirty states whether or not the block associated with the
// given block pointer and branch name is dirty in this cache.
IsDirty(tlfID tlf.ID, ptr BlockPointer, branch BranchName) bool
}
// DirtyBlockCache gets and puts plaintext dir blocks and file blocks
// into a cache, which have been modified by the application and not
// yet committed on the KBFS servers. They are identified by a
// (potentially random) ID that may not have any relationship with
// their context, along with a Branch in case the same TLF is being
// modified via multiple branches. Dirty blocks are never evicted,
// they must be deleted explicitly.
type DirtyBlockCache interface {
isDirtyProvider
// Get gets the block associated with the given block ID. Returns
// the dirty block for the given ID, if one exists.
Get(tlfID tlf.ID, ptr BlockPointer, branch BranchName) (Block, error)
// Put stores a dirty block currently identified by the
// given block pointer and branch name.
Put(tlfID tlf.ID, ptr BlockPointer, branch BranchName, block Block) error
// Delete removes the dirty block associated with the given block
// pointer and branch from the cache. No error is returned if no
// block exists for the given ID.
Delete(tlfID tlf.ID, ptr BlockPointer, branch BranchName) error
// IsAnyDirty returns whether there are any dirty blocks in the
// cache. tlfID may be ignored.
IsAnyDirty(tlfID tlf.ID) bool
// RequestPermissionToDirty is called whenever a user wants to
// write data to a file. The caller provides an estimated number
// of bytes that will become dirty -- this is difficult to know
// exactly without pre-fetching all the blocks involved, but in
// practice we can just use the number of bytes sent in via the
// Write. It returns a channel that blocks until the cache is
// ready to receive more dirty data, at which point the channel is
// closed. The user must call
// `UpdateUnsyncedBytes(-estimatedDirtyBytes)` once it has
// completed its write and called `UpdateUnsyncedBytes` for all
// the exact dirty block sizes.
RequestPermissionToDirty(ctx context.Context, tlfID tlf.ID,
estimatedDirtyBytes int64) (DirtyPermChan, error)
// UpdateUnsyncedBytes is called by a user, who has already been
// granted permission to write, with the delta in block sizes that
// were dirtied as part of the write. So for example, if a
// newly-dirtied block of 20 bytes was extended by 5 bytes, they
// should send 25. If on the next write (before any syncs), bytes
// 10-15 of that same block were overwritten, they should send 0
// over the channel because there were no new bytes. If an
// already-dirtied block is truncated, or if previously requested
// bytes have now been updated more accurately in previous
// requests, newUnsyncedBytes may be negative. wasSyncing should
// be true if `BlockSyncStarted` has already been called for this
// block.
UpdateUnsyncedBytes(tlfID tlf.ID, newUnsyncedBytes int64, wasSyncing bool)
// UpdateSyncingBytes is called when a particular block has
// started syncing, or with a negative number when a block is no
// longer syncing due to an error (and BlockSyncFinished will
// never be called).
UpdateSyncingBytes(tlfID tlf.ID, size int64)
// BlockSyncFinished is called when a particular block has
// finished syncing, though the overall sync might not yet be
// complete. This lets the cache know it might be able to grant
// more permission to writers.
BlockSyncFinished(tlfID tlf.ID, size int64)
// SyncFinished is called when a complete sync has completed and
// its dirty blocks have been removed from the cache. This lets
// the cache know it might be able to grant more permission to
// writers.
SyncFinished(tlfID tlf.ID, size int64)
// ShouldForceSync returns true if the sync buffer is full enough
// to force all callers to sync their data immediately.
ShouldForceSync(tlfID tlf.ID) bool
// Shutdown frees any resources associated with this instance. It
// returns an error if there are any unsynced blocks.
Shutdown() error
}
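// A minimal sketch of the write-permission protocol described above,
// assuming `dbc` is a DirtyBlockCache and the caller is about to dirty
// roughly len(data) bytes in `tlfID`; illustrative only:
//
//	estimated := int64(len(data))
//	ch, err := dbc.RequestPermissionToDirty(ctx, tlfID, estimated)
//	if err != nil {
//		return err
//	}
//	select {
//	case <-ch: // the cache is ready to accept more dirty data
//	case <-ctx.Done():
//		return ctx.Err()
//	}
//	// ... do the write, calling UpdateUnsyncedBytes with the exact
//	// size deltas of each dirtied block ...
//	dbc.UpdateUnsyncedBytes(tlfID, -estimated, false) // release the estimate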
// DiskBlockCache caches blocks to the disk.
type DiskBlockCache interface {
// Get gets a block from the disk cache.
Get(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID) (
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf,
prefetchStatus PrefetchStatus, err error)
// Put puts a block to the disk cache. Returns after it has updated the
// metadata but before it has finished writing the block.
Put(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
// Delete deletes some blocks from the disk cache.
Delete(ctx context.Context, blockIDs []kbfsblock.ID) (numRemoved int,
sizeRemoved int64, err error)
// UpdateMetadata updates metadata for a given block in the disk cache.
UpdateMetadata(ctx context.Context, blockID kbfsblock.ID,
prefetchStatus PrefetchStatus) error
// Status returns the current status of the disk cache.
Status(ctx context.Context) map[string]DiskBlockCacheStatus
// Shutdown cleanly shuts down the disk block cache.
Shutdown(ctx context.Context)
}
// cryptoPure contains all methods of Crypto that don't depend on
// implicit state, i.e. they're pure functions of the input.
type cryptoPure interface {
// MakeRandomTlfID generates a dir ID using a CSPRNG.
MakeRandomTlfID(t tlf.Type) (tlf.ID, error)
// MakeRandomBranchID generates a per-device branch ID using a
// CSPRNG. It will not return LocalSquashBranchID or
// kbfsmd.NullBranchID.
MakeRandomBranchID() (kbfsmd.BranchID, error)
// MakeTemporaryBlockID generates a temporary block ID using a
// CSPRNG. This is used for indirect blocks before they're
// committed to the server.
MakeTemporaryBlockID() (kbfsblock.ID, error)
// MakeRefNonce generates a block reference nonce using a
// CSPRNG. This is used for distinguishing different references to
// the same BlockID.
MakeBlockRefNonce() (kbfsblock.RefNonce, error)
// MakeRandomTLFEphemeralKeys generates ephemeral keys using a
// CSPRNG for a TLF. These keys can then be used to key/rekey
// the TLF.
MakeRandomTLFEphemeralKeys() (kbfscrypto.TLFEphemeralPublicKey,
kbfscrypto.TLFEphemeralPrivateKey, error)
// MakeRandomTLFKeys generates keys using a CSPRNG for a
// single key generation of a TLF.
MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey,
kbfscrypto.TLFPrivateKey, kbfscrypto.TLFCryptKey, error)
// MakeRandomBlockCryptKeyServerHalf generates the server-side of
// a block crypt key.
MakeRandomBlockCryptKeyServerHalf() (
kbfscrypto.BlockCryptKeyServerHalf, error)
// EncryptPrivateMetadata encrypts a PrivateMetadata object.
EncryptPrivateMetadata(
pmd PrivateMetadata, key kbfscrypto.TLFCryptKey) (
kbfscrypto.EncryptedPrivateMetadata, error)
// DecryptPrivateMetadata decrypts a PrivateMetadata object.
DecryptPrivateMetadata(
encryptedPMD kbfscrypto.EncryptedPrivateMetadata,
key kbfscrypto.TLFCryptKey) (PrivateMetadata, error)
// EncryptBlock encrypts a block. plainSize is the size of the encoded
// block; EncryptBlock() must guarantee that plainSize <=
// len(encryptedBlock).
EncryptBlock(block Block, key kbfscrypto.BlockCryptKey) (
plainSize int, encryptedBlock kbfscrypto.EncryptedBlock, err error)
// DecryptBlock decrypts a block. Similar to EncryptBlock(),
// DecryptBlock() must guarantee that (size of the decrypted
// block) <= len(encryptedBlock).
DecryptBlock(encryptedBlock kbfscrypto.EncryptedBlock,
key kbfscrypto.BlockCryptKey, block Block) error
}
// Crypto signs, verifies, encrypts, and decrypts stuff.
type Crypto interface {
cryptoPure
// Duplicate kbfscrypto.Signer here to work around gomock's
// limitations.
Sign(context.Context, []byte) (kbfscrypto.SignatureInfo, error)
SignForKBFS(context.Context, []byte) (kbfscrypto.SignatureInfo, error)
SignToString(context.Context, []byte) (string, error)
// DecryptTLFCryptKeyClientHalf decrypts a
// kbfscrypto.TLFCryptKeyClientHalf using the current device's
// private key and the TLF's ephemeral public key.
DecryptTLFCryptKeyClientHalf(ctx context.Context,
publicKey kbfscrypto.TLFEphemeralPublicKey,
encryptedClientHalf kbfscrypto.EncryptedTLFCryptKeyClientHalf) (
kbfscrypto.TLFCryptKeyClientHalf, error)
// DecryptTLFCryptKeyClientHalfAny decrypts one of the
// kbfscrypto.TLFCryptKeyClientHalf using the available
// private keys and the ephemeral public key. If promptPaper
// is true, the service will prompt the user for any unlocked
// paper keys.
DecryptTLFCryptKeyClientHalfAny(ctx context.Context,
keys []EncryptedTLFCryptKeyClientAndEphemeral,
promptPaper bool) (
kbfscrypto.TLFCryptKeyClientHalf, int, error)
// DecryptTeamMerkleLeaf decrypts a team-encrypted Merkle leaf
// using some team key generation greater than `minKeyGen`, and
// the provided ephemeral public key.
DecryptTeamMerkleLeaf(ctx context.Context, teamID keybase1.TeamID,
publicKey kbfscrypto.TLFEphemeralPublicKey,
encryptedMerkleLeaf kbfscrypto.EncryptedMerkleLeaf,
minKeyGen keybase1.PerTeamKeyGeneration) ([]byte, error)
// Shutdown frees any resources associated with this instance.
Shutdown()
}
type tlfIDGetter interface {
// GetIDForHandle returns the tlf.ID associated with the given
// handle, if the logged-in user has read permission on the
// folder. It may or may not create the folder if it doesn't
// exist yet, and it may return `tlf.NullID` with a `nil` error if
// it doesn't create a missing folder.
GetIDForHandle(ctx context.Context, handle *TlfHandle) (tlf.ID, error)
// ValidateLatestHandleNotFinal returns true if the TLF ID contained
// in `h` does not currently map to a finalized TLF.
ValidateLatestHandleNotFinal(ctx context.Context, h *TlfHandle) (
bool, error)
}
// MDOps gets and puts root metadata to an MDServer. On a get, it
// verifies the metadata is signed by the metadata's signing key.
type MDOps interface {
tlfIDGetter
// GetForTLF returns the current metadata object
// corresponding to the given top-level folder, if the logged-in
// user has read permission on the folder.
//
// If lockBeforeGet is not nil, it causes mdserver to take the lock on the
// lock ID before the get.
GetForTLF(ctx context.Context, id tlf.ID, lockBeforeGet *keybase1.LockID) (
ImmutableRootMetadata, error)
// GetForTLFByTime returns the newest merged MD update with a
// server timestamp less than or equal to `serverTime`.
GetForTLFByTime(ctx context.Context, id tlf.ID, serverTime time.Time) (
ImmutableRootMetadata, error)
// GetUnmergedForTLF is the same as the above but for unmerged
// metadata.
GetUnmergedForTLF(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) (
ImmutableRootMetadata, error)
// GetRange returns a range of metadata objects corresponding to
// the passed revision numbers (inclusive).
//
// If lockBeforeGet is not nil, it causes mdserver to take the lock on the
// lock ID before the get.
GetRange(ctx context.Context, id tlf.ID, start, stop kbfsmd.Revision,
lockID *keybase1.LockID) ([]ImmutableRootMetadata, error)
// GetUnmergedRange is the same as the above but for unmerged
// metadata history (inclusive).
GetUnmergedRange(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID,
start, stop kbfsmd.Revision) ([]ImmutableRootMetadata, error)
// Put stores the metadata object for the given top-level folder.
// This also adds the resulting ImmutableRootMetadata object to
// the mdcache, if the Put is successful. Note that constructing
// the ImmutableRootMetadata requires knowing the verifying key,
// which might not be the same as the local user's verifying key
// if the MD has been copied from a previous update.
//
// If lockContext is not nil, it causes the mdserver to check a lockID at
// the time of the put, and optionally (if specified in lockContext)
// releases the lock on the lock ID if the put is successful. Releasing the
// lock in mdserver is idempotent. Note that journalMDOps doesn't support
// lockContext for now. If journaling is enabled, use FinishSingleOp to
// require locks.
//
// The priority parameter specifies the priority of this particular MD put
// operation. When a conflict happens, mdserver tries to prioritize writes
// with higher priorities. The caller should use pre-defined (or define new)
// constants in the keybase1 package, such as keybase1.MDPriorityNormal. Note
// that journalMDOps doesn't support any priority other than
// MDPriorityNormal for now. If journaling is enabled, use FinishSingleOp
// to override priority.
Put(ctx context.Context, rmd *RootMetadata,
verifyingKey kbfscrypto.VerifyingKey,
lockContext *keybase1.LockContext, priority keybase1.MDPriority) (
ImmutableRootMetadata, error)
// PutUnmerged is the same as the above but for unmerged metadata
// history. This also adds the resulting ImmutableRootMetadata
// object to the mdcache, if the PutUnmerged is successful. Note
// that constructing the ImmutableRootMetadata requires knowing
// the verifying key, which might not be the same as the local
// user's verifying key if the MD has been copied from a previous
// update.
PutUnmerged(ctx context.Context, rmd *RootMetadata,
verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error)
// PruneBranch prunes all unmerged history for the given TLF
// branch.
PruneBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) error
// ResolveBranch prunes all unmerged history for the given TLF
// branch, and also deletes any blocks in `blocksToDelete` that
// are still in the local journal. In addition, it appends the
// given MD to the journal. This also adds the resulting
// ImmutableRootMetadata object to the mdcache, if the
// ResolveBranch is successful. Note that constructing the
// ImmutableRootMetadata requires knowing the verifying key, which
// might not be the same as the local user's verifying key if the
// MD has been copied from a previous update.
ResolveBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID,
blocksToDelete []kbfsblock.ID, rmd *RootMetadata,
verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error)
// GetLatestHandleForTLF returns the server's idea of the latest
// handle for the TLF, which may not yet be reflected in the MD if
// the TLF hasn't been rekeyed since it entered into a conflicting
// state.
GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (tlf.Handle, error)
}
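// Hedged usage sketch for Put, assuming `mdOps` is an MDOps, `rmd` is a
// *RootMetadata ready to be written, `verifyingKey` is in scope, and no
// mdserver lock is involved; illustrative only:
//
//	irmd, err := mdOps.Put(
//		ctx, rmd, verifyingKey, nil, keybase1.MDPriorityNormal)
//	if err != nil {
//		return err
//	}
//	_ = irmd // the resulting immutable MD is also added to the MD cache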
// KeyOps fetches server-side key halves from the key server.
type KeyOps interface {
// GetTLFCryptKeyServerHalf gets a server-side key half for a
// device given the key half ID.
GetTLFCryptKeyServerHalf(ctx context.Context,
serverHalfID kbfscrypto.TLFCryptKeyServerHalfID,
cryptPublicKey kbfscrypto.CryptPublicKey) (
kbfscrypto.TLFCryptKeyServerHalf, error)
// PutTLFCryptKeyServerHalves stores server-side key halves for a
// set of users and devices.
PutTLFCryptKeyServerHalves(ctx context.Context,
keyServerHalves kbfsmd.UserDeviceKeyServerHalves) error
// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
// device given the key half ID.
DeleteTLFCryptKeyServerHalf(ctx context.Context,
uid keybase1.UID, key kbfscrypto.CryptPublicKey,
serverHalfID kbfscrypto.TLFCryptKeyServerHalfID) error
}
// Prefetcher is an interface to a block prefetcher.
type Prefetcher interface {
// ProcessBlockForPrefetch potentially triggers and monitors a prefetch.
ProcessBlockForPrefetch(ctx context.Context, ptr BlockPointer, block Block,
kmd KeyMetadata, priority int, lifetime BlockCacheLifetime,
prefetchStatus PrefetchStatus)
// CancelPrefetch notifies the prefetcher that a prefetch should be
// canceled.
CancelPrefetch(kbfsblock.ID)
// Shutdown shuts down the prefetcher idempotently. Future calls to
// the various Prefetch* methods will return io.EOF. The returned channel
// allows upstream components to block until all pending prefetches are
// complete. This feature is mainly used for testing, but also to toggle
// the prefetcher on and off.
Shutdown() <-chan struct{}
}
// BlockOps gets and puts data blocks to a BlockServer. It performs
// the necessary crypto operations on each block.
type BlockOps interface {
blockRetrieverGetter
// Get gets the block associated with the given block pointer
// (which belongs to the TLF with the given key metadata),
// decrypts it if necessary, and fills in the provided block
// object with its contents, if the logged-in user has read
// permission for that block. cacheLifetime controls the behavior of the
// write-through cache once a Get completes.
Get(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer,
block Block, cacheLifetime BlockCacheLifetime) error
// GetEncodedSize gets the encoded size of the block associated
// with the given block pointer (which belongs to the TLF with the
// given key metadata).
GetEncodedSize(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer) (uint32, keybase1.BlockStatus, error)
// Ready turns the given block (which belongs to the TLF with
// the given key metadata) into encoded (and encrypted) data,
// and calculates its ID and size, so that we can do a bunch
// of block puts in parallel for every write. Ready() must
// guarantee that plainSize <= readyBlockData.QuotaSize().
Ready(ctx context.Context, kmd KeyMetadata, block Block) (
id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData, err error)
// Delete instructs the server to delete the given block references.
// It returns the number of not-yet-deleted references to
// each block reference.
Delete(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) (
liveCounts map[kbfsblock.ID]int, err error)
// Archive instructs the server to mark the given block references
// as "archived"; that is, they are not being used in the current
// view of the folder, and shouldn't be served to anyone other
// than folder writers.
Archive(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) error
// TogglePrefetcher activates or deactivates the prefetcher.
TogglePrefetcher(enable bool) <-chan struct{}
// Prefetcher retrieves this BlockOps' Prefetcher.
Prefetcher() Prefetcher
// Shutdown shuts down all the workers performing Get operations
Shutdown()
}
// Duplicate kbfscrypto.AuthTokenRefreshHandler here to work around
// gomock's limitations.
type authTokenRefreshHandler interface {
RefreshAuthToken(context.Context)
}
// MDServer gets and puts metadata for each top-level directory. The
// instantiation should be able to fetch session/user details via KBPKI. On a
// put, the server is responsible for 1) ensuring the user has appropriate
// permissions for whatever modifications were made; 2) ensuring that
// LastModifyingWriter and LastModifyingUser are updated appropriately; and 3)
// detecting conflicting writes based on the previous root block ID (i.e., when
// it supports strict consistency). On a get, it verifies the logged-in user
// has read permissions.
//
// TODO: Add interface for searching by time
type MDServer interface {
authTokenRefreshHandler
// GetForHandle returns the current (signed/encrypted) metadata
// object corresponding to the given top-level folder's handle, if
// the logged-in user has read permission on the folder. It
// creates the folder if one doesn't exist yet, and the logged-in
// user has permission to do so.
//
// If lockBeforeGet is not nil, it takes a lock on the lock ID before
// trying to get anything. If taking the lock fails, an error is returned.
// Note that taking a lock from the mdserver is idempotent.
//
// If there is no returned error, then the returned ID must
// always be non-null. A nil *RootMetadataSigned may be
// returned, but if it is non-nil, then its ID must match the
// returned ID.
GetForHandle(ctx context.Context, handle tlf.Handle,
mStatus kbfsmd.MergeStatus, lockBeforeGet *keybase1.LockID) (
tlf.ID, *RootMetadataSigned, error)
// GetForTLF returns the current (signed/encrypted) metadata object
// corresponding to the given top-level folder, if the logged-in
// user has read permission on the folder.
//
// If lockBeforeGet is not nil, it takes a lock on the lock ID before
// trying to get anything. If taking the lock fails, an error is returned.
// Note that taking a lock from the mdserver is idempotent.
GetForTLF(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, mStatus kbfsmd.MergeStatus,
lockBeforeGet *keybase1.LockID) (*RootMetadataSigned, error)
// GetForTLFByTime returns the earliest merged MD update with a
// server timestamp greater than or equal to `serverTime`.
GetForTLFByTime(ctx context.Context, id tlf.ID, serverTime time.Time) (
*RootMetadataSigned, error)
// GetRange returns a range of (signed/encrypted) metadata objects
// corresponding to the passed revision numbers (inclusive).
//
// If lockBeforeGet is not nil, it takes a lock on the lock ID before
// trying to get anything. If taking the lock fails, an error is returned.
// Note that taking a lock from the mdserver is idempotent.
GetRange(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, mStatus kbfsmd.MergeStatus,
start, stop kbfsmd.Revision, lockBeforeGet *keybase1.LockID) (
[]*RootMetadataSigned, error)
// Put stores the (signed/encrypted) metadata object for the given
// top-level folder. Note: If the unmerged bit is set in the metadata
// block's flags bitmask, it will be appended to the unmerged per-device
// history.
//
// If lockContext is not nil, it causes the mdserver to check a lockID at
// the time of the put, and optionally (if specified in lockContext)
// releases the lock on the lock ID if the put is successful. Releasing the
// lock in mdserver is idempotent.
Put(ctx context.Context, rmds *RootMetadataSigned, extra kbfsmd.ExtraMetadata,
lockContext *keybase1.LockContext, priority keybase1.MDPriority) error
// Lock ensures lockID for tlfID is taken by this session, i.e., it
// idempotently takes the lock. If the lock is already taken by *another*
// session, mdserver returns a throttle error, causing the RPC layer at the
// client to retry. So the caller of this method should observe behavior
// similar to a blocking call, which, upon successful return, ensures the
// lock is taken on the server. Note that the lock expires after a certain
// time, so it's important to make writes contingent on the lock by
// requiring the lockID in Put.
Lock(ctx context.Context, tlfID tlf.ID, lockID keybase1.LockID) error
// ReleaseLock ensures lockID for tlfID is not taken by this session, i.e.,
// it idempotently releases the lock. If the lock is already released or
// expired, this is a no-op.
ReleaseLock(ctx context.Context, tlfID tlf.ID, lockID keybase1.LockID) error
// StartImplicitTeamMigration tells mdserver to put an implicit team
// migration lock on id, which prevents any rekey MD writes from going
// in. Normal classic MD updates can still happen after implicit team
// migration has started, until an iTeam-style MD is written.
StartImplicitTeamMigration(ctx context.Context, id tlf.ID) (err error)
// PruneBranch prunes all unmerged history for the given TLF branch.
PruneBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) error
// RegisterForUpdate tells the MD server to inform the caller when
// there is a merged update with a revision number greater than
// currHead, which did NOT originate from this same MD server
// session. This method returns a chan which can receive only a
// single error before it's closed. If the received err is nil,
// then there is updated MD ready to fetch which didn't originate
// locally; if it is non-nil, then the previous registration
// cannot send the next notification (e.g., the connection to the
// MD server may have failed). In either case, the caller must
// re-register to get a new chan that can receive future update
// notifications.
RegisterForUpdate(ctx context.Context, id tlf.ID,
currHead kbfsmd.Revision) (<-chan error, error)
// CancelRegistration lets the local MDServer instance know that
// we are no longer interested in updates for the specified
// folder. It does not necessarily forward this cancellation to
// remote servers.
CancelRegistration(ctx context.Context, id tlf.ID)
// CheckForRekeys initiates the rekey checking process on the
// server. The server is allowed to delay this request, and so it
// returns a channel for returning the error. Actual rekey
// requests are expected to come in asynchronously.
CheckForRekeys(ctx context.Context) <-chan error
// TruncateLock attempts to take the history truncation lock for
// this folder, for a TTL defined by the server. Returns true if
// the lock was successfully taken.
TruncateLock(ctx context.Context, id tlf.ID) (bool, error)
// TruncateUnlock attempts to release the history truncation lock
// for this folder. Returns true if the lock was successfully
// released.
TruncateUnlock(ctx context.Context, id tlf.ID) (bool, error)
// DisableRekeyUpdatesForTesting disables processing rekey updates
// received from the mdserver while testing.
DisableRekeyUpdatesForTesting()
// Shutdown is called to shutdown an MDServer connection.
Shutdown()
// IsConnected returns whether the MDServer is connected.
IsConnected() bool
// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
// entered into a conflicting state. For the highest level of confidence, the caller
// should verify the mapping with a Merkle tree lookup.
GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (tlf.Handle, error)
// OffsetFromServerTime is the current estimate for how far off our
// local clock is from the mdserver clock. Add this to any
// mdserver-provided timestamps to get the "local" time of the
// corresponding event. If the returned bool is false, then we
// don't have a current estimate for the offset.
OffsetFromServerTime() (time.Duration, bool)
// GetKeyBundles looks up the key bundles for the given key
// bundle IDs. tlfID must be non-zero but either or both wkbID
// and rkbID can be zero, in which case nil will be returned
// for the respective bundle. If a bundle cannot be found, an
// error is returned and nils are returned for both bundles.
GetKeyBundles(ctx context.Context, tlfID tlf.ID,
wkbID kbfsmd.TLFWriterKeyBundleID, rkbID kbfsmd.TLFReaderKeyBundleID) (
*kbfsmd.TLFWriterKeyBundleV3, *kbfsmd.TLFReaderKeyBundleV3, error)
// CheckReachability is called when the Keybase service sends a notification
// that network connectivity has changed.
CheckReachability(ctx context.Context)
// FastForwardBackoff fast forwards any existing backoff timer for
// reconnects. If MD server is connected at the time this is called, it's
// essentially a no-op.
FastForwardBackoff()
// FindNextMD finds the serialized (and possibly encrypted) root
// metadata object from the leaf node of the second KBFS merkle
// tree to be produced after a given Keybase global merkle tree
// sequence number `rootSeqno` (and all merkle nodes between it
// and the root, and the root itself). It also returns the global
// merkle tree sequence number of the root that first included the
// returned metadata object.
FindNextMD(ctx context.Context, tlfID tlf.ID, rootSeqno keybase1.Seqno) (
nextKbfsRoot *kbfsmd.MerkleRoot, nextMerkleNodes [][]byte,
nextRootSeqno keybase1.Seqno, err error)
// GetMerkleRootLatest returns the latest KBFS merkle root for the
// given tree ID.
GetMerkleRootLatest(ctx context.Context, treeID keybase1.MerkleTreeID) (
root *kbfsmd.MerkleRoot, err error)
}
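// Illustrative sketch of the re-registration pattern that
// RegisterForUpdate expects, assuming `mdserver` is an MDServer and
// `currHead` tracks the latest merged revision already fetched:
//
//	for {
//		ch, err := mdserver.RegisterForUpdate(ctx, tlfID, currHead)
//		if err != nil {
//			return err
//		}
//		if regErr := <-ch; regErr == nil {
//			// A newer merged revision exists; fetch it and update
//			// currHead before re-registering.
//		}
//		// In either case (update available or broken registration),
//		// loop and register again to keep receiving notifications.
//	}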
type mdServerLocal interface {
MDServer
addNewAssertionForTest(
uid keybase1.UID, newAssertion keybase1.SocialAssertion) error
getCurrentMergedHeadRevision(ctx context.Context, id tlf.ID) (
rev kbfsmd.Revision, err error)
isShutdown() bool
copy(config mdServerLocalConfig) mdServerLocal
enableImplicitTeams()
setKbfsMerkleRoot(treeID keybase1.MerkleTreeID, root *kbfsmd.MerkleRoot)
}
// BlockServer gets and puts opaque data blocks. The instantiation
// should be able to fetch session/user details via KBPKI. On a
// put/delete, the server is responsible for: 1) checking that the ID
// matches the hash of the buffer; and 2) enforcing writer quotas.
type BlockServer interface {
authTokenRefreshHandler
// Get gets the (encrypted) block data associated with the given
// block ID and context, uses the provided block key to decrypt
// the block, and fills in the provided block object with its
// contents, if the logged-in user has read permission for that
// block.
Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error)
// GetEncodedSize gets the encoded size of the block associated
// with the given block ID and context (which belong to the given
// TLF).
GetEncodedSize(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context) (uint32, keybase1.BlockStatus, error)
// Put stores the (encrypted) block data under the given ID
// and context on the server, along with the server half of
// the block key. context should contain a kbfsblock.RefNonce
// of zero. There will be an initial reference for this block
// for the given context.
//
// Put should be idempotent, although it should also return an
// error if, for a given ID, any of the other arguments differ
// from previous Put calls with the same ID.
//
// If this returns a kbfsblock.ServerErrorOverQuota, with
// Throttled=false, the caller can treat it as informational
// and otherwise ignore the error.
Put(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
// PutAgain re-stores a previously deleted block under the same ID
// with the same data.
PutAgain(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
// AddBlockReference adds a new reference to the given block,
// defined by the given context (which should contain a
// non-zero kbfsblock.RefNonce). (Contexts with a
// kbfsblock.RefNonce of zero should be used when putting the
// block for the first time via Put().) Returns a
// kbfsblock.ServerErrorBlockNonExistent if id is unknown within this
// folder.
//
// AddBlockReference should be idempotent, although it should
// also return an error if, for a given ID and refnonce, any
// of the other fields of context differ from previous
// AddBlockReference calls with the same ID and refnonce.
//
// If this returns a kbfsblock.ServerErrorOverQuota, with
// Throttled=false, the caller can treat it as informational
// and otherwise ignore the error.
AddBlockReference(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context) error
// RemoveBlockReferences removes the references to the given block
// ID defined by the given contexts. If no references to the block
// remain after this call, the server is allowed to delete the
// corresponding block permanently. If the reference defined by
// the count has already been removed, the call is a no-op.
// It returns the number of remaining not-yet-deleted references after this
// reference has been removed.
RemoveBlockReferences(ctx context.Context, tlfID tlf.ID,
contexts kbfsblock.ContextMap) (liveCounts map[kbfsblock.ID]int, err error)
// ArchiveBlockReferences marks the given block references as
// "archived"; that is, they are not being used in the current
// view of the folder, and shouldn't be served to anyone other
// than folder writers.
//
// For a given ID/refnonce pair, ArchiveBlockReferences should
// be idempotent, although it should also return an error if
// any of the other fields of the context differ from previous
// calls with the same ID/refnonce pair.
ArchiveBlockReferences(ctx context.Context, tlfID tlf.ID,
contexts kbfsblock.ContextMap) error
// IsUnflushed returns whether a given block is being queued
// locally for later flushing to another block server. If the
// block is currently being flushed to the server, this should
// return `true`, so that the caller will try to clean it up from
// the server if it's no longer needed.
IsUnflushed(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID) (
bool, error)
// Shutdown is called to shutdown a BlockServer connection.
Shutdown(ctx context.Context)
// GetUserQuotaInfo returns the quota for the logged-in user.
GetUserQuotaInfo(ctx context.Context) (info *kbfsblock.QuotaInfo, err error)
// GetTeamQuotaInfo returns the quota for a team.
GetTeamQuotaInfo(ctx context.Context, tid keybase1.TeamID) (
info *kbfsblock.QuotaInfo, err error)
}
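// Hedged sketch of how a caller might treat the informational
// over-quota error mentioned for Put and AddBlockReference, assuming
// `bserver`, `tlfID`, `id`, `bctx`, `buf`, and `serverHalf` are in
// scope; the exact error-matching form (value vs. pointer) is an
// assumption here:
//
//	err := bserver.Put(ctx, tlfID, id, bctx, buf, serverHalf)
//	if qe, ok := err.(kbfsblock.ServerErrorOverQuota); ok && !qe.Throttled {
//		// Informational only: report it, but treat the put as a success.
//		err = nil
//	}
//	if err != nil {
//		return err
//	}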
// blockServerLocal is the interface for BlockServer implementations
// that store data locally.
type blockServerLocal interface {
BlockServer
// getAllRefsForTest returns all the known block references
// for the given TLF, and should only be used during testing.
getAllRefsForTest(ctx context.Context, tlfID tlf.ID) (
map[kbfsblock.ID]blockRefMap, error)
}
// BlockSplitter decides when a file block needs to be split
type BlockSplitter interface {
// CopyUntilSplit copies data into the block until we reach the
// point where we should split, but only if writing to the end of
// the last block. If this is writing into the middle of a file,
// just copy everything that will fit into the block, and assume
// that block boundaries will be fixed later. Return how much was
// copied.
CopyUntilSplit(
block *FileBlock, lastBlock bool, data []byte, off int64) int64
// CheckSplit, given a block, figures out whether it ends at the
// right place. If so, return 0. If not, return either the
// offset in the block where it should be split, or -1 if more
// bytes from the next block should be appended.
CheckSplit(block *FileBlock) int64
// MaxPtrsPerBlock describes the number of indirect pointers we
// can fit into one indirect block.
MaxPtrsPerBlock() int
// ShouldEmbedBlockChanges decides whether we should keep the
// block changes embedded in the MD or not.
ShouldEmbedBlockChanges(bc *BlockChanges) bool
// SplitDirIfNeeded splits a direct DirBlock into multiple blocks
// if needed. It may modify `block`. If a split isn't needed, it
// returns a one-element slice containing `block`. If a split is
// needed, it returns a non-nil offset for the new block.
SplitDirIfNeeded(block *DirBlock) ([]*DirBlock, *StringOffset)
}
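// Illustrative sketch of the write loop implied by CopyUntilSplit,
// assuming `bsplit` is a BlockSplitter and `data`/`off` describe the
// portion of the user's write aimed at the current block; block
// advancement and offset bookkeeping are elided:
//
//	n := bsplit.CopyUntilSplit(block, lastBlock, data, off)
//	data = data[n:]
//	if len(data) > 0 {
//		// The splitter stopped at a block boundary; continue the
//		// write in the next (possibly new) block.
//	}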
// KeyServer fetches/writes server-side key halves from/to the key server.
type KeyServer interface {
// GetTLFCryptKeyServerHalf gets a server-side key half for a
// device given the key half ID.
GetTLFCryptKeyServerHalf(ctx context.Context,
serverHalfID kbfscrypto.TLFCryptKeyServerHalfID,
cryptPublicKey kbfscrypto.CryptPublicKey) (
kbfscrypto.TLFCryptKeyServerHalf, error)
// PutTLFCryptKeyServerHalves stores server-side key halves for a
// set of users and devices.
PutTLFCryptKeyServerHalves(ctx context.Context,
keyServerHalves kbfsmd.UserDeviceKeyServerHalves) error
// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
// device given the key half ID.
DeleteTLFCryptKeyServerHalf(ctx context.Context,
uid keybase1.UID, key kbfscrypto.CryptPublicKey,
serverHalfID kbfscrypto.TLFCryptKeyServerHalfID) error
// Shutdown is called to free any KeyServer resources.
Shutdown()
}
// NodeChange represents a change made to a node as part of an atomic
// file system operation.
type NodeChange struct {
Node Node
// Basenames of entries added/removed.
DirUpdated []string
FileUpdated []WriteRange
}
// Observer can be notified that there is an available update for a
// given directory. The notification callbacks should not block, or
// make any calls to the Notifier interface. Nodes passed to the
// observer should not be held past the end of the notification
// callback.
type Observer interface {
// LocalChange announces that the file at this Node has been
// updated locally, but not yet saved at the server.
LocalChange(ctx context.Context, node Node, write WriteRange)
// BatchChanges announces that the nodes have all been updated
// together atomically. Each NodeChange in `changes` affects the
// same top-level folder and branch. `allAffectedNodeIDs` is a
// list of all the nodes that had their underlying data changed,
// even if it wasn't a user-visible change (e.g., if a
// subdirectory was updated, the directory block for the TLF root
// is updated but that wouldn't be visible to a user).
BatchChanges(ctx context.Context, changes []NodeChange,
allAffectedNodeIDs []NodeID)
// TlfHandleChange announces that the handle of the corresponding
// folder branch has changed, likely due to previously-unresolved
// assertions becoming resolved. This indicates that the listener
// should switch over any cached paths for this folder-branch to
// the new name. Nodes that were acquired under the old name will
// still continue to work, but new lookups on the old name may
// either encounter alias errors or entirely new TLFs (in the case
// of conflicts).
TlfHandleChange(ctx context.Context, newHandle *TlfHandle)
}
// Notifier notifies registrants of directory changes
type Notifier interface {
// RegisterForChanges declares that the given Observer wants to
// subscribe to updates for the given top-level folders.
RegisterForChanges(folderBranches []FolderBranch, obs Observer) error
// UnregisterFromChanges declares that the given Observer no
// longer wants to subscribe to updates for the given top-level
// folders.
UnregisterFromChanges(folderBranches []FolderBranch, obs Observer) error
}
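// Hedged usage sketch, assuming `notifier` implements Notifier, `obs`
// implements Observer, and `fb` is the FolderBranch of interest:
//
//	if err := notifier.RegisterForChanges([]FolderBranch{fb}, obs); err != nil {
//		return err
//	}
//	defer notifier.UnregisterFromChanges([]FolderBranch{fb}, obs)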
// Clock is an interface for getting the current time
type Clock interface {
// Now returns the current time.
Now() time.Time
}
// ConflictRenamer deals with names for conflicting directory entries.
type ConflictRenamer interface {
// ConflictRename returns the appropriately modified filename.
ConflictRename(ctx context.Context, op op, original string) (
string, error)
}
// Tracer maybe adds traces to contexts.
type Tracer interface {
// MaybeStartTrace, if tracing is on, returns a new context
// based on the given one with an attached trace made with the
// given family and title. Otherwise, it returns the given
// context unchanged.
MaybeStartTrace(ctx context.Context, family, title string) context.Context
// MaybeFinishTrace finishes the trace attached to the given
// context, if any.
MaybeFinishTrace(ctx context.Context, err error)
}
// InitMode encapsulates mode differences.
type InitMode interface {
// Type returns the InitModeType of this mode.
Type() InitModeType
// IsTestMode returns whether we are running a test.
IsTestMode() bool
// BlockWorkers returns the number of block workers to run.
BlockWorkers() int
// PrefetchWorkers returns the number of prefetch workers to run.
PrefetchWorkers() int
// RekeyWorkers returns the number of rekey workers to run.
RekeyWorkers() int
// RekeyQueueSize returns the size of the rekey queue.
RekeyQueueSize() int
// DirtyBlockCacheEnabled indicates if we should run a dirty block
// cache.
DirtyBlockCacheEnabled() bool
// BackgroundFlushesEnabled indicates if we should periodically be
// flushing unsynced dirty writes to the server or journal.
BackgroundFlushesEnabled() bool
// MetricsEnabled indicates if we should be collecting metrics.
MetricsEnabled() bool
// ConflictResolutionEnabled indicates if we should be running
// the conflict resolution background process.
ConflictResolutionEnabled() bool
// BlockManagementEnabled indicates whether we should be running
// the block archive/delete background process, and whether we
// should be re-embedding block change blocks in MDs.
BlockManagementEnabled() bool
// QuotaReclamationEnabled indicates whether we should be running
// the quota reclamation background process.
QuotaReclamationEnabled() bool
// QuotaReclamationPeriod indicates how often each TLF
// should check for quota to reclaim. If the Duration.Seconds()
// == 0, quota reclamation should not run automatically.
QuotaReclamationPeriod() time.Duration
// QuotaReclamationMinUnrefAge indicates the minimum time a block
// must have been unreferenced before it can be reclaimed.
QuotaReclamationMinUnrefAge() time.Duration
// QuotaReclamationMinHeadAge indicates the minimum age of the
// most recently merged MD update before we can run reclamation,
// to avoid conflicting with a currently active writer.
QuotaReclamationMinHeadAge() time.Duration
// NodeCacheEnabled indicates whether we should be caching data nodes.
NodeCacheEnabled() bool
// TLFUpdatesEnabled indicates whether we should be registering
// ourselves with the mdserver for TLF updates.
TLFUpdatesEnabled() bool
// KBFSServiceEnabled indicates whether we should launch a local
// service for answering incoming KBFS-related RPCs.
KBFSServiceEnabled() bool
// JournalEnabled indicates whether this mode supports a journal.
JournalEnabled() bool
// UnmergedTLFsEnabled indicates whether it's possible for a
// device in this mode to have unmerged TLFs.
UnmergedTLFsEnabled() bool
// ServiceKeepaliveEnabled indicates whether we need to send
// keepalive probes to the Keybase service daemon.
ServiceKeepaliveEnabled() bool
// TLFEditHistoryEnabled indicates whether we should be running
// the background TLF edit history process.
TLFEditHistoryEnabled() bool
// SendEditNotificationsEnabled indicates whether we should send
// edit notifications on FS writes.
SendEditNotificationsEnabled() bool
// ClientType indicates the type we should advertise to the
// Keybase service.
ClientType() keybase1.ClientType
}
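// Illustrative sketch (not part of the original interface set): a helper that
// decides whether a background flush loop is worth starting for the current
// mode. It relies only on methods declared on InitMode above; combining these
// two particular checks is an assumption made for the example, not upstream
// policy.
func shouldStartBackgroundFlusher(mode InitMode) bool {
	// Flushing in the background only makes sense when the mode enables it
	// and at least one block worker is available to do the work.
	return mode.BackgroundFlushesEnabled() && mode.BlockWorkers() > 0
}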
type initModeGetter interface {
// Mode indicates how KBFS is configured to run.
Mode() InitMode
	// IsTestMode() indicates whether KBFS is running in a test.
IsTestMode() bool
}
// Config collects all the singleton instances needed to
// run KBFS in one place. The methods below are self-explanatory and
// do not require comments.
type Config interface {
dataVersioner
logMaker
blockCacher
blockServerGetter
codecGetter
cryptoPureGetter
keyGetterGetter
cryptoGetter
chatGetter
signerGetter
currentSessionGetterGetter
diskBlockCacheGetter
diskBlockCacheSetter
clockGetter
diskLimiterGetter
syncedTlfGetterSetter
initModeGetter
Tracer
KBFSOps() KBFSOps
SetKBFSOps(KBFSOps)
KBPKI() KBPKI
SetKBPKI(KBPKI)
KeyManager() KeyManager
SetKeyManager(KeyManager)
Reporter() Reporter
SetReporter(Reporter)
MDCache() MDCache
SetMDCache(MDCache)
KeyCache() KeyCache
SetKeyBundleCache(kbfsmd.KeyBundleCache)
KeyBundleCache() kbfsmd.KeyBundleCache
SetKeyCache(KeyCache)
SetBlockCache(BlockCache)
DirtyBlockCache() DirtyBlockCache
SetDirtyBlockCache(DirtyBlockCache)
SetCrypto(Crypto)
SetChat(Chat)
SetCodec(kbfscodec.Codec)
MDOps() MDOps
SetMDOps(MDOps)
KeyOps() KeyOps
SetKeyOps(KeyOps)
BlockOps() BlockOps
SetBlockOps(BlockOps)
MDServer() MDServer
SetMDServer(MDServer)
SetBlockServer(BlockServer)
KeyServer() KeyServer
SetKeyServer(KeyServer)
KeybaseService() KeybaseService
SetKeybaseService(KeybaseService)
BlockSplitter() BlockSplitter
SetBlockSplitter(BlockSplitter)
Notifier() Notifier
SetNotifier(Notifier)
SetClock(Clock)
ConflictRenamer() ConflictRenamer
SetConflictRenamer(ConflictRenamer)
UserHistory() *kbfsedits.UserHistory
SetUserHistory(*kbfsedits.UserHistory)
MetadataVersion() kbfsmd.MetadataVer
SetMetadataVersion(kbfsmd.MetadataVer)
DefaultBlockType() keybase1.BlockType
SetDefaultBlockType(blockType keybase1.BlockType)
RekeyQueue() RekeyQueue
SetRekeyQueue(RekeyQueue)
// ReqsBufSize indicates the number of read or write operations
// that can be buffered per folder
ReqsBufSize() int
// MaxNameBytes indicates the maximum supported size of a
// directory entry name in bytes.
MaxNameBytes() uint32
// DoBackgroundFlushes says whether we should periodically try to
// flush dirty files, even without a sync from the user. Should
// be true except for during some testing.
DoBackgroundFlushes() bool
SetDoBackgroundFlushes(bool)
// RekeyWithPromptWaitTime indicates how long to wait, after
// setting the rekey bit, before prompting for a paper key.
RekeyWithPromptWaitTime() time.Duration
SetRekeyWithPromptWaitTime(time.Duration)
// PrefetchStatus returns the prefetch status of a block.
PrefetchStatus(context.Context, tlf.ID, BlockPointer) PrefetchStatus
	// GracePeriod specifies a grace period for which a delayed cancellation
	// waits before actually cancelling the context. This is useful for giving
	// the critical portion of a slow remote operation some extra time to
	// finish, in an effort to avoid conflicts. An example is an O_EXCL Create
	// call, interrupted by an ALRM signal, that actually makes it to the
	// server while the application assumes it did not, since EINTR is
	// returned. A delayed cancellation allows us to distinguish between a
	// successful cancel (where the remote operation didn't make it to the
	// server) and a failed cancel (where the remote operation made it to the
	// server). However, the optimal value depends on the network conditions;
	// a long grace period under really good network conditions would just
	// unnecessarily slow down Ctrl-C.
//
// TODO: make this adaptive and self-change over time based on network
// conditions.
DelayedCancellationGracePeriod() time.Duration
SetDelayedCancellationGracePeriod(time.Duration)
// ResetCaches clears and re-initializes all data and key caches.
ResetCaches()
// StorageRoot returns the path to the storage root for this config.
StorageRoot() string
// MetricsRegistry may be nil, which should be interpreted as
// not using metrics at all. (i.e., as if UseNilMetrics were
// set). This differs from how go-metrics treats nil Registry
// objects, which is to use the default registry.
MetricsRegistry() metrics.Registry
SetMetricsRegistry(metrics.Registry)
// SetTraceOptions set the options for tracing (via x/net/trace).
SetTraceOptions(enabled bool)
// TLFValidDuration is the time TLFs are valid before identification needs to be redone.
TLFValidDuration() time.Duration
// SetTLFValidDuration sets TLFValidDuration.
SetTLFValidDuration(time.Duration)
// BGFlushDirOpBatchSize returns the directory op batch size for
// background flushes.
BGFlushDirOpBatchSize() int
// SetBGFlushDirOpBatchSize sets the directory op batch size for
// background flushes.
SetBGFlushDirOpBatchSize(s int)
// BGFlushPeriod returns how long to wait for a batch to fill up
// before syncing a set of changes to the servers.
BGFlushPeriod() time.Duration
// SetBGFlushPeriod sets how long to wait for a batch to fill up
// before syncing a set of changes to the servers.
SetBGFlushPeriod(p time.Duration)
// Shutdown is called to free config resources.
Shutdown(context.Context) error
// CheckStateOnShutdown tells the caller whether or not it is safe
// to check the state of the system on shutdown.
CheckStateOnShutdown() bool
// GetRekeyFSMLimiter returns the global rekey FSM limiter.
GetRekeyFSMLimiter() *OngoingWorkLimiter
// RootNodeWrappers returns the set of root node wrapper functions
// that will be applied to each newly-created root node.
RootNodeWrappers() []func(Node) Node
// AddRootNodeWrapper adds a new wrapper function that will be
// applied whenever a root Node is created. This will only apply
// to TLFs that are first accessed after `AddRootNodeWrapper` is
// called.
AddRootNodeWrapper(func(Node) Node)
}
// NodeCache holds Nodes, and allows libkbfs to update them when
// things change about the underlying KBFS blocks. It is probably
// most useful to instantiate this on a per-folder-branch basis, so
// that it can create a Path with the correct DirId and Branch name.
type NodeCache interface {
// GetOrCreate either makes a new Node for the given
// BlockPointer, or returns an existing one. TODO: If we ever
// support hard links, we will have to revisit the "name" and
// "parent" parameters here. name must not be empty. Returns
// an error if parent cannot be found.
GetOrCreate(ptr BlockPointer, name string, parent Node) (Node, error)
// Get returns the Node associated with the given ptr if one
// already exists. Otherwise, it returns nil.
Get(ref BlockRef) Node
// UpdatePointer updates the BlockPointer for the corresponding
// Node. NodeCache ignores this call when oldRef is not cached in
	// any Node. Returns the ID of the node that was updated,
// or `nil` if nothing was updated.
UpdatePointer(oldRef BlockRef, newPtr BlockPointer) NodeID
// Move swaps the parent node for the corresponding Node, and
// updates the node's name. NodeCache ignores the call when ptr
// is not cached. If newParent is nil, it treats the ptr's
// corresponding node as being unlinked from the old parent
// completely. If successful, it returns a function that can be
// called to undo the effect of the move (or `nil` if nothing
// needs to be done); if newParent cannot be found, it returns an
// error and a `nil` undo function.
Move(ref BlockRef, newParent Node, newName string) (
undoFn func(), err error)
	// Unlink sets the corresponding node's parent to nil and caches
// the provided path in case the node is still open. NodeCache
// ignores the call when ptr is not cached. The path is required
// because the caller may have made changes to the parent nodes
// already that shouldn't be reflected in the cached path. It
// returns a function that can be called to undo the effect of the
// unlink (or `nil` if nothing needs to be done).
Unlink(ref BlockRef, oldPath path, oldDe DirEntry) (undoFn func())
// IsUnlinked returns whether `Unlink` has been called for the
// reference behind this node.
IsUnlinked(node Node) bool
// UnlinkedDirEntry returns a directory entry if `Unlink` has been
// called for the reference behind this node.
UnlinkedDirEntry(node Node) DirEntry
// UpdateUnlinkedDirEntry modifies a cached directory entry for a
// node that has already been unlinked.
UpdateUnlinkedDirEntry(node Node, newDe DirEntry)
// PathFromNode creates the path up to a given Node.
PathFromNode(node Node) path
// AllNodes returns the complete set of nodes currently in the
// cache. The returned Nodes are not wrapped, and shouldn't be
// used for data access.
AllNodes() []Node
// AllNodeChildren returns the complete set of nodes currently in
// the cache, for which the given node `n` is a parent (direct or
// indirect). The returned slice does not include `n` itself.
// The returned Nodes are not wrapped, and shouldn't be used for
// data access.
AllNodeChildren(n Node) []Node
// AddRootWrapper adds a new wrapper function that will be applied
// whenever a root Node is created.
AddRootWrapper(func(Node) Node)
}
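// Illustrative sketch (not part of the original file): a lookup helper built
// only from the NodeCache methods declared above. It checks the cache by
// reference first and falls back to GetOrCreate; that the ref/ptr pair
// describes the same block is an assumption of this example.
func lookupOrCreateNode(nc NodeCache, ref BlockRef, ptr BlockPointer,
	name string, parent Node) (Node, error) {
	if n := nc.Get(ref); n != nil {
		return n, nil
	}
	return nc.GetOrCreate(ptr, name, parent)
}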
// fileBlockDeepCopier fetches a file block, makes a deep copy of it
// (duplicating pointer for any indirect blocks) and generates a new
// random temporary block ID for it. It returns the new BlockPointer,
// and internally saves the block for future uses.
type fileBlockDeepCopier func(context.Context, string, BlockPointer) (
BlockPointer, error)
// crAction represents a specific action to take as part of the
// conflict resolution process.
type crAction interface {
// swapUnmergedBlock should be called before do(), and if it
// returns true, the caller must use the merged block
// corresponding to the returned BlockPointer instead of
// unmergedBlock when calling do(). If BlockPointer{} is zeroPtr
// (and true is returned), just swap in the regular mergedBlock.
swapUnmergedBlock(
ctx context.Context, unmergedChains, mergedChains *crChains,
unmergedDir *dirData) (bool, BlockPointer, error)
// do modifies the given merged `dirData` in place to resolve the
// conflict, and potentially uses the provided
// `fileBlockDeepCopier`s to obtain copies of other blocks (along
// with new BlockPointers) when requiring a block copy. It
// returns a set of block infos that need to be unreferenced as
// part of this conflict resolution.
do(
ctx context.Context, unmergedCopier, mergedCopier fileBlockDeepCopier,
unmergedDir, mergedDir *dirData) (unrefs []BlockInfo, err error)
// updateOps potentially modifies, in place, the slices of
// unmerged and merged operations stored in the corresponding
// crChains for the given unmerged and merged most recent
// pointers. Eventually, the "unmerged" ops will be pushed as
	// part of an MD update, and so should contain any necessary
// operations to fully merge the unmerged data, including any
// conflict resolution. The "merged" ops will be played through
// locally, to notify any caches about the newly-obtained merged
// data (and any changes to local data that were required as part
// of conflict resolution, such as renames). A few things to note:
// * A particular action's updateOps method may be called more than
// once for different sets of chains, however it should only add
// new directory operations (like create/rm/rename) into directory
// chains.
// * updateOps doesn't necessarily result in correct BlockPointers within
// each of those ops; that must happen in a later phase.
// * mergedDir can be nil if the chain is for a file.
updateOps(
ctx context.Context, unmergedMostRecent, mergedMostRecent BlockPointer,
unmergedDir, mergedDir *dirData,
unmergedChains, mergedChains *crChains) error
// String returns a string representation for this crAction, used
// for debugging.
String() string
}
// RekeyQueue is a managed queue of folders needing some rekey action taken
// upon them by the current client.
type RekeyQueue interface {
// Enqueue enqueues a folder for rekey action. If the TLF is already in the
// rekey queue, the error channel of the existing one is returned.
Enqueue(tlf.ID)
// IsRekeyPending returns true if the given folder is in the rekey queue.
// Note that an ongoing rekey doesn't count as "pending".
IsRekeyPending(tlf.ID) bool
// Shutdown cancels all pending rekey actions and clears the queue. It
// doesn't cancel ongoing rekeys. After Shutdown() is called, the same
// RekeyQueue shouldn't be used anymore.
Shutdown()
}
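// Illustrative sketch (not part of the original file): enqueue a TLF for a
// rekey only when no rekey is already pending for it, using just the
// RekeyQueue methods declared above. Whether callers actually want this
// pre-check is an assumption of the example.
func enqueueIfNotPending(q RekeyQueue, id tlf.ID) {
	if !q.IsRekeyPending(id) {
		q.Enqueue(id)
	}
}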
// RekeyFSM is a Finite State Machine (FSM) for housekeeping rekey states for a
// FolderBranch. Each FolderBranch has its own FSM for rekeys.
//
// See rekey_fsm.go for implementation details.
//
// TODO: report FSM status in FolderBranchStatus?
type RekeyFSM interface {
// Event sends an event to the FSM.
Event(event RekeyEvent)
// Shutdown shuts down the FSM. No new event should be sent into the FSM
// after this method is called.
Shutdown()
// listenOnEvent adds a listener (callback) to the FSM so that when
// event happens, callback is called with the received event. If repeatedly
// is set to false, callback is called only once. Otherwise it's called every
// time event happens.
//
// Currently this is only used in tests and for RekeyFile. See comment for
// RequestRekeyAndWaitForOneFinishEvent for more details.
listenOnEvent(
event rekeyEventType, callback func(RekeyEvent), repeatedly bool)
}
// BlockRetriever specifies how to retrieve blocks.
type BlockRetriever interface {
// Request retrieves blocks asynchronously.
Request(ctx context.Context, priority int, kmd KeyMetadata,
ptr BlockPointer, block Block, lifetime BlockCacheLifetime) <-chan error
// RequestNoPrefetch retrieves blocks asynchronously, but doesn't trigger a
// prefetch unless the block had to be retrieved from the server.
RequestNoPrefetch(ctx context.Context, priority int, kmd KeyMetadata,
ptr BlockPointer, block Block, lifetime BlockCacheLifetime) <-chan error
// PutInCaches puts the block into the in-memory cache, and ensures that
// the disk cache metadata is updated.
PutInCaches(ctx context.Context, ptr BlockPointer, tlfID tlf.ID,
block Block, lifetime BlockCacheLifetime,
prefetchStatus PrefetchStatus) error
// TogglePrefetcher creates a new prefetcher.
TogglePrefetcher(enable bool, syncCh <-chan struct{}) <-chan struct{}
}
// ChatChannelNewMessageCB is a callback function that can be called
// when there's a new message on a given conversation.
type ChatChannelNewMessageCB func(convID chat1.ConversationID, body string)
// Chat specifies a minimal interface for Keybase chatting.
type Chat interface {
// GetConversationID returns the chat conversation ID associated
// with the given TLF name, type, chat type and channel name.
GetConversationID(
ctx context.Context, tlfName tlf.CanonicalName, tlfType tlf.Type,
channelName string, chatType chat1.TopicType) (
chat1.ConversationID, error)
// SendTextMessage (asynchronously) sends a text chat message to
// the given conversation and channel.
SendTextMessage(
ctx context.Context, tlfName tlf.CanonicalName, tlfType tlf.Type,
convID chat1.ConversationID, body string) error
// GetGroupedInbox returns the TLFs with the most-recent chat
// messages of the given type, up to `maxChats` of them.
GetGroupedInbox(
ctx context.Context, chatType chat1.TopicType, maxChats int) (
[]*TlfHandle, error)
// GetChannels returns a list of all the channels for a given
// chat. The entries in `convIDs` and `channelNames` have a 1-to-1
// correspondence.
GetChannels(
ctx context.Context, tlfName tlf.CanonicalName, tlfType tlf.Type,
chatType chat1.TopicType) (
convIDs []chat1.ConversationID, channelNames []string, err error)
// ReadChannel returns a set of text messages from a channel, and
// a `nextPage` pointer to the following set of messages. If the
// given `startPage` is non-nil, it's used to specify the starting
// point for the set of messages returned.
ReadChannel(
ctx context.Context, convID chat1.ConversationID, startPage []byte) (
messages []string, nextPage []byte, err error)
// RegisterForMessages registers a callback that will be called
// for each new messages that reaches convID.
RegisterForMessages(convID chat1.ConversationID, cb ChatChannelNewMessageCB)
// ClearCache is called to force this instance to forget
// everything it might have cached, e.g. when a user logs out.
ClearCache()
}
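// Illustrative sketch (not part of the original file): send a text message to
// a TLF conversation using only the Chat methods declared above. All channel
// names and topic types are passed in by the caller, so nothing about the
// real Keybase chat configuration is assumed here.
func sendTextToTLF(ctx context.Context, c Chat, name tlf.CanonicalName,
	tlfType tlf.Type, chatType chat1.TopicType, channel, body string) error {
	convID, err := c.GetConversationID(ctx, name, tlfType, channel, chatType)
	if err != nil {
		return err
	}
	return c.SendTextMessage(ctx, name, tlfType, convID, body)
}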
| 1 | 20,318 | This is probably overkill since we don't actually need to pass this specific interface anywhere, but I don't mind it! | keybase-kbfs | go |
@@ -56,6 +56,8 @@ func (d *Driver) AddPeer(ctx context.Context, addr swarm.Address) error {
d.mtx.Unlock()
return nil
}
+
+ d.receivedPeers[addr.ByteString()] = struct{}{}
d.mtx.Unlock()
connectedPeers := d.p2pService.Peers() | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package full
import (
"context"
"math/rand"
"sync"
"time"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/discovery"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
var _ topology.Driver = (*Driver)(nil)
// Driver drives the connectivity between nodes. It is a basic implementation of a connectivity Driver
// that enables full connectivity in the sense that:
// - Every peer which is added to the Driver gets broadcasted to every other peer regardless of its address.
// - A random peer is picked when asking for a peer to retrieve an arbitrary chunk (Peerer interface).
type Driver struct {
discovery discovery.Driver
addressBook addressbook.GetPutter
p2pService p2p.Service
	receivedPeers map[string]struct{} // track already received peers. Note: implement cleanup or expiration if needed to stop infinite growth
mtx sync.Mutex // guards received peers
logger logging.Logger
}
func New(disc discovery.Driver, addressBook addressbook.GetPutter, p2pService p2p.Service, logger logging.Logger) *Driver {
return &Driver{
discovery: disc,
addressBook: addressBook,
p2pService: p2pService,
receivedPeers: make(map[string]struct{}),
logger: logger,
}
}
// AddPeer adds a new peer to the topology driver.
// The peer is subsequently broadcasted to all connected peers.
// All connected peers are also broadcasted to the new peer.
func (d *Driver) AddPeer(ctx context.Context, addr swarm.Address) error {
d.mtx.Lock()
if _, ok := d.receivedPeers[addr.ByteString()]; ok {
d.mtx.Unlock()
return nil
}
d.mtx.Unlock()
connectedPeers := d.p2pService.Peers()
ma, exists := d.addressBook.Get(addr)
if !exists {
return topology.ErrNotFound
}
if !isConnected(addr, connectedPeers) {
peerAddr, err := d.p2pService.Connect(ctx, ma)
if err != nil {
return err
}
// update addr if it is wrong or it has been changed
if !addr.Equal(peerAddr) {
addr = peerAddr
d.addressBook.Put(peerAddr, ma)
}
}
connectedAddrs := []swarm.Address{}
for _, addressee := range connectedPeers {
// skip newly added peer
if addressee.Address.Equal(addr) {
continue
}
connectedAddrs = append(connectedAddrs, addressee.Address)
if err := d.discovery.BroadcastPeers(context.Background(), addressee.Address, addr); err != nil {
return err
}
}
if len(connectedAddrs) == 0 {
return nil
}
if err := d.discovery.BroadcastPeers(context.Background(), addr, connectedAddrs...); err != nil {
return err
}
d.mtx.Lock()
d.receivedPeers[addr.ByteString()] = struct{}{}
d.mtx.Unlock()
return nil
}
// ChunkPeer is used to suggest a peer to ask a certain chunk from.
func (d *Driver) ChunkPeer(addr swarm.Address) (peerAddr swarm.Address, err error) {
connectedPeers := d.p2pService.Peers()
if len(connectedPeers) == 0 {
return swarm.Address{}, topology.ErrNotFound
}
itemIdx := rand.Intn(len(connectedPeers))
i := 0
for _, v := range connectedPeers {
if i == itemIdx {
return v.Address, nil
}
i++
}
return swarm.Address{}, topology.ErrNotFound
}
func isConnected(addr swarm.Address, connectedPeers []p2p.Peer) bool {
for _, p := range connectedPeers {
if p.Address.Equal(addr) {
return true
}
}
return false
}
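// examplePickPeer is an illustrative sketch (not part of the original file):
// it suggests a peer for a chunk via ChunkPeer and reports whether that peer
// is currently connected, reusing isConnected above. Whether callers need
// this combined view is an assumption of the example.
func examplePickPeer(d *Driver, chunkAddr swarm.Address) (swarm.Address, bool, error) {
	peer, err := d.ChunkPeer(chunkAddr)
	if err != nil {
		return swarm.Address{}, false, err
	}
	return peer, isConnected(peer, d.p2pService.Peers()), nil
}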
| 1 | 9,057 | @janos I moved this back here, to avoid having multiple add peer calls being handled. I think it's either this or a global lock across the whole function. I think this is a better approach, especially since `hive.Broadcast` is blocking . We can remove the map entry on errors if needed. LMKWYT? | ethersphere-bee | go |
@@ -0,0 +1,19 @@
+<% if display_response_actions?(cart, current_user) %>
+ <div id='reply-section'>
+ <p>
+ <%= link_to "Approve",
+ approval_response_path(approver_action: 'approve',
+ cart_id: cart.id,
+ scheme: 'https',
+ user_id: current_user.id,
+ ), class: 'form-button' %>
+
+ <%= link_to "Reject",
+ approval_response_path(approver_action: 'reject',
+ cart_id: cart.id,
+ scheme: 'https',
+ user_id: current_user.id,
+ ), class: 'reject-link' %>
+ </p>
+ </div>
+<% end %> | 1 | 1 | 12,431 | Minor: can we put the conditional outside of the partial? | 18F-C2 | rb |
|
@@ -887,7 +887,8 @@ class AsyncSniffer(object):
if isinstance(offline, Packet):
tempfile_written, offline = _write_to_pcap([offline])
- elif isinstance(offline, list) and \
+ elif (isinstance(offline, list) or
+ isinstance(offline, PacketList)) and \
all(isinstance(elt, Packet) for elt in offline):
tempfile_written, offline = _write_to_pcap(offline)
| 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
Functions to send and receive packets.
"""
from __future__ import absolute_import, print_function
from collections import namedtuple
import itertools
from threading import Thread, Event
import os
import re
import subprocess
import time
import types
from scapy.compat import plain_str
from scapy.data import ETH_P_ALL
from scapy.config import conf
from scapy.error import warning
from scapy.interfaces import network_name, resolve_iface
from scapy.packet import Gen, Packet
from scapy.utils import get_temp_file, tcpdump, wrpcap, \
ContextManagerSubprocess, PcapReader
from scapy.plist import PacketList, SndRcvList
from scapy.error import log_runtime, log_interactive, Scapy_Exception
from scapy.base_classes import SetGen
from scapy.modules import six
from scapy.modules.six.moves import map
from scapy.sessions import DefaultSession
from scapy.supersocket import SuperSocket
if conf.route is None:
# unused import, only to initialize conf.route and conf.iface*
import scapy.route # noqa: F401
#################
# Debug class #
#################
class debug:
recv = []
sent = []
match = []
crashed_on = None
####################
# Send / Receive #
####################
QueryAnswer = namedtuple("QueryAnswer", ["query", "answer"])
_DOC_SNDRCV_PARAMS = """
:param pks: SuperSocket instance to send/receive packets
:param pkt: the packet to send
:param rcv_pks: if set, will be used instead of pks to receive packets.
packets will still be sent through pks
:param nofilter: put 1 to avoid use of BPF filters
:param retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets
are answered
:param timeout: how much time to wait after the last packet has been sent
:param verbose: set verbosity level
:param multi: whether to accept multiple answers for the same stimulus
:param prebuild: pre-build the packets before starting to send them.
Automatically enabled when a generator is passed as the packet
"""
class SndRcvHandler(object):
"""
Util to send/receive packets, used by sr*().
Do not use directly.
This matches the requests and answers.
Notes::
- threaded mode: enabling threaded mode will likely
break packet timestamps, but might result in a speedup
        when sending a large number of packets. Disabled by default.
- DEVS: store the outgoing timestamp right BEFORE sending the packet
to avoid races that could result in negative latency. We aren't Stadia
"""
def __init__(self, pks, pkt,
timeout=None, inter=0, verbose=None,
chainCC=False,
retry=0, multi=False, rcv_pks=None,
prebuild=False, _flood=None,
threaded=False,
session=None):
# Instantiate all arguments
if verbose is None:
verbose = conf.verb
if conf.debug_match:
debug.recv = PacketList([], "Received")
debug.sent = PacketList([], "Sent")
debug.match = SndRcvList([], "Matched")
self.nbrecv = 0
self.ans = []
self.pks = pks
self.rcv_pks = rcv_pks or pks
self.inter = inter
self.verbose = verbose
self.chainCC = chainCC
self.multi = multi
self.timeout = timeout
self.session = session
# Instantiate packet holders
if _flood:
self.tobesent = pkt
self.notans = _flood[0]
else:
if isinstance(pkt, types.GeneratorType) or prebuild:
self.tobesent = [p for p in pkt]
self.notans = len(self.tobesent)
else:
self.tobesent = (
SetGen(pkt) if not isinstance(pkt, Gen) else pkt
)
self.notans = self.tobesent.__iterlen__()
if retry < 0:
autostop = retry = -retry
else:
autostop = 0
if timeout is not None and timeout < 0:
self.timeout = None
while retry >= 0:
self.hsent = {}
if threaded or _flood:
# Send packets in thread.
# https://github.com/secdev/scapy/issues/1791
snd_thread = Thread(
target=self._sndrcv_snd
)
snd_thread.setDaemon(True)
# Start routine with callback
self._sndrcv_rcv(snd_thread.start)
# Ended. Let's close gracefully
if _flood:
# Flood: stop send thread
_flood[1]()
snd_thread.join()
else:
self._sndrcv_rcv(self._sndrcv_snd)
if multi:
remain = [
p for p in itertools.chain(*six.itervalues(self.hsent))
if not hasattr(p, '_answered')
]
else:
remain = list(itertools.chain(*six.itervalues(self.hsent)))
if autostop and len(remain) > 0 and \
len(remain) != len(self.tobesent):
retry = autostop
self.tobesent = remain
if len(self.tobesent) == 0:
break
retry -= 1
if conf.debug_match:
debug.sent = PacketList(remain[:], "Sent")
debug.match = SndRcvList(self.ans[:])
# Clean the ans list to delete the field _answered
if multi:
for snd, _ in self.ans:
if hasattr(snd, '_answered'):
del snd._answered
if verbose:
print(
"\nReceived %i packets, got %i answers, "
"remaining %i packets" % (
self.nbrecv + len(self.ans), len(self.ans), self.notans
)
)
self.ans_result = SndRcvList(self.ans)
self.unans_result = PacketList(remain, "Unanswered")
def results(self):
return self.ans_result, self.unans_result
def _sndrcv_snd(self):
"""Function used in the sending thread of sndrcv()"""
try:
if self.verbose:
print("Begin emission:")
i = 0
for p in self.tobesent:
# Populate the dictionary of _sndrcv_rcv
# _sndrcv_rcv won't miss the answer of a packet that
# has not been sent
self.hsent.setdefault(p.hashret(), []).append(p)
# Send packet
self.pks.send(p)
time.sleep(self.inter)
i += 1
if self.verbose:
print("Finished sending %i packets." % i)
except SystemExit:
pass
except Exception:
log_runtime.exception("--- Error sending packets")
def _process_packet(self, r):
"""Internal function used to process each packet."""
if r is None:
return
ok = False
h = r.hashret()
if h in self.hsent:
hlst = self.hsent[h]
for i, sentpkt in enumerate(hlst):
if r.answers(sentpkt):
self.ans.append(QueryAnswer(sentpkt, r))
if self.verbose > 1:
os.write(1, b"*")
ok = True
if not self.multi:
del hlst[i]
self.notans -= 1
else:
if not hasattr(sentpkt, '_answered'):
self.notans -= 1
sentpkt._answered = 1
break
if self.notans <= 0 and not self.multi:
self.sniffer.stop(join=False)
if not ok:
if self.verbose > 1:
os.write(1, b".")
self.nbrecv += 1
if conf.debug_match:
debug.recv.append(r)
def _sndrcv_rcv(self, callback):
"""Function used to receive packets and check their hashret"""
self.sniffer = None
try:
self.sniffer = AsyncSniffer()
self.sniffer._run(
prn=self._process_packet,
timeout=self.timeout,
store=False,
opened_socket=self.pks,
session=self.session,
started_callback=callback
)
except KeyboardInterrupt:
if self.chainCC:
raise
def sndrcv(*args, **kwargs):
"""Scapy raw function to send a packet and receive its answer.
WARNING: This is an internal function. Using sr/srp/sr1/srp is
more appropriate in many cases.
"""
sndrcver = SndRcvHandler(*args, **kwargs)
return sndrcver.results()
def __gen_send(s, x, inter=0, loop=0, count=None, verbose=None, realtime=None, return_packets=False, *args, **kargs): # noqa: E501
if isinstance(x, str):
x = conf.raw_layer(load=x)
if not isinstance(x, Gen):
x = SetGen(x)
if verbose is None:
verbose = conf.verb
n = 0
if count is not None:
loop = -count
elif not loop:
loop = -1
if return_packets:
sent_packets = PacketList()
try:
while loop:
dt0 = None
for p in x:
if realtime:
ct = time.time()
if dt0:
st = dt0 + float(p.time) - ct
if st > 0:
time.sleep(st)
else:
dt0 = ct - float(p.time)
s.send(p)
if return_packets:
sent_packets.append(p)
n += 1
if verbose:
os.write(1, b".")
time.sleep(inter)
if loop < 0:
loop += 1
except KeyboardInterrupt:
pass
if verbose:
print("\nSent %i packets." % n)
if return_packets:
return sent_packets
def _send(x, _func, inter=0, loop=0, iface=None, count=None,
verbose=None, realtime=None,
return_packets=False, socket=None, **kargs):
"""Internal function used by send and sendp"""
need_closing = socket is None
iface = resolve_iface(iface or conf.iface)
socket = socket or _func(iface)(iface=iface, **kargs)
results = __gen_send(socket, x, inter=inter, loop=loop,
count=count, verbose=verbose,
realtime=realtime, return_packets=return_packets)
if need_closing:
socket.close()
return results
@conf.commands.register
def send(x, iface=None, *args, **kargs):
"""
Send packets at layer 3
:param x: the packets
:param inter: time (in s) between two packets (default 0)
    :param loop: send packet indefinitely (default 0)
:param count: number of packets to send (default None=1)
:param verbose: verbose mode (default None=conf.verbose)
:param realtime: check that a packet was sent before sending the next one
:param return_packets: return the sent packets
:param socket: the socket to use (default is conf.L3socket(kargs))
:param iface: the interface to send the packets on
:param monitor: (not on linux) send in monitor mode
:returns: None
"""
iface = _interface_selection(iface, x)
return _send(
x,
lambda iface: iface.l3socket(), iface=iface,
*args, **kargs
)
@conf.commands.register
def sendp(x, iface=None, iface_hint=None, socket=None, *args, **kargs):
"""
Send packets at layer 2
:param x: the packets
:param inter: time (in s) between two packets (default 0)
    :param loop: send packet indefinitely (default 0)
:param count: number of packets to send (default None=1)
:param verbose: verbose mode (default None=conf.verbose)
:param realtime: check that a packet was sent before sending the next one
:param return_packets: return the sent packets
    :param socket: the socket to use (default is conf.L2socket(kargs))
:param iface: the interface to send the packets on
:param monitor: (not on linux) send in monitor mode
:returns: None
"""
if iface is None and iface_hint is not None and socket is None:
iface = conf.route.route(iface_hint)[0]
return _send(
x,
lambda iface: iface.l2socket(),
*args,
iface=iface,
socket=socket,
**kargs
)
@conf.commands.register
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None, replay_args=None, # noqa: E501
parse_results=False):
"""Send packets at layer 2 using tcpreplay for performance
:param pps: packets per second
    :param mbps: Mbits per second
:param realtime: use packet's timestamp, bending time with real-time value
:param loop: number of times to process the packet list
:param file_cache: cache packets in RAM instead of reading from
disk at each iteration
:param iface: output interface
:param replay_args: List of additional tcpreplay args (List[str])
:param parse_results: Return a dictionary of information
outputted by tcpreplay (default=False)
:returns: stdout, stderr, command used
"""
if iface is None:
iface = conf.iface
argv = [conf.prog.tcpreplay, "--intf1=%s" % network_name(iface)]
if pps is not None:
argv.append("--pps=%i" % pps)
elif mbps is not None:
argv.append("--mbps=%f" % mbps)
elif realtime is not None:
argv.append("--multiplier=%f" % realtime)
else:
argv.append("--topspeed")
if loop:
argv.append("--loop=%i" % loop)
if file_cache:
argv.append("--preload-pcap")
# Check for any additional args we didn't cover.
if replay_args is not None:
argv.extend(replay_args)
f = get_temp_file()
argv.append(f)
wrpcap(f, x)
results = None
with ContextManagerSubprocess(conf.prog.tcpreplay):
try:
cmd = subprocess.Popen(argv, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except KeyboardInterrupt:
log_interactive.info("Interrupted by user")
except Exception:
os.unlink(f)
raise
else:
stdout, stderr = cmd.communicate()
if stderr:
log_runtime.warning(stderr.decode())
if parse_results:
results = _parse_tcpreplay_result(stdout, stderr, argv)
elif conf.verb > 2:
log_runtime.info(stdout.decode())
os.unlink(f)
return results
def _parse_tcpreplay_result(stdout, stderr, argv):
"""
Parse the output of tcpreplay and modify the results_dict to populate output information. # noqa: E501
Tested with tcpreplay v3.4.4
Tested with tcpreplay v4.1.2
:param stdout: stdout of tcpreplay subprocess call
:param stderr: stderr of tcpreplay subprocess call
:param argv: the command used in the subprocess call
:return: dictionary containing the results
"""
try:
results = {}
stdout = plain_str(stdout).lower()
stderr = plain_str(stderr).strip().split("\n")
elements = {
"actual": (int, int, float),
"rated": (float, float, float),
"flows": (int, float, int, int),
"attempted": (int,),
"successful": (int,),
"failed": (int,),
"truncated": (int,),
"retried packets (eno": (int,),
"retried packets (eag": (int,),
}
multi = {
"actual": ("packets", "bytes", "time"),
"rated": ("bps", "mbps", "pps"),
"flows": ("flows", "fps", "flow_packets", "non_flow"),
"retried packets (eno": ("retried_enobufs",),
"retried packets (eag": ("retried_eagain",),
}
float_reg = r"([0-9]*\.[0-9]+|[0-9]+)"
int_reg = r"([0-9]+)"
any_reg = r"[^0-9]*"
r_types = {int: int_reg, float: float_reg}
for line in stdout.split("\n"):
line = line.strip()
for elt, _types in elements.items():
if line.startswith(elt):
regex = any_reg.join([r_types[x] for x in _types])
matches = re.search(regex, line)
for i, typ in enumerate(_types):
name = multi.get(elt, [elt])[i]
results[name] = typ(matches.group(i + 1))
results["command"] = " ".join(argv)
results["warnings"] = stderr[:-1]
return results
except Exception as parse_exception:
if not conf.interactive:
raise
log_runtime.error("Error parsing output: %s", parse_exception)
return {}
@conf.commands.register
def sr(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):
"""
Send and receive packets at layer 3
"""
s = conf.L3socket(promisc=promisc, filter=filter,
iface=iface, nofilter=nofilter)
result = sndrcv(s, x, *args, **kargs)
s.close()
return result
def _interface_selection(iface, packet):
"""
Select the network interface according to the layer 3 destination
"""
if iface is None:
try:
iff = packet.route()[0]
except AttributeError:
iff = None
return iff or conf.iface
return iface
@conf.commands.register
def sr1(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):
"""
Send packets at layer 3 and return only the first answer
"""
iface = _interface_selection(iface, x)
s = conf.L3socket(promisc=promisc, filter=filter,
nofilter=nofilter, iface=iface)
ans, _ = sndrcv(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
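def _example_icmp_probe(dst="198.51.100.1", timeout=2):
    """Illustrative sketch, not part of the original module: probe a host
    with a single ICMP echo request through sr1() above. The default
    destination is a TEST-NET-2 documentation address, a placeholder only."""
    from scapy.layers.inet import IP, ICMP  # imported lazily for the example
    return sr1(IP(dst=dst) / ICMP(), timeout=timeout, verbose=0)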
@conf.commands.register
def srp(x, promisc=None, iface=None, iface_hint=None, filter=None,
nofilter=0, type=ETH_P_ALL, *args, **kargs):
"""
Send and receive packets at layer 2
"""
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
iface = resolve_iface(iface or conf.iface)
s = iface.l2socket()(promisc=promisc, iface=iface,
filter=filter, nofilter=nofilter, type=type)
result = sndrcv(s, x, *args, **kargs)
s.close()
return result
@conf.commands.register
def srp1(*args, **kargs):
"""
Send and receive packets at layer 2 and return only the first answer
"""
ans, _ = srp(*args, **kargs)
if len(ans) > 0:
return ans[0][1]
# Append doc
for sr_func in [srp, srp1, sr, sr1]:
if sr_func.__doc__ is not None:
sr_func.__doc__ += _DOC_SNDRCV_PARAMS
# SEND/RECV LOOP METHODS
def __sr_loop(srfunc, pkts, prn=lambda x: x[1].summary(),
prnfail=lambda x: x.summary(),
inter=1, timeout=None, count=None, verbose=None, store=1,
*args, **kargs):
n = 0
r = 0
ct = conf.color_theme
if verbose is None:
verbose = conf.verb
parity = 0
ans = []
unans = []
if timeout is None:
timeout = min(2 * inter, 5)
try:
while True:
parity ^= 1
col = [ct.even, ct.odd][parity]
if count is not None:
if count == 0:
break
count -= 1
start = time.time()
if verbose > 1:
print("\rsend...\r", end=' ')
res = srfunc(pkts, timeout=timeout, verbose=0, chainCC=True, *args, **kargs) # noqa: E501
n += len(res[0]) + len(res[1])
r += len(res[0])
if verbose > 1 and prn and len(res[0]) > 0:
msg = "RECV %i:" % len(res[0])
print("\r" + ct.success(msg), end=' ')
for p in res[0]:
print(col(prn(p)))
print(" " * len(msg), end=' ')
if verbose > 1 and prnfail and len(res[1]) > 0:
msg = "fail %i:" % len(res[1])
print("\r" + ct.fail(msg), end=' ')
for p in res[1]:
print(col(prnfail(p)))
print(" " * len(msg), end=' ')
if verbose > 1 and not (prn or prnfail):
print("recv:%i fail:%i" % tuple(map(len, res[:2])))
if store:
ans += res[0]
unans += res[1]
end = time.time()
if end - start < inter:
time.sleep(inter + start - end)
except KeyboardInterrupt:
pass
if verbose and n > 0:
print(ct.normal("\nSent %i packets, received %i packets. %3.1f%% hits." % (n, r, 100.0 * r / n))) # noqa: E501
return SndRcvList(ans), PacketList(unans)
@conf.commands.register
def srloop(pkts, *args, **kargs):
"""Send a packet at layer 3 in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return __sr_loop(sr, pkts, *args, **kargs)
@conf.commands.register
def srploop(pkts, *args, **kargs):
"""Send a packet at layer 2 in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return __sr_loop(srp, pkts, *args, **kargs)
# SEND/RECV FLOOD METHODS
def sndrcvflood(pks, pkt, inter=0, verbose=None, chainCC=False, timeout=None):
"""sndrcv equivalent for flooding."""
stopevent = Event()
def send_in_loop(tobesent, stopevent):
"""Infinite generator that produces the same
packet until stopevent is triggered."""
while True:
for p in tobesent:
if stopevent.is_set():
return
yield p
infinite_gen = send_in_loop(pkt, stopevent)
_flood_len = pkt.__iterlen__() if isinstance(pkt, Gen) else len(pkt)
_flood = [_flood_len, stopevent.set]
return sndrcv(
pks, infinite_gen,
inter=inter, verbose=verbose,
chainCC=chainCC, timeout=timeout,
_flood=_flood
)
@conf.commands.register
def srflood(x, promisc=None, filter=None, iface=None, nofilter=None, *args, **kargs): # noqa: E501
"""Flood and receive packets at layer 3
:param prn: function applied to packets received
    :param unique: only consider packets whose print output is unique
:param nofilter: put 1 to avoid use of BPF filters
:param filter: provide a BPF filter
:param iface: listen answers only on the given interface
"""
iface = resolve_iface(iface or conf.iface)
s = iface.l3socket()(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter) # noqa: E501
r = sndrcvflood(s, x, *args, **kargs)
s.close()
return r
@conf.commands.register
def sr1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs): # noqa: E501
"""Flood and receive packets at layer 3 and return only the first answer
:param prn: function applied to packets received
:param verbose: set verbosity level
:param nofilter: put 1 to avoid use of BPF filters
:param filter: provide a BPF filter
:param iface: listen answers only on the given interface
"""
iface = resolve_iface(iface or conf.iface)
s = iface.l3socket()(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface) # noqa: E501
ans, _ = sndrcvflood(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
@conf.commands.register
def srpflood(x, promisc=None, filter=None, iface=None, iface_hint=None, nofilter=None, *args, **kargs): # noqa: E501
"""Flood and receive packets at layer 2
:param prn: function applied to packets received
    :param unique: only consider packets whose print output is unique
:param nofilter: put 1 to avoid use of BPF filters
:param filter: provide a BPF filter
:param iface: listen answers only on the given interface
"""
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
iface = resolve_iface(iface or conf.iface)
s = iface.l2socket()(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter) # noqa: E501
r = sndrcvflood(s, x, *args, **kargs)
s.close()
return r
@conf.commands.register
def srp1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs): # noqa: E501
"""Flood and receive packets at layer 2 and return only the first answer
:param prn: function applied to packets received
:param verbose: set verbosity level
:param nofilter: put 1 to avoid use of BPF filters
:param filter: provide a BPF filter
:param iface: listen answers only on the given interface
"""
iface = resolve_iface(iface or conf.iface)
s = iface.l2socket()(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface) # noqa: E501
ans, _ = sndrcvflood(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
# SNIFF METHODS
class AsyncSniffer(object):
"""
Sniff packets and return a list of packets.
Args:
count: number of packets to capture. 0 means infinity.
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned, it
is displayed.
--Ex: prn = lambda x: x.summary()
session: a session = a flow decoder used to handle stream of packets.
--Ex: session=TCPSession
See below for more details.
filter: BPF filter to apply.
lfilter: Python function applied to each packet to determine if
further action may be done.
--Ex: lfilter = lambda x: x.haslayer(Padding)
offline: PCAP file (or list of PCAP files) to read packets from,
instead of sniffing them
quiet: when set to True, the process stderr is discarded
(default: False).
timeout: stop sniffing after a given time (default: None).
L2socket: use the provided L2socket (default: use conf.L2listen).
opened_socket: provide an object (or a list of objects) ready to use
.recv() on.
stop_filter: Python function applied to each packet to determine if
we have to stop the capture after this packet.
--Ex: stop_filter = lambda x: x.haslayer(TCP)
iface: interface or list of interfaces (default: None for sniffing
on all interfaces).
monitor: use monitor mode. May not be available on all OS
started_callback: called as soon as the sniffer starts sniffing
(default: None).
The iface, offline and opened_socket parameters can be either an
element, a list of elements, or a dict object mapping an element to a
label (see examples below).
For more information about the session argument, see
https://scapy.rtfd.io/en/latest/usage.html#advanced-sniffing-sniffing-sessions
Examples: synchronous
>>> sniff(filter="arp")
>>> sniff(filter="tcp",
... session=IPSession, # defragment on-the-flow
... prn=lambda x: x.summary())
>>> sniff(lfilter=lambda pkt: ARP in pkt)
>>> sniff(iface="eth0", prn=Packet.summary)
>>> sniff(iface=["eth0", "mon0"],
... prn=lambda pkt: "%s: %s" % (pkt.sniffed_on,
... pkt.summary()))
>>> sniff(iface={"eth0": "Ethernet", "mon0": "Wifi"},
... prn=lambda pkt: "%s: %s" % (pkt.sniffed_on,
... pkt.summary()))
Examples: asynchronous
>>> t = AsyncSniffer(iface="enp0s3")
>>> t.start()
>>> time.sleep(1)
>>> print("nice weather today")
>>> t.stop()
"""
def __init__(self, *args, **kwargs):
# Store keyword arguments
self.args = args
self.kwargs = kwargs
self.running = False
self.thread = None
self.results = None
def _setup_thread(self):
# Prepare sniffing thread
self.thread = Thread(
target=self._run,
args=self.args,
kwargs=self.kwargs,
name="AsyncSniffer"
)
self.thread.setDaemon(True)
def _run(self,
count=0, store=True, offline=None,
quiet=False, prn=None, lfilter=None,
L2socket=None, timeout=None, opened_socket=None,
stop_filter=None, iface=None, started_callback=None,
session=None, session_args=[], session_kwargs={},
*arg, **karg):
self.running = True
# Start main thread
# instantiate session
if not isinstance(session, DefaultSession):
session = session or DefaultSession
session = session(prn=prn, store=store,
*session_args, **session_kwargs)
else:
session.prn = prn
session.store = store
# sniff_sockets follows: {socket: label}
sniff_sockets = {}
if opened_socket is not None:
if isinstance(opened_socket, list):
sniff_sockets.update(
(s, "socket%d" % i)
for i, s in enumerate(opened_socket)
)
elif isinstance(opened_socket, dict):
sniff_sockets.update(
(s, label)
for s, label in six.iteritems(opened_socket)
)
else:
sniff_sockets[opened_socket] = "socket0"
if offline is not None:
flt = karg.get('filter')
if isinstance(offline, list) and \
all(isinstance(elt, str) for elt in offline):
sniff_sockets.update((PcapReader(
fname if flt is None else
tcpdump(fname, args=["-w", "-"], flt=flt, getfd=True)
), fname) for fname in offline)
elif isinstance(offline, dict):
sniff_sockets.update((PcapReader(
fname if flt is None else
tcpdump(fname, args=["-w", "-"], flt=flt, getfd=True)
), label) for fname, label in six.iteritems(offline))
else:
# Write Scapy Packet objects to a pcap file
def _write_to_pcap(packets_list):
filename = get_temp_file(autoext=".pcap")
wrpcap(filename, offline)
return filename, filename
if isinstance(offline, Packet):
tempfile_written, offline = _write_to_pcap([offline])
elif isinstance(offline, list) and \
all(isinstance(elt, Packet) for elt in offline):
tempfile_written, offline = _write_to_pcap(offline)
sniff_sockets[PcapReader(
offline if flt is None else
tcpdump(offline,
args=["-w", "-"],
flt=flt,
getfd=True,
quiet=quiet)
)] = offline
if not sniff_sockets or iface is not None:
iface = resolve_iface(iface or conf.iface)
if L2socket is None:
L2socket = iface.l2listen()
if isinstance(iface, list):
sniff_sockets.update(
(L2socket(type=ETH_P_ALL, iface=ifname, *arg, **karg),
ifname)
for ifname in iface
)
elif isinstance(iface, dict):
sniff_sockets.update(
(L2socket(type=ETH_P_ALL, iface=ifname, *arg, **karg),
iflabel)
for ifname, iflabel in six.iteritems(iface)
)
else:
sniff_sockets[L2socket(type=ETH_P_ALL, iface=iface,
*arg, **karg)] = iface
# Get select information from the sockets
_main_socket = next(iter(sniff_sockets))
select_func = _main_socket.select
_backup_read_func = _main_socket.__class__.recv
nonblocking_socket = _main_socket.nonblocking_socket
# We check that all sockets use the same select(), or raise a warning
if not all(select_func == sock.select for sock in sniff_sockets):
warning("Warning: inconsistent socket types ! "
"The used select function "
"will be the one of the first socket")
if nonblocking_socket:
# select is non blocking
def stop_cb():
self.continue_sniff = False
self.stop_cb = stop_cb
close_pipe = None
else:
# select is blocking: Add special control socket
from scapy.automaton import ObjectPipe
close_pipe = ObjectPipe()
sniff_sockets[close_pipe] = "control_socket"
def stop_cb():
if self.running:
close_pipe.send(None)
self.continue_sniff = False
self.stop_cb = stop_cb
try:
if started_callback:
started_callback()
self.continue_sniff = True
# Start timeout
if timeout is not None:
stoptime = time.time() + timeout
remain = None
while sniff_sockets and self.continue_sniff:
if timeout is not None:
remain = stoptime - time.time()
if remain <= 0:
break
sockets, read_func = select_func(sniff_sockets, remain)
read_func = read_func or _backup_read_func
dead_sockets = []
for s in sockets:
if s is close_pipe:
break
try:
p = read_func(s)
except EOFError:
# End of stream
try:
s.close()
except Exception:
pass
dead_sockets.append(s)
continue
except Exception as ex:
msg = " It was closed."
try:
# Make sure it's closed
s.close()
except Exception as ex:
msg = " close() failed with '%s'" % ex
warning(
"Socket %s failed with '%s'." % (s, ex) + msg
)
dead_sockets.append(s)
if conf.debug_dissector >= 2:
raise
continue
if p is None:
continue
if lfilter and not lfilter(p):
continue
p.sniffed_on = sniff_sockets[s]
# on_packet_received handles the prn/storage
session.on_packet_received(p)
# check
if (stop_filter and stop_filter(p)) or \
(0 < count <= session.count):
self.continue_sniff = False
break
                # Remove dead sockets
for s in dead_sockets:
del sniff_sockets[s]
except KeyboardInterrupt:
pass
self.running = False
if opened_socket is None:
for s in sniff_sockets:
s.close()
elif close_pipe:
close_pipe.close()
self.results = session.toPacketList()
def start(self):
"""Starts AsyncSniffer in async mode"""
self._setup_thread()
self.thread.start()
def stop(self, join=True):
"""Stops AsyncSniffer if not in async mode"""
if self.running:
try:
self.stop_cb()
except AttributeError:
raise Scapy_Exception(
"Unsupported (offline or unsupported socket)"
)
if join:
self.join()
return self.results
else:
raise Scapy_Exception("Not started !")
def join(self, *args, **kwargs):
if self.thread:
self.thread.join(*args, **kwargs)
@conf.commands.register
def sniff(*args, **kwargs):
sniffer = AsyncSniffer()
sniffer._run(*args, **kwargs)
return sniffer.results
sniff.__doc__ = AsyncSniffer.__doc__
@conf.commands.register
def bridge_and_sniff(if1, if2, xfrm12=None, xfrm21=None, prn=None, L2socket=None, # noqa: E501
*args, **kargs):
"""Forward traffic between interfaces if1 and if2, sniff and return
the exchanged packets.
:param if1: the interfaces to use (interface names or opened sockets).
:param if2:
:param xfrm12: a function to call when forwarding a packet from if1 to
if2. If it returns True, the packet is forwarded as it. If it
returns False or None, the packet is discarded. If it returns a
packet, this packet is forwarded instead of the original packet
one.
:param xfrm21: same as xfrm12 for packets forwarded from if2 to if1.
The other arguments are the same than for the function sniff(),
except for offline, opened_socket and iface that are ignored.
See help(sniff) for more.
"""
for arg in ['opened_socket', 'offline', 'iface']:
if arg in kargs:
log_runtime.warning("Argument %s cannot be used in "
"bridge_and_sniff() -- ignoring it.", arg)
del kargs[arg]
def _init_socket(iface, count, L2socket=L2socket):
if isinstance(iface, SuperSocket):
return iface, "iface%d" % count
else:
if not L2socket:
iface = resolve_iface(iface or conf.iface)
L2socket = iface.l2socket()
return L2socket(iface=iface), iface
sckt1, if1 = _init_socket(if1, 1)
sckt2, if2 = _init_socket(if2, 2)
peers = {if1: sckt2, if2: sckt1}
xfrms = {}
if xfrm12 is not None:
xfrms[if1] = xfrm12
if xfrm21 is not None:
xfrms[if2] = xfrm21
def prn_send(pkt):
try:
sendsock = peers[pkt.sniffed_on]
except KeyError:
return
if pkt.sniffed_on in xfrms:
try:
newpkt = xfrms[pkt.sniffed_on](pkt)
except Exception:
log_runtime.warning(
'Exception in transformation function for packet [%s] '
'received on %s -- dropping',
pkt.summary(), pkt.sniffed_on, exc_info=True
)
return
else:
if newpkt is True:
newpkt = pkt
elif not newpkt:
return
else:
newpkt = pkt
try:
sendsock.send(newpkt)
except Exception:
log_runtime.warning('Cannot forward packet [%s] received on %s',
pkt.summary(), pkt.sniffed_on, exc_info=True)
if prn is None:
prn = prn_send
else:
prn_orig = prn
def prn(pkt):
prn_send(pkt)
return prn_orig(pkt)
return sniff(opened_socket={sckt1: if1, sckt2: if2}, prn=prn,
*args, **kargs)
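def _example_ttl_rewrite(pkt):
    """Illustrative sketch, not part of the original module: a transform
    callback usable as xfrm12/xfrm21 in bridge_and_sniff() above. It rewrites
    the TTL of forwarded IPv4 packets and drops everything else; the TTL
    value of 64 is only an example."""
    from scapy.layers.inet import IP  # imported lazily for the example
    if IP in pkt:
        pkt[IP].ttl = 64
        return pkt  # forward the modified packet
    return False  # drop non-IP traffic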
@conf.commands.register
def tshark(*args, **kargs):
"""Sniff packets and print them calling pkt.summary().
This tries to replicate what text-wireshark (tshark) would look like"""
if 'iface' in kargs:
iface = kargs.get('iface')
elif 'opened_socket' in kargs:
iface = kargs.get('opened_socket').iface
else:
iface = conf.iface
print("Capturing on '%s'" % iface)
# This should be a nonlocal variable, using a mutable object
# for Python 2 compatibility
i = [0]
def _cb(pkt):
print("%5d\t%s" % (i[0], pkt.summary()))
i[0] += 1
sniff(prn=_cb, store=False, *args, **kargs)
print("\n%d packet%s captured" % (i[0], 's' if i[0] > 1 else ''))
| 1 | 18,888 | We need to find something more efficient than this. We can probably just check the first element and assume the type list is consistent.. | secdev-scapy | py |
@@ -29,6 +29,9 @@ var (
executions and is good for jobs which need to run frequently. "Fixed Schedule"
lets you use a predefined or custom cron schedule and is good for less-frequent
jobs or those which require specific execution schedules.`
+
+ fmtJobInitTypeHelp = `A %s is a task which can be programmatically run, either on a schedule or via
+service code in response to events.`
)
const ( | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/cli/group"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/initialize"
"github.com/aws/copilot-cli/internal/pkg/manifest"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/aws/copilot-cli/internal/pkg/term/log"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/aws/copilot-cli/internal/pkg/workspace"
"github.com/spf13/afero"
"github.com/spf13/cobra"
)
var (
jobInitSchedulePrompt = "How would you like to " + color.Emphasize("schedule") + " this job?"
jobInitScheduleHelp = `How to determine this job's schedule. "Rate" lets you define the time between
executions and is good for jobs which need to run frequently. "Fixed Schedule"
lets you use a predefined or custom cron schedule and is good for less-frequent
jobs or those which require specific execution schedules.`
)
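// Illustrative sketch (not part of the original file): the two schedule styles
// described in jobInitScheduleHelp above, written as strings a user might
// enter at the prompt. The concrete values are examples only and are not
// defaults used by the CLI.
var exampleJobSchedules = []string{
	"@every 2h",    // "Rate": a fixed interval between executions
	"0 */12 * * *", // "Fixed Schedule": a cron expression (every 12 hours)
}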
const (
job = "job"
)
type initJobOpts struct {
initWkldVars
// Interfaces to interact with dependencies.
fs afero.Fs
store store
init jobInitializer
prompt prompter
sel initJobSelector
// Outputs stored on successful actions.
manifestPath string
}
func newInitJobOpts(vars initWkldVars) (*initJobOpts, error) {
store, err := config.NewStore()
if err != nil {
return nil, fmt.Errorf("couldn't connect to config store: %w", err)
}
ws, err := workspace.New()
if err != nil {
return nil, fmt.Errorf("workspace cannot be created: %w", err)
}
p := sessions.NewProvider()
sess, err := p.Default()
if err != nil {
return nil, err
}
jobInitter := &initialize.WorkloadInitializer{
Store: store,
Ws: ws,
Prog: termprogress.NewSpinner(),
Deployer: cloudformation.New(sess),
}
prompter := prompt.New()
sel := selector.NewWorkspaceSelect(prompter, store, ws)
return &initJobOpts{
initWkldVars: vars,
fs: &afero.Afero{Fs: afero.NewOsFs()},
store: store,
init: jobInitter,
prompt: prompter,
sel: sel,
}, nil
}
// Validate returns an error if the flag values passed by the user are invalid.
func (o *initJobOpts) Validate() error {
if o.wkldType != "" {
if err := validateJobType(o.wkldType); err != nil {
return err
}
}
if o.name != "" {
if err := validateJobName(o.name); err != nil {
return err
}
}
if o.dockerfilePath != "" && o.image != "" {
return fmt.Errorf("--%s and --%s cannot be specified together", dockerFileFlag, imageFlag)
}
if o.dockerfilePath != "" {
if _, err := o.fs.Stat(o.dockerfilePath); err != nil {
return err
}
}
if o.schedule != "" {
if err := validateSchedule(o.schedule); err != nil {
return err
}
}
if o.timeout != "" {
if err := validateTimeout(o.timeout); err != nil {
return err
}
}
if o.retries < 0 {
return errors.New("number of retries must be non-negative")
}
return nil
}
// Ask prompts for fields that are required but not passed in.
func (o *initJobOpts) Ask() error {
if err := o.askJobType(); err != nil {
return err
}
if err := o.askJobName(); err != nil {
return err
}
dfSelected, err := o.askDockerfile()
if err != nil {
return err
}
if !dfSelected {
if err := o.askImage(); err != nil {
return err
}
}
if err := o.askSchedule(); err != nil {
return err
}
return nil
}
// Execute writes the job's manifest file, creates an ECR repo, and stores the name in SSM.
func (o *initJobOpts) Execute() error {
manifestPath, err := o.init.Job(&initialize.JobProps{
WorkloadProps: initialize.WorkloadProps{
App: o.appName,
Name: o.name,
Type: o.wkldType,
DockerfilePath: o.dockerfilePath,
Image: o.image,
},
Schedule: o.schedule,
Timeout: o.timeout,
Retries: o.retries,
})
if err != nil {
return err
}
o.manifestPath = manifestPath
return nil
}
func (o *initJobOpts) askJobType() error {
if o.wkldType != "" {
return nil
}
// short circuit since there's only one valid job type.
o.wkldType = manifest.ScheduledJobType
return nil
}
func (o *initJobOpts) askJobName() error {
if o.name != "" {
return nil
}
name, err := o.prompt.Get(
fmt.Sprintf(fmtWkldInitNamePrompt, color.Emphasize("name"), color.HighlightUserInput(o.wkldType)),
fmt.Sprintf(fmtWkldInitNameHelpPrompt, job, o.appName),
validateSvcName,
prompt.WithFinalMessage("Job name:"),
)
if err != nil {
return fmt.Errorf("get job name: %w", err)
}
o.name = name
return nil
}
func (o *initJobOpts) askImage() error {
if o.image != "" {
return nil
}
image, err := o.prompt.Get(wkldInitImagePrompt, wkldInitImagePromptHelp, nil,
prompt.WithFinalMessage("Image:"))
if err != nil {
return fmt.Errorf("get image location: %w", err)
}
o.image = image
return nil
}
// isDfSelected indicates if any Dockerfile is in use.
func (o *initJobOpts) askDockerfile() (isDfSelected bool, err error) {
if o.dockerfilePath != "" || o.image != "" {
return true, nil
}
df, err := o.sel.Dockerfile(
fmt.Sprintf(fmtWkldInitDockerfilePrompt, color.HighlightUserInput(o.name)),
fmt.Sprintf(fmtWkldInitDockerfilePathPrompt, color.HighlightUserInput(o.name)),
wkldInitDockerfileHelpPrompt,
wkldInitDockerfilePathHelpPrompt,
func(v interface{}) error {
return validatePath(afero.NewOsFs(), v)
},
)
if err != nil {
return false, fmt.Errorf("select Dockerfile: %w", err)
}
if df == selector.DockerfilePromptUseImage {
return false, nil
}
o.dockerfilePath = df
return true, nil
}
func (o *initJobOpts) askSchedule() error {
if o.schedule != "" {
return nil
}
schedule, err := o.sel.Schedule(
jobInitSchedulePrompt,
jobInitScheduleHelp,
validateSchedule,
validateRate,
)
if err != nil {
return fmt.Errorf("get schedule: %w", err)
}
o.schedule = schedule
return nil
}
// RecommendedActions returns follow-up actions the user can take after successfully executing the command.
func (o *initJobOpts) RecommendedActions() []string {
return []string{
fmt.Sprintf("Update your manifest %s to change the defaults.", color.HighlightResource(o.manifestPath)),
fmt.Sprintf("Run %s to deploy your job to a %s environment.",
color.HighlightCode(fmt.Sprintf("copilot job deploy --name %s --env %s", o.name, defaultEnvironmentName)),
defaultEnvironmentName),
}
}
// buildJobInitCmd builds the command for creating a new job.
func buildJobInitCmd() *cobra.Command {
vars := initWkldVars{}
cmd := &cobra.Command{
Use: "init",
Short: "Creates a new scheduled job in an application.",
Example: `
Create a "reaper" scheduled task to run once per day.
/code $ copilot job init --name reaper --dockerfile ./frontend/Dockerfile --schedule "every 2 hours"
Create a "report-generator" scheduled task with retries.
/code $ copilot job init --name report-generator --schedule "@monthly" --retries 3 --timeout 900s`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newInitJobOpts(vars)
if err != nil {
return err
}
if err := opts.Validate(); err != nil { // validate flags
return err
}
if err := opts.Ask(); err != nil {
return err
}
if err := opts.Execute(); err != nil {
return err
}
log.Infoln("Recommended follow-up actions:")
for _, followup := range opts.RecommendedActions() {
log.Infof("- %s\n", followup)
}
return nil
}),
}
cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription)
cmd.Flags().StringVarP(&vars.name, nameFlag, nameFlagShort, "", jobFlagDescription)
cmd.Flags().StringVarP(&vars.wkldType, jobTypeFlag, jobTypeFlagShort, "", jobTypeFlagDescription)
cmd.Flags().StringVarP(&vars.dockerfilePath, dockerFileFlag, dockerFileFlagShort, "", dockerFileFlagDescription)
cmd.Flags().StringVarP(&vars.schedule, scheduleFlag, scheduleFlagShort, "", scheduleFlagDescription)
cmd.Flags().StringVar(&vars.timeout, timeoutFlag, "", timeoutFlagDescription)
cmd.Flags().IntVar(&vars.retries, retriesFlag, 0, retriesFlagDescription)
cmd.Flags().StringVarP(&vars.image, imageFlag, imageFlagShort, "", imageFlagDescription)
cmd.Annotations = map[string]string{
"group": group.Develop,
}
return cmd
}
| 1 | 15,531 | How " via service code in response to event."? I don't think we'll get in the no schedule option to the release. We should probably wait for some customer feedback first to make sure we're addressing different event-triggers the best way | aws-copilot-cli | go |
@@ -65,7 +65,6 @@ PER_YIELD = 1024
def generate_model_handle():
"""Generate random model handle.
-
Returns:
str: random bytes for handle
""" | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database abstraction objects for Forseti Server."""
# pylint: disable=too-many-lines
# pylint: disable=too-many-branches
import binascii
import collections
import hmac
import json
import os
import struct
from threading import Lock
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import Integer
from sqlalchemy import Boolean
from sqlalchemy import String
from sqlalchemy import Sequence
from sqlalchemy import ForeignKey
from sqlalchemy import Text
from sqlalchemy import create_engine as sqlalchemy_create_engine
from sqlalchemy import Table
from sqlalchemy import DateTime
from sqlalchemy import or_
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import reconstructor
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import select
from sqlalchemy.sql import union
from sqlalchemy.ext.declarative import declarative_base
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.services.utils import mutual_exclusive
from google.cloud.forseti.services.utils import to_full_resource_name
from google.cloud.forseti.services import db
from google.cloud.forseti.services.utils import get_sql_dialect
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
POOL_RECYCLE_SECONDS = 300
PER_YIELD = 1024
def generate_model_handle():
"""Generate random model handle.
Returns:
str: random bytes for handle
"""
return binascii.hexlify(os.urandom(16))
def generate_model_seed():
"""Generate random model seed.
Returns:
str: random bytes
"""
return binascii.hexlify(os.urandom(16))
MODEL_BASE = declarative_base()
class Model(MODEL_BASE):
"""Explain model object in database."""
__tablename__ = 'model'
name = Column(String(32), primary_key=True)
handle = Column(String(32))
state = Column(String(32))
description = Column(Text())
watchdog_timer_datetime = Column(DateTime())
created_at_datetime = Column(DateTime())
etag_seed = Column(String(32), nullable=False)
message = Column(Text(16777215))
warnings = Column(Text(16777215))
def __init__(self, *args, **kwargs):
"""Initialize
Args:
*args (list): Arguments.
**kwargs (dict): Arguments.
"""
super(Model, self).__init__(*args, **kwargs)
# Non-SQL attributes
self.warning_store = list()
@reconstructor
def init_on_load(self):
"""Initialization of model when reconstructed from query."""
self.warning_store = list()
def kick_watchdog(self):
"""Used during import to notify the import is still progressing."""
self.watchdog_timer_datetime = date_time.get_utc_now_datetime()
def add_warning(self, warning):
"""Add a warning to the model.
Args:
warning (str): Warning message
"""
if warning:
self.warning_store.append(warning)
def get_warnings(self):
"""Returns any stored warnings.
Returns:
str: warning message
"""
if self.warning_store:
return '\n'.join(self.warning_store)
return ''
def set_inprogress(self):
"""Set state to 'in progress'."""
self.state = 'INPROGRESS'
def add_description(self, description):
"""Add new description to the model
Args:
description (str): the description to be added in json format
"""
new_desc = json.loads(description)
model_desc = json.loads(self.description)
for new_item in new_desc:
model_desc[new_item] = new_desc[new_item]
self.description = json.dumps(model_desc, sort_keys=True)
def set_done(self, message=''):
"""Indicate a finished import.
Args:
message (str): Success message or ''
"""
warnings = self.get_warnings()
if warnings:
LOGGER.debug('warnings = %s', warnings)
self.warnings = warnings
self.state = 'PARTIAL_SUCCESS'
else:
self.state = 'SUCCESS'
self.message = message
def set_error(self, message):
"""Indicate a broken import.
Args:
message (str): error message
"""
self.state = 'BROKEN'
self.warnings = self.get_warnings()
self.message = message
LOGGER.error('warning = %s, message = %s',
self.warnings, self.message)
def __repr__(self):
"""String representation.
Returns:
str: Model represented as
(name='{}', handle='{}' state='{}')
"""
return '<Model(name={}, handle={} state={})>'.format(
self.name, self.handle, self.state)
# pylint: disable=too-many-locals,no-member
def define_model(model_name, dbengine, model_seed):
"""Defines table classes which point to the corresponding model.
This means, for each model being accessed this function needs to
be called in order to generate a full set of table definitions.
Models are name spaced via a random model seed such that multiple
models can exist within the same database; the random seed is what
implements the name spacing in an abstract way.
Args:
model_name (str): model handle
dbengine (object): db engine
model_seed (str): seed to get etag
Returns:
tuple: (sessionmaker, ModelAccess)
"""
base = declarative_base()
denormed_group_in_group = '{}_group_in_group'.format(model_name)
bindings_tablename = '{}_bindings'.format(model_name)
roles_tablename = '{}_roles'.format(model_name)
permissions_tablename = '{}_permissions'.format(model_name)
members_tablename = '{}_members'.format(model_name)
resources_tablename = '{}_resources'.format(model_name)
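# Association tables implementing the many-to-many relations between
# roles and permissions, bindings and members, and groups and members.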
role_permissions = Table('{}_role_permissions'.format(model_name),
base.metadata,
Column(
'roles_name', ForeignKey(
'{}.name'.format(roles_tablename)),
primary_key=True),
Column(
'permissions_name', ForeignKey(
'{}.name'.format(permissions_tablename)),
primary_key=True), )
binding_members = Table('{}_binding_members'.format(model_name),
base.metadata,
Column(
'bindings_id', ForeignKey(
'{}.id'.format(bindings_tablename)),
primary_key=True),
Column(
'members_name', ForeignKey(
'{}.name'.format(members_tablename)),
primary_key=True), )
group_members = Table(
'{}_group_members'.format(model_name),
base.metadata,
Column('group_name',
ForeignKey('{}.name'.format(members_tablename)),
primary_key=True),
Column('members_name',
ForeignKey('{}.name'.format(members_tablename)),
primary_key=True),
)
def get_string_by_dialect(db_dialect, column_size):
"""Get Sqlalchemy String by dialect.
Sqlite doesn't support the collation type, so different column
types need to be defined for different database engines.
This is used to make the MySQL column case sensitive by adding
a binary collation.
Args:
db_dialect (String): The db dialect.
column_size (Integer): The size of the column.
Returns:
String: Sqlalchemy String.
"""
if db_dialect.lower() == 'sqlite':
return String(column_size)
return String(column_size, collation='utf8mb4_bin')
class Resource(base):
"""Row entry for a GCP resource."""
__tablename__ = resources_tablename
full_name = Column(String(2048), nullable=False)
type_name = Column(get_string_by_dialect(dbengine.dialect.name, 512),
primary_key=True)
parent_type_name = Column(
get_string_by_dialect(dbengine.dialect.name, 512),
ForeignKey('{}.type_name'.format(resources_tablename)))
name = Column(String(256), nullable=False)
type = Column(String(128), nullable=False)
policy_update_counter = Column(Integer, default=0)
display_name = Column(String(256), default='')
email = Column(String(256), default='')
data = Column(Text(16777215))
parent = relationship('Resource', remote_side=[type_name])
bindings = relationship('Binding', back_populates='resource')
def increment_update_counter(self):
"""Increments counter for this object's db updates.
"""
self.policy_update_counter += 1
def get_etag(self):
"""Return the etag for this resource.
Returns:
str: etag to avoid race condition when set policy
"""
serialized_ctr = struct.pack('>I', self.policy_update_counter)
msg = binascii.hexlify(serialized_ctr)
msg += self.full_name
return hmac.new(model_seed.encode('utf-8'), msg).hexdigest()
def __repr__(self):
"""String representation.
Returns:
str: Resource represented as
(full_name='{}', name='{}' type='{}')
"""
return '<Resource(full_name={}, name={} type={})>'.format(
self.full_name, self.name, self.type)
Resource.children = relationship(
'Resource', order_by=Resource.full_name, back_populates='parent')
class Binding(base):
"""Row for a binding between resource, roles and members."""
__tablename__ = bindings_tablename
id = Column(Integer, Sequence('{}_id_seq'.format(bindings_tablename)),
primary_key=True)
resource_type_name = Column(
get_string_by_dialect(dbengine.dialect.name, 512),
ForeignKey('{}.type_name'.format(resources_tablename)))
role_name = Column(String(128), ForeignKey(
'{}.name'.format(roles_tablename)))
resource = relationship('Resource', remote_side=[resource_type_name])
role = relationship('Role', remote_side=[role_name])
members = relationship('Member',
secondary=binding_members,
back_populates='bindings')
def __repr__(self):
"""String Representation
Returns:
str: Binding represented as
(id='{}', role='{}', resource='{}' members='{}')
"""
fmt_s = '<Binding(id={}, role={}, resource={} members={})>'
return fmt_s.format(
self.id,
self.role_name,
self.resource_type_name,
self.members)
class Member(base):
"""Row entry for a policy member."""
__tablename__ = members_tablename
name = Column(String(256), primary_key=True)
type = Column(String(64))
member_name = Column(String(256))
parents = relationship(
'Member',
secondary=group_members,
primaryjoin=name == group_members.c.members_name,
secondaryjoin=name == group_members.c.group_name)
children = relationship(
'Member',
secondary=group_members,
primaryjoin=name == group_members.c.group_name,
secondaryjoin=name == group_members.c.members_name)
bindings = relationship('Binding',
secondary=binding_members,
back_populates='members')
def __repr__(self):
"""String representation.
Returns:
str: Member represented as (name='{}', type='{}')
"""
return '<Member(name={}, type={})>'.format(
self.name, self.type)
class GroupInGroup(base):
"""Row for a group-in-group membership."""
__tablename__ = denormed_group_in_group
parent = Column(String(256), primary_key=True)
member = Column(String(256), primary_key=True)
def __repr__(self):
"""String representation.
Returns:
str: GroupInGroup represented as (parent='{}', member='{}')
"""
return '<GroupInGroup(parent={}, member={})>'.format(
self.parent,
self.member)
class Role(base):
"""Row entry for an IAM role."""
__tablename__ = roles_tablename
name = Column(String(128), primary_key=True)
title = Column(String(128), default='')
stage = Column(String(128), default='')
description = Column(String(1024), default='')
custom = Column(Boolean, default=False)
permissions = relationship('Permission',
secondary=role_permissions,
back_populates='roles')
def __repr__(self):
"""String Representation
Returns:
str: Role represented by name
"""
return '<Role(name=%s)>' % self.name
class Permission(base):
"""Row entry for an IAM permission."""
__tablename__ = permissions_tablename
name = Column(String(128), primary_key=True)
roles = relationship('Role',
secondary=role_permissions,
back_populates='permissions')
def __repr__(self):
"""String Representation
Returns:
str: Permission represented by name
"""
return '<Permission(name=%s)>' % self.name
# pylint: disable=too-many-public-methods
class ModelAccess(object):
"""Data model facade, implement main API against database."""
TBL_GROUP_IN_GROUP = GroupInGroup
TBL_BINDING = Binding
TBL_MEMBER = Member
TBL_PERMISSION = Permission
TBL_ROLE = Role
TBL_RESOURCE = Resource
TBL_MEMBERSHIP = group_members
# Set of member binding types that expand like groups.
GROUP_TYPES = {'group',
'projecteditor',
'projectowner',
'projectviewer'}
# Members that represent all users
ALL_USER_MEMBERS = ['allusers', 'allauthenticatedusers']
@classmethod
def delete_all(cls, engine):
"""Delete all data from the model.
Args:
engine (object): database engine
"""
LOGGER.info('Deleting all data from the model.')
role_permissions.drop(engine)
binding_members.drop(engine)
group_members.drop(engine)
Binding.__table__.drop(engine)
Permission.__table__.drop(engine)
GroupInGroup.__table__.drop(engine)
Role.__table__.drop(engine)
Member.__table__.drop(engine)
Resource.__table__.drop(engine)
@classmethod
def denorm_group_in_group(cls, session):
"""Denormalize group-in-group relation.
This method will fill the GroupInGroup table with
(parent, member) if parent is an ancestor of member. Whenever a
group or a group-group relationship is added or removed, this
method should be called to re-denormalize.
Args:
session (object): Database session to use.
Returns:
int: Number of iterations.
Raises:
Exception: if the denormalization fails
"""
tbl1 = aliased(GroupInGroup.__table__, name='alias1')
tbl2 = aliased(GroupInGroup.__table__, name='alias2')
tbl3 = aliased(GroupInGroup.__table__, name='alias3')
if get_sql_dialect(session) != 'sqlite':
# Lock tables for denormalization
# including aliases 1-3
locked_tables = [
'`{}`'.format(GroupInGroup.__tablename__),
'`{}` as {}'.format(
GroupInGroup.__tablename__,
tbl1.name),
'`{}` as {}'.format(
GroupInGroup.__tablename__,
tbl2.name),
'`{}` as {}'.format(
GroupInGroup.__tablename__,
tbl3.name),
'`{}`'.format(group_members.name)]
lock_stmts = ['{} WRITE'.format(tbl) for tbl in locked_tables]
query = 'LOCK TABLES {}'.format(', '.join(lock_stmts))
session.execute(query)
try:
# Remove all existing rows in the denormalization
session.execute(GroupInGroup.__table__.delete())
# Select member relation into GroupInGroup
qry = (GroupInGroup.__table__.insert().from_select(
['parent', 'member'], group_members.select().where(
group_members.c.group_name.startswith('group/')
).where(
group_members.c.members_name.startswith('group/')
)
))
session.execute(qry)
iterations = 0
rows_affected = True
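# Repeatedly self-join the denormalized table until a fixed point
# is reached, i.e. no new (ancestor, member) pairs are discovered.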
while rows_affected:
# Join the membership table with itself to find transitive memberships.
expansion = tbl1.join(tbl2, tbl1.c.member == tbl2.c.parent)
# Left outer join to find the entries that
# are already in the table to prevent
# inserting already existing entries
expansion = expansion.outerjoin(
tbl3,
and_(tbl1.c.parent == tbl3.c.parent,
tbl2.c.member == tbl3.c.member))
# Select only such elements that are not
# already in the table, indicated as NULL
# values through the outer-left-join
stmt = (
select([tbl1.c.parent,
tbl2.c.member])
.select_from(expansion)
# pylint: disable=singleton-comparison
.where(tbl3.c.parent == None)
.distinct()
)
# Execute the query and insert into the table
qry = (GroupInGroup.__table__
.insert()
.from_select(['parent', 'member'], stmt))
rows_affected = bool(session.execute(qry).rowcount)
iterations += 1
except Exception as e:
LOGGER.exception(e)
session.rollback()
raise
finally:
if get_sql_dialect(session) != 'sqlite':
session.execute('UNLOCK TABLES')
session.commit()
return iterations
@classmethod
def expand_special_members(cls, session):
"""Create dynamic groups for project(Editor|Owner|Viewer).
Should be called after IAM bindings are added to the model.
Args:
session (object): Database session to use.
"""
member_type_map = {
'projecteditor': 'roles/editor',
'projectowner': 'roles/owner',
'projectviewer': 'roles/viewer'}
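# Each special member looks like 'projectowner/<project-id>'; expand
# it to the members bound to the corresponding role on that project.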
for parent_member in cls.list_group_members(
session, '', member_types=member_type_map.keys()):
member_type, project_id = parent_member.split('/')
role = member_type_map[member_type]
try:
iam_policy = cls.get_iam_policy(
session,
'project/{}'.format(project_id),
roles=[role])
LOGGER.info('iam_policy: %s', iam_policy)
except NoResultFound:
LOGGER.warning('Found a non-existent project, or project '
'outside of the organization, in an IAM '
'binding: %s', parent_member)
continue
members = iam_policy.get('bindings', {}).get(role, [])
expanded_members = cls.expand_members(session, members)
for member in expanded_members:
stmt = cls.TBL_MEMBERSHIP.insert(
{'group_name': parent_member,
'members_name': member.name})
session.execute(stmt)
if member.type == 'group' and member.name in members:
session.add(cls.TBL_GROUP_IN_GROUP(
parent=parent_member,
member=member.name))
session.commit()
@classmethod
def explain_granted(cls, session, member_name, resource_type_name,
role, permission):
"""Provide info about how the member has access to the resource.
For example, member m1 might access resource r1 with permission p
because of binding (r2, rol, g1), where
r1 is a child resource of project or folder r2,
role rol contains permission p, and
m1 is a member of group g1.
This method lists the bindings that grant the access, the member
relations and the resource hierarchy.
Args:
session (object): Database session.
member_name (str): name of the member
resource_type_name (str): type_name of the resource
role (str): role to query
permission (str): permission to query
Returns:
tuple: (bindings, member_graph, resource_type_names) where
bindings are the bindings that grant the access,
member_graph is the graph showing how the member is included
in the bindings, and resource_type_names is the resource tree.
Raises:
Exception: not granted
"""
members, member_graph = cls.reverse_expand_members(
session, [member_name], request_graph=True)
member_names = [m.name for m in members]
resource_type_names = [r.type_name for r in
cls.find_resource_path(session,
resource_type_name)]
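# Candidate bindings must reference a role granting the access, one
# of the member's (expanded) identities, and a resource on the
# ancestry path computed above.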
if role:
roles = set([role])
qry = session.query(Binding, Member).join(
binding_members).join(Member)
else:
roles = [r.name for r in
cls.get_roles_by_permission_names(
session,
[permission])]
qry = session.query(Binding, Member)
qry = qry.join(binding_members).join(Member)
qry = qry.join(Role).join(role_permissions).join(Permission)
qry = qry.filter(Binding.role_name.in_(roles))
qry = qry.filter(Member.name.in_(member_names))
qry = qry.filter(
Binding.resource_type_name.in_(resource_type_names))
result = qry.all()
if not result:
error_message = 'Grant not found: ({},{},{})'.format(
member_name,
resource_type_name,
role if role is not None else permission)
LOGGER.error(error_message)
raise Exception(error_message)
else:
bindings = [(b.resource_type_name, b.role_name, m.name)
for b, m in result]
return bindings, member_graph, resource_type_names
@classmethod
def scanner_iter(cls, session, resource_type,
parent_type_name=None):
"""Iterate over all resources with the specified type.
Args:
session (object): Database session.
resource_type (str): type of the resource to scan
parent_type_name (str): type_name of the parent resource
Yields:
Resource: resource that match the query
"""
qry = (
session.query(Resource)
.filter(Resource.type == resource_type)
.options(joinedload(Resource.parent))
.enable_eagerloads(True))
if parent_type_name:
qry = qry.filter(Resource.parent_type_name == parent_type_name)
for resource in qry.yield_per(PER_YIELD):
yield resource
@classmethod
def explain_denied(cls, session, member_name, resource_type_names,
permission_names, role_names):
"""Explain why an access is denied
Provide information on how to grant access to a member if such
access is denied under the current IAM policies.
For example, if member m1 cannot access resource r1 with permission
p, this method shows the bindings whose role covers the desired
permission on resource r1 and its ancestors.
Adding the member to any of these bindings would grant the access.
An overgranting level is also provided.
Args:
session (object): Database session.
member_name (str): name of the member
resource_type_names (list): list of type_names of resources
permission_names (list): list of permissions
role_names (list): list of roles
Returns:
list: list of tuples,
(overgranting,[(role_name,member_name,resource_name)])
Raises:
Exception: No roles covering requested permission set,
Not possible
"""
if not role_names:
role_names = [r.name for r in
cls.get_roles_by_permission_names(
session,
permission_names)]
if not role_names:
error_message = 'No roles covering requested permission set'
LOGGER.error(error_message)
raise Exception(error_message)
resource_hierarchy = (
cls.resource_ancestors(session,
resource_type_names))
def find_binding_candidates(resource_hierarchy):
"""Find the root node in the ancestors.
From there, walk down the resource tree and add
every node until a node has more than one child.
This is the set of nodes which grants access to
at least all of the resources requested.
There is always a chain with a single node root.
Args:
resource_hierarchy (dict): graph of the resource hierarchy
Returns:
list: candidates to add to bindings that potentially grant
access
"""
root = None
for parent in resource_hierarchy.iterkeys():
is_root = True
for children in resource_hierarchy.itervalues():
if parent in children:
is_root = False
break
if is_root:
root = parent
chain = [root]
cur = root
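# Walk down single-child chains from the root; stop as soon as the
# hierarchy branches, since deeper nodes would no longer cover all
# requested resources.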
while len(resource_hierarchy[cur]) == 1:
cur = iter(resource_hierarchy[cur]).next()
chain.append(cur)
return chain
bind_res_candidates = find_binding_candidates(
resource_hierarchy)
bindings = (
session.query(Binding, Member)
.join(binding_members)
.join(Member)
.join(Role)
.filter(Binding.resource_type_name.in_(
bind_res_candidates))
.filter(Role.name.in_(role_names))
.filter(or_(Member.type == 'group',
Member.name == member_name))
.filter(and_((binding_members.c.bindings_id ==
Binding.id),
(binding_members.c.members_name ==
Member.name)))
.filter(Role.name == Binding.role_name)
.all())
strategies = []
for resource in bind_res_candidates:
for role_name in role_names:
overgranting = (len(bind_res_candidates) -
bind_res_candidates.index(resource) -
1)
strategies.append(
(overgranting, [
(role, member_name, resource)
for role in [role_name]]))
if bindings:
for binding, member in bindings:
overgranting = (len(bind_res_candidates) - 1 -
bind_res_candidates.index(
binding.resource_type_name))
strategies.append(
(overgranting, [
(binding.role_name,
member.name,
binding.resource_type_name)]))
return strategies
@classmethod
def query_access_by_member(cls, session, member_name, permission_names,
expand_resources=False,
reverse_expand_members=True):
"""Return the set of resources the member has access to.
By default, this method expands the group_member relation,
so the result includes all resources that can be accessed by the
groups that the member is in.
By default, this method does not expand the resource hierarchy,
so the result does not include a resource unless that resource
has a direct binding that allows access.
Args:
session (object): Database session.
member_name (str): name of the member
permission_names (list): list of names of permissions to query
expand_resources (bool): whether to expand resources
reverse_expand_members (bool): whether to expand members
Returns:
list: list of access tuples, ("role_name", "resource_type_name")
"""
if reverse_expand_members:
member_names = [m.name for m in
cls.reverse_expand_members(session,
[member_name],
False)]
else:
member_names = [member_name]
roles = cls.get_roles_by_permission_names(
session, permission_names)
qry = (
session.query(Binding)
.join(binding_members)
.join(Member)
.filter(Binding.role_name.in_([r.name for r in roles]))
.filter(Member.name.in_(member_names))
)
bindings = qry.yield_per(1024)
if not expand_resources:
return [(binding.role_name,
[binding.resource_type_name]) for binding in bindings]
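# expand_resources: map each bound resource to all of its
# descendants so the caller sees the effective access set.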
r_type_names = [binding.resource_type_name for binding in bindings]
expansion = cls.expand_resources_by_type_names(
session,
r_type_names)
res_exp = {k.type_name: [v.type_name for v in values]
for k, values in expansion.iteritems()}
return [(binding.role_name,
res_exp[binding.resource_type_name])
for binding in bindings]
@classmethod
def query_access_by_permission(cls,
session,
role_name=None,
permission_name=None,
expand_groups=False,
expand_resources=False):
"""Query access via the specified permission
Return all the (Principal, Resource) combinations allowing
satisfying access via the specified permission.
By default, the group relation and resource hierarchy will not be
expanded, so the results will only contains direct bindings
filtered by permission. But the relations can be expanded
Args:
session (object): Database session.
role_name (str): Role name to query for
permission_name (str): Permission name to query for.
expand_groups (bool): Whether or not to expand groups.
expand_resources (bool): Whether or not to expand resources.
Yields:
object: A generator of access tuples.
Raises:
ValueError: If neither role nor permission is set.
"""
if role_name:
role_names = [role_name]
elif permission_name:
role_names = [p.name for p in
cls.get_roles_by_permission_names(
session,
[permission_name])]
else:
error_message = 'Either role or permission must be set'
LOGGER.error(error_message)
raise ValueError(error_message)
if expand_resources:
expanded_resources = aliased(Resource)
qry = (
session.query(expanded_resources, Binding, Member)
.filter(binding_members.c.bindings_id == Binding.id)
.filter(binding_members.c.members_name == Member.name)
.filter(expanded_resources.full_name.startswith(
Resource.full_name))
.filter((Resource.type_name ==
Binding.resource_type_name))
.filter(Binding.role_name.in_(role_names))
.order_by(expanded_resources.name.asc(),
Binding.role_name.asc())
)
else:
qry = (
session.query(Resource, Binding, Member)
.filter(binding_members.c.bindings_id == Binding.id)
.filter(binding_members.c.members_name == Member.name)
.filter((Resource.type_name ==
Binding.resource_type_name))
.filter(Binding.role_name.in_(role_names))
.order_by(Resource.name.asc(), Binding.role_name.asc())
)
if expand_groups:
to_expand = set([m.name for _, _, m in
qry.yield_per(PER_YIELD)])
expansion = cls.expand_members_map(session, to_expand,
show_group_members=False,
member_contain_self=True)
qry = qry.distinct()
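# Group the rows by resource: emit one (role, resource, members)
# tuple per resource, expanding member groups if requested.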
cur_resource = None
cur_role = None
cur_members = set()
for resource, binding, member in qry.yield_per(PER_YIELD):
if cur_resource != resource.type_name:
if cur_resource is not None:
yield cur_role, cur_resource, cur_members
cur_resource = resource.type_name
cur_role = binding.role_name
cur_members = set()
if expand_groups:
for member_name in expansion[member.name]:
cur_members.add(member_name)
else:
cur_members.add(member.name)
if cur_resource is not None:
yield cur_role, cur_resource, cur_members
@classmethod
def query_access_by_resource(cls, session, resource_type_name,
permission_names, expand_groups=False):
"""Query access by resource
Return members who have access to the given resource.
The resource hierarchy will always be expanded, so even if the
current resource does not have that binding, if its ancestors
have the binding, the access will be shown
By default, the group relationship will not be expanded
Args:
session (object): db session
resource_type_name (str): type_name of the resource to query
permission_names (list): list of strs, names of the permissions
to query
expand_groups (bool): whether to expand groups
Returns:
dict: role_member_mapping, <"role_name", "member_names">
"""
roles = cls.get_roles_by_permission_names(
session, permission_names)
resources = cls.find_resource_path(session, resource_type_name)
res = (session.query(Binding, Member)
.filter(
Binding.role_name.in_([r.name for r in roles]),
Binding.resource_type_name.in_(
[r.type_name for r in resources]))
.join(binding_members).join(Member))
role_member_mapping = collections.defaultdict(set)
for binding, member in res:
role_member_mapping[binding.role_name].add(member.name)
if expand_groups:
for role in role_member_mapping:
role_member_mapping[role] = (
[m.name for m in cls.expand_members(
session,
role_member_mapping[role])])
return role_member_mapping
@classmethod
def query_permissions_by_roles(cls, session, role_names, role_prefixes,
_=1024):
"""Resolve permissions for the role.
Args:
session (object): db session
role_names (list): list of strs, names of the roles
role_prefixes (list): list of strs, prefixes of the roles
_ (int): unused placeholder argument
Returns:
list: list of (Role, Permission)
Raises:
Exception: No roles or role prefixes specified
"""
if not role_names and not role_prefixes:
error_message = 'No roles or role prefixes specified'
LOGGER.error(error_message)
raise Exception(error_message)
qry = session.query(Role, Permission).join(
role_permissions).join(Permission)
if role_names:
qry = qry.filter(Role.name.in_(role_names))
if role_prefixes:
qry = qry.filter(
or_(*[Role.name.startswith(prefix)
for prefix in role_prefixes]))
return qry.all()
@classmethod
def set_iam_policy(cls,
session,
resource_type_name,
policy,
update_members=False):
"""Set IAM policy
Sets an IAM policy for the resource, check the etag when setting
new policy and reassign new etag.
Check etag to avoid race condition
Args:
session (object): db session
resource_type_name (str): type_name of the resource
policy (dict): the policy to set on the resource
update_members (bool): If true, then add new members to Member
table. This must be set when the call to set_iam_policy
happens outside of the model InventoryImporter class. Tests
or users that manually add an IAM policy need to mark this
as true to ensure the model remains consistent.
Raises:
Exception: Etag doesn't match
"""
LOGGER.info('Setting IAM policy, resource_type_name = %s, policy'
' = %s, session = %s',
resource_type_name, policy, session)
old_policy = cls.get_iam_policy(session, resource_type_name)
if policy['etag'] != old_policy['etag']:
error_message = 'Etags distinct, stored={}, provided={}'.format(
old_policy['etag'], policy['etag'])
LOGGER.error(error_message)
raise Exception(error_message)
old_policy = old_policy['bindings']
policy = policy['bindings']
def filter_etag(policy):
"""Filter etag key/value out of policy map.
Args:
policy (dict): the policy to filter
Returns:
dict: policy without etag, <"bindings":[<role, members>]>
"""
return {k: v for k, v in policy.iteritems() if k != 'etag'}
def calculate_diff(policy, old_policy):
"""Calculate the grant/revoke difference between policies.
The diff = policy['bindings'] - old_policy['bindings']
Args:
policy (dict): the new policy in dict format
old_policy (dict): the old policy in dict format
Returns:
dict: <role, members> diff of bindings
"""
diff = collections.defaultdict(list)
for role, members in filter_etag(policy).iteritems():
if role in old_policy:
for member in members:
if member not in old_policy[role]:
diff[role].append(member)
else:
diff[role] = members
return diff
grants = calculate_diff(policy, old_policy)
revocations = calculate_diff(old_policy, policy)
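# Process revocations first: delete bindings for roles whose
# members were dropped in the new policy.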
for role, members in revocations.iteritems():
bindings = (
session.query(Binding)
.filter((Binding.resource_type_name ==
resource_type_name))
.filter(Binding.role_name == role)
.join(binding_members).join(Member)
.filter(Member.name.in_(members)).all())
for binding in bindings:
session.delete(binding)
for role, members in grants.iteritems():
inserted = False
existing_bindings = (
session.query(Binding)
.filter((Binding.resource_type_name ==
resource_type_name))
.filter(Binding.role_name == role)
.all())
if update_members:
for member in members:
if not cls.get_member(session, member):
try:
# This is the default case, e.g. 'group/foobar'
m_type, name = member.split('/', 1)
except ValueError:
# Special groups like 'allUsers'
m_type, name = member, member
session.add(cls.TBL_MEMBER(
name=member,
type=m_type,
member_name=name))
for binding in existing_bindings:
if binding.role_name == role:
inserted = True
for member in members:
binding.members.append(
session.query(Member).filter(
Member.name == member).one())
if not inserted:
binding = Binding(
resource_type_name=resource_type_name,
role=session.query(Role).filter(
Role.name == role).one())
binding.members = session.query(Member).filter(
Member.name.in_(members)).all()
session.add(binding)
resource = session.query(Resource).filter(
Resource.type_name == resource_type_name).one()
resource.increment_update_counter()
session.commit()
@classmethod
def get_iam_policy(cls, session, resource_type_name, roles=None):
"""Return the IAM policy for a resource.
Args:
session (object): db session
resource_type_name (str): type_name of the resource to query
roles (list): An optional list of roles to limit the results to
Returns:
dict: the IAM policy
"""
resource = session.query(Resource).filter(
Resource.type_name == resource_type_name).one()
policy = {'etag': resource.get_etag(),
'bindings': {},
'resource': resource.type_name}
bindings = session.query(Binding).filter(
Binding.resource_type_name == resource_type_name)
if roles:
bindings = bindings.filter(Binding.role_name.in_(roles))
for binding in bindings.all():
role = binding.role_name
members = [m.name for m in binding.members]
policy['bindings'][role] = members
return policy
@classmethod
def check_iam_policy(cls, session, resource_type_name, permission_name,
member_name):
"""Check access according to the resource IAM policy.
Args:
session (object): db session
resource_type_name (str): type_name of the resource to check
permission_name (str): name of the permission to check
member_name (str): name of the member to check
Returns:
bool: whether such access is allowed
Raises:
Exception: member or resource not found
"""
member_names = [m.name for m in
cls.reverse_expand_members(
session,
[member_name])]
resource_type_names = [r.type_name for r in cls.find_resource_path(
session,
resource_type_name)]
if not member_names:
error_message = 'Member not found: {}'.format(member_name)
LOGGER.error(error_message)
raise Exception(error_message)
if not resource_type_names:
error_message = 'Resource not found: {}'.format(
resource_type_name)
LOGGER.error(error_message)
raise Exception(error_message)
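# Access is allowed if any binding on the resource or one of its
# ancestors grants a role containing the permission to any of the
# member's (expanded) identities.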
return (session.query(Permission)
.filter(Permission.name == permission_name)
.join(role_permissions).join(Role).join(Binding)
.filter(Binding.resource_type_name.in_(resource_type_names))
.join(binding_members).join(Member)
.filter(Member.name.in_(member_names)).first() is not None)
@classmethod
def list_roles_by_prefix(cls, session, role_prefix):
"""Provides a list of roles matched via name prefix.
Args:
session (object): db session
role_prefix (str): prefix of the role_name
Returns:
list: list of role_names that match the query
"""
return [r.name for r in session.query(Role).filter(
Role.name.startswith(role_prefix)).all()]
@classmethod
def add_role_by_name(cls, session, role_name, permission_names):
"""Creates a new role.
Args:
session (object): db session
role_name (str): name of the role to add
permission_names (list): list of permissions in the role
"""
LOGGER.info('Creating a new role, role_name = %s, permission_names'
' = %s, session = %s',
role_name, permission_names, session)
permission_names = set(permission_names)
existing_permissions = session.query(Permission).filter(
Permission.name.in_(permission_names)).all()
for existing_permission in existing_permissions:
try:
permission_names.remove(existing_permission.name)
except KeyError:
LOGGER.warn('existing_permissions.name = %s, KeyError',
existing_permission.name)
new_permissions = [Permission(name=n) for n in permission_names]
for perm in new_permissions:
session.add(perm)
cls.add_role(session, role_name,
existing_permissions + new_permissions)
session.commit()
@classmethod
def add_group_member(cls,
session,
member_type_name,
parent_type_names,
denorm=False):
"""Add member, optionally with parent relationship.
Args:
session (object): db session
member_type_name (str): type_name of the member to add
parent_type_names (list): type_names of the parents
denorm (bool): whether to denormalize the GroupInGroup table after
addition
"""
LOGGER.info('Adding a member, member_type_name = %s,'
' parent_type_names = %s, denorm = %s, session = %s',
member_type_name, parent_type_names, denorm, session)
cls.add_member(session,
member_type_name,
parent_type_names,
denorm)
session.commit()
@classmethod
def list_group_members(cls,
session,
member_name_prefix,
member_types=None):
"""Returns members filtered by prefix.
Args:
session (object): db session
member_name_prefix (str): the prefix of the member_name
member_types (list): an optional list of member types to filter
the results by.
Returns:
list: list of Members that match the query
"""
qry = session.query(Member).filter(
Member.member_name.startswith(member_name_prefix))
if member_types:
qry = qry.filter(Member.type.in_(member_types))
return [m.name for m in qry.all()]
@classmethod
def iter_groups(cls, session):
"""Returns iterator of all groups in model.
Args:
session (object): db session
Yields:
Member: group in the model
"""
qry = session.query(Member).filter(Member.type == 'group')
for group in qry.yield_per(1024):
yield group
@classmethod
def iter_resources_by_prefix(cls,
session,
full_resource_name_prefix=None,
type_name_prefix=None,
type_prefix=None,
name_prefix=None):
"""Returns iterator to resources filtered by prefix.
Args:
session (object): db session
full_resource_name_prefix (str): the prefix of the
full_resource_name
type_name_prefix (str): the prefix of the type_name
type_prefix (str): the prefix of the type
name_prefix (str): the prefix of the name
Yields:
Resource: that match the query
Raises:
Exception: No prefix given
"""
if not any([arg is not None for arg in [full_resource_name_prefix,
type_name_prefix,
type_prefix,
name_prefix]]):
error_message = 'At least one prefix must be set'
LOGGER.error(error_message)
raise Exception(error_message)
qry = session.query(Resource)
if full_resource_name_prefix:
qry = qry.filter(Resource.full_name.startswith(
full_resource_name_prefix))
if type_name_prefix:
qry = qry.filter(Resource.type_name.startswith(
type_name_prefix))
if type_prefix:
qry = qry.filter(Resource.type.startswith(
type_prefix))
if name_prefix:
qry = qry.filter(Resource.name.startswith(
name_prefix))
for resource in qry.yield_per(1024):
yield resource
@classmethod
def list_resources_by_prefix(cls,
session,
full_resource_name_prefix=None,
type_name_prefix=None,
type_prefix=None,
name_prefix=None):
"""Returns resources filtered by prefix.
Args:
session (object): db session
full_resource_name_prefix (str): the prefix of the
full_resource_name
type_name_prefix (str): the prefix of the type_name
type_prefix (str): the prefix of the type
name_prefix (str): the prefix of the name
Returns:
list: list of Resources that match the query
"""
return list(
cls.iter_resources_by_prefix(session,
full_resource_name_prefix,
type_name_prefix,
type_prefix,
name_prefix))
@classmethod
def add_resource_by_name(cls,
session,
resource_type_name,
parent_type_name,
no_require_parent):
"""Adds resource specified via full name.
Args:
session (object): db session
resource_type_name (str): name of the resource
parent_type_name (str): name of the parent resource
no_require_parent (bool): if this resource has a parent
Returns:
Resource: Created new resource
"""
LOGGER.info('Adding resource via full name, resource_type_name'
' = %s, parent_type_name = %s, no_require_parent = %s,'
' session = %s', resource_type_name,
parent_type_name, no_require_parent, session)
if not no_require_parent:
parent = session.query(Resource).filter(
Resource.type_name == parent_type_name).one()
else:
parent = None
return cls.add_resource(session, resource_type_name, parent)
@classmethod
def add_resource(cls, session, resource_type_name, parent=None):
"""Adds resource by name.
Args:
session (object): db session
resource_type_name (str): name of the resource
parent (Resource): parent of the resource
Returns:
Resource: Created new resource
"""
LOGGER.info('Adding resource by name, resource_type_name = %s,'
' session = %s', resource_type_name, session)
res_type, res_name = resource_type_name.split('/')
parent_full_resource_name = (
'' if parent is None else parent.full_name)
full_resource_name = to_full_resource_name(
parent_full_resource_name,
resource_type_name)
resource = Resource(full_name=full_resource_name,
type_name=resource_type_name,
name=res_name,
type=res_type,
parent=parent)
session.add(resource)
return resource
@classmethod
def add_role(cls, session, name, permissions=None):
"""Add role by name.
Args:
session (object): db session
name (str): name of the role to add
permissions (list): permissions to add in the role
Returns:
Role: The created role
"""
LOGGER.info('Adding role, name = %s, permissions = %s,'
' session = %s', name, permissions, session)
permissions = [] if permissions is None else permissions
role = Role(name=name, permissions=permissions)
session.add(role)
return role
@classmethod
def add_permission(cls, session, name, roles=None):
"""Add permission by name.
Args:
session (object): db session
name (str): name of the permission
roles (list): list of roles to add the permission to
Returns:
Permission: The created permission
"""
LOGGER.info('Adding permission, name = %s, roles = %s'
' session = %s', name, roles, session)
roles = [] if roles is None else roles
permission = Permission(name=name, roles=roles)
session.add(permission)
return permission
@classmethod
def add_binding(cls, session, resource, role, members):
"""Add a binding to the model.
Args:
session (object): db session
resource (str): Resource to be added in the binding
role (str): Role to be added in the binding
members (list): members to be added in the binding
Returns:
Binding: the created binding
"""
LOGGER.info('Adding a binding to the model, resource = %s,'
' role = %s, members = %s, session = %s',
resource, role, members, session)
binding = Binding(resource=resource, role=role, members=members)
session.add(binding)
return binding
@classmethod
def add_member(cls,
session,
type_name,
parent_type_names=None,
denorm=False):
"""Add a member to the model.
Args:
session (object): db session
type_name (str): type_name of the resource to add
parent_type_names (list): list of parent names to add
denorm (bool): whether to denormalize the GroupInGroup relation
Returns:
Member: the created member
Raises:
Exception: parent not found
"""
LOGGER.info('Adding a member to the model, type_name = %s,'
' parent_type_names = %s, denorm = %s, session = %s',
type_name, parent_type_names, denorm, session)
if not parent_type_names:
parent_type_names = []
res_type, name = type_name.split('/', 1)
parents = session.query(Member).filter(
Member.name.in_(parent_type_names)).all()
if len(parents) != len(parent_type_names):
msg = 'Parents: {}, expected: {}'.format(
parents, parent_type_names)
error_message = 'Parent not found, {}'.format(msg)
LOGGER.error(error_message)
raise Exception(error_message)
member = Member(name=type_name,
member_name=name,
type=res_type,
parents=parents)
session.add(member)
session.commit()
if denorm and res_type == 'group' and parents:
cls.denorm_group_in_group(session)
return member
@classmethod
def expand_resources_by_type_names(cls, session, res_type_names):
"""Expand resources by type/name format.
Args:
session (object): db session
res_type_names (list): list of resources in type_name format
Returns:
dict: mapping in the form:
{res_type_name: Expansion(res_type_name), ... }
"""
res_key = aliased(Resource, name='res_key')
res_values = aliased(Resource, name='res_values')
expressions = []
for res_type_name in res_type_names:
expressions.append(and_(
res_key.type_name == res_type_name))
res = (
session.query(res_key, res_values)
.filter(res_key.type_name.in_(res_type_names))
.filter(res_values.full_name.startswith(
res_key.full_name))
.yield_per(1024)
)
mapping = collections.defaultdict(set)
for k, value in res:
mapping[k].add(value)
return mapping
@classmethod
def reverse_expand_members(cls, session, member_names,
request_graph=False):
"""Expand members to their groups.
List all groups that contain these members. Also return
the graph if requested.
Args:
session (object): db session
member_names (list): list of members to expand
request_graph (bool): whether the parent-child graph is provided
Returns:
object: set if graph not requested, set and graph if requested
"""
member_names.extend(cls.ALL_USER_MEMBERS)
members = session.query(Member).filter(
Member.name.in_(member_names)).all()
membership_graph = collections.defaultdict(set)
member_set = set()
new_member_set = set()
def add_to_sets(members, child):
"""Adds the members & children to the sets.
Args:
members (list): list of Members to be added
child (Member): child to be added
"""
for member in members:
if request_graph and child:
membership_graph[child.name].add(member.name)
if request_graph and not child:
if member.name not in membership_graph:
membership_graph[member.name] = set()
if member not in member_set:
new_member_set.add(member)
member_set.add(member)
add_to_sets(members, None)
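# Breadth-first walk: repeatedly add the parent groups of newly
# discovered members until no new members are found.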
while new_member_set:
members_to_walk = new_member_set
new_member_set = set()
for member in members_to_walk:
add_to_sets(member.parents, member)
if request_graph:
return member_set, membership_graph
return member_set
@classmethod
def expand_members_map(cls,
session,
member_names,
show_group_members=True,
member_contain_self=True):
"""Expand group membership keyed by member.
Args:
session (object): db session
member_names (set): Member names to expand
show_group_members (bool): Whether to include subgroups
member_contain_self (bool): Whether to include a parent
as its own member
Returns:
dict: <Member, set(Children)>
"""
def separate_groups(member_names):
"""Separate groups and other members in two lists.
This is a helper function; groups are needed to query the
group_in_group table.
Args:
member_names (list): list of members to be separated
Returns:
tuples: two lists of strs containing groups and others
"""
groups = []
others = []
for name in member_names:
member_type = name.split('/')[0]
if member_type in cls.GROUP_TYPES:
groups.append(name)
else:
others.append(name)
return groups, others
selectables = []
group_names, other_names = separate_groups(member_names)
t_ging = GroupInGroup.__table__
t_members = group_members
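# Build up to three SELECTs (transitive membership, direct
# membership and, optionally, group-in-group edges) and UNION them.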
# This resolves groups to its transitive non-group members.
transitive_membership = (
select([t_ging.c.parent, t_members.c.members_name])
.select_from(t_ging.join(t_members,
(t_ging.c.member ==
t_members.c.group_name)))
).where(t_ging.c.parent.in_(group_names))
if not show_group_members:
transitive_membership = transitive_membership.where(
not_(t_members.c.members_name.startswith('group/')))
selectables.append(
transitive_membership.alias('transitive_membership'))
direct_membership = (
select([t_members.c.group_name,
t_members.c.members_name])
.where(t_members.c.group_name.in_(group_names))
)
if not show_group_members:
direct_membership = direct_membership.where(
not_(t_members.c.members_name.startswith('group/')))
selectables.append(
direct_membership.alias('direct_membership'))
if show_group_members:
# Show groups as members of other groups
group_in_groups = (
select([t_ging.c.parent,
t_ging.c.member]).where(
t_ging.c.parent.in_(group_names))
)
selectables.append(
group_in_groups.alias('group_in_groups'))
# Union all the queries
qry = union(*selectables)
# Build the result dict
result = collections.defaultdict(set)
for parent, child in session.execute(qry):
result[parent].add(child)
for parent in other_names:
result[parent] = set()
# Add each parent as its own member
if member_contain_self:
for name in member_names:
result[name].add(name)
return result
@classmethod
def expand_members(cls, session, member_names):
"""Expand group membership towards the members.
Args:
session (object): db session
member_names (list): list of strs of member names
Returns:
set: expanded group members
"""
members = session.query(Member).filter(
Member.name.in_(member_names)).all()
def is_group(member):
"""Returns true iff the member is a group.
Args:
member (Member): member to check
Returns:
bool: whether the member is a group
"""
return member.type in cls.GROUP_TYPES
group_set = set()
non_group_set = set()
new_group_set = set()
def add_to_sets(members):
"""Adds new members to the sets.
Args:
members (list): members to be added
"""
for member in members:
if is_group(member):
if member not in group_set:
new_group_set.add(member)
group_set.add(member)
else:
non_group_set.add(member)
add_to_sets(members)
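# Breadth-first walk: repeatedly expand newly discovered groups into
# their children until no unexpanded groups remain.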
while new_group_set:
groups_to_walk = new_group_set
new_group_set = set()
for group in groups_to_walk:
add_to_sets(group.children)
return group_set.union(non_group_set)
@classmethod
def resource_ancestors(cls, session, resource_type_names):
"""Resolve the transitive ancestors by type/name format.
Given a group of resources, find all of their parents.
This method then groups the (parent, child) pairs by parent. Used to
determine resource candidates to grant access in explain denied.
Args:
session (object): db session
resource_type_names (list): list of strs, resources to query
Returns:
dict: <parent, children> graph of the resource hierarchy
"""
resource_names = resource_type_names
resource_graph = collections.defaultdict(set)
res_childs = aliased(Resource, name='res_childs')
res_anc = aliased(Resource, name='resource_parent')
resources_set = set(resource_names)
resources_new = set(resource_names)
for resource in resources_new:
resource_graph[resource] = set()
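# Iteratively query the parents of the current frontier until no
# new ancestors are discovered.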
while resources_new:
resources_new = set()
for parent, child in (
session.query(res_anc, res_childs)
.filter(res_childs.type_name.in_(resources_set))
.filter(res_childs.parent_type_name ==
res_anc.type_name)
.all()):
if parent.type_name not in resources_set:
resources_new.add(parent.type_name)
resources_set.add(parent.type_name)
resources_set.add(child.type_name)
resource_graph[parent.type_name].add(child.type_name)
return resource_graph
@classmethod
def find_resource_path(cls, session, resource_type_name):
"""Find resource ancestors by type/name format.
Find all ancestors of a resource and return them in order.
Args:
session (object): db session
resource_type_name (str): resource to query
Returns:
list: list of Resources, transitive ancestors for the given
resource
"""
qry = (
session.query(Resource).filter(
Resource.type_name == resource_type_name)
)
resources = qry.all()
return cls._find_resource_path(session, resources)
@classmethod
def _find_resource_path(cls, _, resources):
"""Find the list of transitive ancestors for the given resource.
Args:
_ (object): unused placeholder argument
resources (list): list of the resources to query
Returns:
list: list of Resources, transitive ancestors for the given
resource
"""
if not resources:
return []
path = []
resource = resources[0]
path.append(resource)
while resource.parent:
resource = resource.parent
path.append(resource)
return path
@classmethod
def get_roles_by_permission_names(cls, session, permission_names):
"""Return the list of roles covering the specified permissions.
Args:
session (object): db session
permission_names (list): permissions to be covered by.
Returns:
set: roles set that cover the permissions
"""
permission_set = set(permission_names)
qry = session.query(Permission)
if permission_set:
qry = qry.filter(Permission.name.in_(permission_set))
permissions = qry.all()
roles = set()
for permission in permissions:
for role in permission.roles:
roles.add(role)
result_set = set()
for role in roles:
role_permissions = set(
[p.name for p in role.permissions])
if permission_set.issubset(role_permissions):
result_set.add(role)
return result_set
@classmethod
def get_member(cls, session, name):
"""Get member by name.
Args:
session (object): db session
name (str): the name the member to query
Returns:
list: Members from the query
"""
return session.query(Member).filter(Member.name == name).all()
base.metadata.create_all(dbengine)
return sessionmaker(bind=dbengine), ModelAccess
def undefine_model(session_maker, data_access):
"""Deletes an entire model and the corresponding data in the database.
Args:
session_maker (func): session_maker function
data_access (ModelAccess): data access layer
"""
session = session_maker()
data_access.delete_all(session)
LOCK = Lock()
class ModelManager(object):
"""The Central class to create,list,get and delete models.
ModelManager is mostly used to do the lookup from model name to the
session cache which is given in each client's request.
"""
def __init__(self, dbengine):
"""Initialization
Args:
dbengine (object): Database engine
"""
self.engine = dbengine
self.modelmaker = self._create_model_session()
self.sessionmakers = {}
def _create_model_session(self):
"""Create a session to read from the models table.
Returns:
object: db session created
"""
MODEL_BASE.metadata.create_all(self.engine)
return db.ScopedSessionMaker(
sessionmaker(
bind=self.engine),
auto_commit=True)
@mutual_exclusive(LOCK)
def create(self, name):
"""Create a new model entry in the database.
Args:
name (str): model name
Returns:
str: the handle of the model
"""
LOGGER.info('Creating a new model entry in the database,'
' name = %s', name)
handle = generate_model_handle()
with self.modelmaker() as session:
utc_now = date_time.get_utc_now_datetime()
model = Model(
handle=handle,
name=name,
state='CREATED',
created_at_datetime=utc_now,
watchdog_timer_datetime=utc_now,
etag_seed=generate_model_seed(),
description='{}'
)
session.add(model)
self.sessionmakers[model.handle] = define_model(
model.handle, self.engine, model.etag_seed)
return handle
def get(self, model):
"""Get model data by handle.
Args:
model (str): model handle
Returns:
tuple: session and ModelAccess object
"""
session_maker, data_access = self._get(model)
return db.ScopedSession(session_maker()), data_access
def get_readonly_session(self):
"""Get read-only session.
Returns:
Session: The read-only session."""
return db.create_scoped_readonly_session(self.engine)
def _get(self, handle):
"""Get model data by name internal.
Args:
handle (str): the model handle
Returns:
Model: the model in the session maker
Raises:
KeyError: model handle not available
"""
if handle not in [m.handle for m in self.models()]:
error_message = 'handle={}, available={}'.format(
handle,
[m.handle for m in self.models()]
)
LOGGER.error(error_message)
raise KeyError(error_message)
try:
return self.sessionmakers[handle]
except KeyError:
LOGGER.debug('Sessionmakers doesn\'t contain handle = %s,'
' creating a new handle.', handle)
with self.modelmaker() as session:
model = (
session.query(Model).filter(Model.handle == handle).one()
)
self.sessionmakers[model.handle] = define_model(
model.handle, self.engine, model.etag_seed)
return self.sessionmakers[model.handle]
@mutual_exclusive(LOCK)
def delete(self, model_name):
"""Delete a model entry in the database by name.
Args:
model_name (str): the name of the model to be deleted
"""
LOGGER.info('Deleting model by name, model_name = %s', model_name)
_, data_access = self._get(model_name)
if model_name in self.sessionmakers:
del self.sessionmakers[model_name]
with self.modelmaker() as session:
session.query(Model).filter(Model.handle == model_name).delete()
data_access.delete_all(self.engine)
def _models(self, expunge=False):
"""Return the list of models from the database.
Args:
expunge (bool): Whether or not to detach the object from
the session for use in another session.
Returns:
list: list of Models in the db
"""
with self.modelmaker() as session:
items = session.query(Model).all()
if expunge:
session.expunge_all()
return items
def models(self):
"""Expunging wrapper for _models.
Returns:
list: list of Models in the db
"""
return self._models(expunge=True)
def model(self, model_name, expunge=True, session=None):
"""Get model from database by name.
Args:
model_name (str): Model name or handle
expunge (bool): Whether or not to detach the object from
the session for use in another session.
session (object): Database session.
Returns:
Model: the dbo of the queried model
"""
def instantiate_model(session, model_name, expunge):
"""Creates a model object by querying the database.
Args:
session (object): Database session.
model_name (str): Model name to instantiate.
expunge (bool): Whether or not to detach the object from
the session for use in another session.
Returns:
Model: the dbo of the queried model
"""
item = session.query(Model).filter(
Model.handle == model_name).one()
if expunge:
session.expunge(item)
return item
if not session:
with self.modelmaker() as scoped_session:
return instantiate_model(scoped_session, model_name, expunge)
else:
return instantiate_model(session, model_name, expunge)
def get_model(self, model, expunge=True, session=None):
"""Get model from database by name or handle.
Args:
model (str): Model name or handle
expunge (bool): Whether or not to detach the object from
the session for use in another session.
session (object): Database session.
Returns:
Model: the dbo of the queried model
"""
def query_model(session, model, expunge):
"""Get a model object by querying the database.
Args:
session (object): Database session.
model (str): Model name or handle.
expunge (bool): Whether or not to detach the object from
the session for use in another session.
Returns:
Model: the dbo of the queried model
"""
item = session.query(Model).filter(or_(
Model.handle == model,
Model.name == model)).first()
if expunge and item:
session.expunge(item)
return item
if not session:
with self.modelmaker() as scoped_session:
return query_model(scoped_session, model, expunge)
else:
return query_model(session, model, expunge)
def add_description(self, model_name, new_description, session=None):
"""Add description to a model.
Args:
model_name (str): Model name
new_description (str): The description in json format.
session (object): Database session.
"""
if not session:
with self.modelmaker() as scoped_session:
model = scoped_session.query(Model).filter(
Model.handle == model_name).one()
else:
model = session.query(Model).filter(
Model.handle == model_name).one()
model.add_description(new_description)
def get_description(self, model_name, session=None):
"""Get the description to a model.
Args:
model_name (str): Model name
session (object): Database session.
Returns:
json: Dictionary of the model description.
"""
if not session:
with self.modelmaker() as scoped_session:
model = scoped_session.query(Model).filter(
Model.handle == model_name).one()
return json.loads(model.description)
else:
model = session.query(Model).filter(
Model.handle == model_name).one()
return json.loads(model.description)
def create_engine(*args, **kwargs):
"""Create engine wrapper to patch database options.
Args:
*args (list): Arguments.
**kwargs (dict): Arguments.
Returns:
object: Engine.
"""
sqlite_enforce_fks = 'sqlite_enforce_fks'
forward_kwargs = {k: v for k, v in kwargs.iteritems()}
is_sqlite = False
for arg in args:
if 'sqlite' in arg:
is_sqlite = True
if sqlite_enforce_fks in forward_kwargs:
del forward_kwargs[sqlite_enforce_fks]
if is_sqlite:
engine = sqlalchemy_create_engine(*args, **forward_kwargs)
else:
engine = sqlalchemy_create_engine(*args,
pool_size=50,
**forward_kwargs)
dialect = engine.dialect.name
if dialect == 'sqlite':
@event.listens_for(engine, 'connect')
def do_connect(dbapi_connection, _):
"""Hooking database connect.
Args:
dbapi_connection (object): Database connection.
_ (object): Unknown.
"""
# Fix for nested transaction problems
dbapi_connection.isolation_level = None
if kwargs.get(sqlite_enforce_fks, False):
# Enable foreign key constraints
dbapi_connection.execute('pragma foreign_keys=ON')
@event.listens_for(engine, 'begin')
def do_begin(conn):
"""Hooking database transaction begin.
Args:
conn (object): Database connection.
"""
# Fix for nested transaction problems
conn.execute('BEGIN')
# pylint: disable=protected-access
engine.__explain_hooks = [do_connect, do_begin]
# pylint: enable=protected-access
return engine
def session_creator(model_name, filename=None, seed=None, echo=False):
"""Create a session maker for the model and db file.
Args:
model_name (str): the model name
filename (str): the db file to load the sqlite database
seed (str): the unique model handle
echo (bool): whether to echo all the statements
Returns:
tuple: session_maker and the ModelAccess object
"""
LOGGER.info('Creating session maker, model_name = %s, filename = %s',
model_name, filename)
if filename:
engine = create_engine('sqlite:///{}'.format(filename),
pool_recycle=POOL_RECYCLE_SECONDS)
else:
engine = create_engine('sqlite:///:memory:',
pool_recycle=POOL_RECYCLE_SECONDS, echo=echo)
if seed is None:
seed = generate_model_seed()
session_maker, data_access = define_model(model_name, engine, seed)
return session_maker, data_access
| 1 | 33,710 | Please move unrelated formatting changes to their own PR (same with the resources.py file). | forseti-security-forseti-security | py |
@@ -2171,6 +2171,7 @@ class CommandDispatcher:
window = self._tabbed_browser.window()
if window.isFullScreen():
- window.showNormal()
+ window.setWindowState(window._state_before_fullscreen & ~Qt.WindowFullScreen)
else:
+ window._state_before_fullscreen = window.windowState()
window.showFullScreen() | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
from PyQt5.QtWidgets import QApplication, QTabBar
from PyQt5.QtCore import Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configexc
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem, downloads)
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, typing)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import instances, sortfilter
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_editor: The ExternalEditor object.
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self, private):
"""Get a tabbed-browser from a new window."""
from qutebrowser.mainwindow import mainwindow
new_window = mainwindow.MainWindow(private=private)
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
explicit=True, private=None):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
window: Whether to open in a new window
private: If opening a new window, open it in private browsing mode.
If not given, inherit the current window's mode.
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window, private), 'tbwp')
if window and private is None:
private = self._tabbed_browser.private
if window or private:
tabbed_browser = self._new_tabbed_browser(private)
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, explicit=explicit)
elif background:
tabbed_browser.tabopen(url, background=True, explicit=explicit)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _tab_focus_last(self, *, show_error=True):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
if not show_error:
return
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, prev, next_, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((prev, next_, opposite), 'pno')
if prev:
return QTabBar.SelectLeftTab
elif next_:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.get('tabs', 'select-on-remove')
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs->select-on-remove' set to "
"'last-used'!")
else: # pragma: no cover
raise ValueError("Invalid select-on-remove value "
"{!r}!".format(conf_selection))
return None
def _tab_close(self, tab, prev=False, next_=False, opposite=False):
"""Helper function for tab_close be able to handle message.async.
Args:
tab: Tab object to select be closed.
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
count: The tab index to close, or None
"""
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(prev, next_,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
def _tab_close_prompt_if_pinned(self, tab, force, yes_action):
"""Helper method for tab_close.
If tab is pinned, prompt. If everything is good, run yes_action.
"""
if tab.data.pinned and not force:
message.confirm_async(
title='Pinned Tab',
text="Are you sure you want to close a pinned tab?",
yes_action=yes_action, default=False)
else:
yes_action()
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, prev=False, next_=False, opposite=False,
force=False, count=None):
"""Close the current/[count]th tab.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
force: Avoid confirmation for pinned tabs.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
close = functools.partial(self._tab_close, tab, prev,
next_, opposite)
self._tab_close_prompt_if_pinned(tab, force, close)
@cmdutils.register(instance='command-dispatcher', scope='window',
name='tab-pin')
@cmdutils.argument('count', count=True)
def tab_pin(self, count=None):
"""Pin/Unpin the current/[count]th tab.
Pinning a tab shrinks it to tabs->pinned-width size.
Attempting to close a pinned tab will cause a confirmation,
unless --force is passed.
Args:
count: The tab index to pin or unpin, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
to_pin = not tab.data.pinned
tab_index = self._current_index() if count is None else count - 1
cmdutils.check_overflow(tab_index + 1, 'int')
self._tabbed_browser.set_tab_pinned(tab_index, to_pin)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=usertypes.Completion.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, implicit=False,
bg=False, tab=False, window=False, count=None, secure=False,
private=False):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
implicit: If opening a new tab, treat the tab as implicit (like
clicking on a link).
count: The tab index to open the URL in, or None.
secure: Force HTTPS.
private: Open a new window in private browsing mode.
"""
if url is None:
urls = [config.get('general', 'default-page')]
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if secure:
cur_url.setScheme('https')
if not window and i > 0:
tab = False
bg = True
if tab or bg or window or private:
self._open(cur_url, tab, bg, window, explicit=not implicit,
private=private)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
elif curtab.data.pinned:
message.info("Tab is pinned!")
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Return:
A list of URLs that can be opened.
"""
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
def _print_preview(self, tab):
"""Show a print preview."""
def print_callback(ok):
if not ok:
message.error("Printing failed!")
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(functools.partial(
tab.printing.to_printer, callback=print_callback))
diag.exec_()
def _print_pdf(self, tab, filename):
"""Print to the given PDF file."""
tab.printing.check_pdf_support()
filename = os.path.expanduser(filename)
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(filename)
log.misc.debug("Print to file: {}".format(filename))
def _print(self, tab):
"""Print with a QPrintDialog."""
def print_callback(ok):
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
diag = QPrintDialog(tab)
diag.open(lambda: tab.printing.to_printer(diag.printer(),
print_callback))
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if pdf:
tab.printing.check_pdf_support()
else:
tab.printing.check_printer_support()
if preview:
tab.printing.check_preview_support()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
if preview:
self._print_preview(tab)
elif pdf:
self._print_pdf(tab, pdf)
else:
self._print(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
try:
history = curtab.history.serialize()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs-are-windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.get('tabs', 'show-favicons'):
new_tabbed_browser.setTabIcon(idx, curtab.icon())
if config.get('tabs', 'tabs-are-windows'):
new_tabbed_browser.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(history)
newtab.zoom.set_factor(curtab.zoom.factor())
new_tabbed_browser.set_tab_pinned(idx, curtab.data.pinned)
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_detach(self):
"""Detach the current tab to its own window."""
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach one tab.")
url = self._current_url()
self._open(url, window=True)
cur_widget = self._current_widget()
self._tabbed_browser.close_tab(cur_widget, add_undo=False)
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
for _ in range(count):
if forward:
if not widget.history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
widget.history.forward()
else:
if not widget.history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
widget.history.back()
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
- `decrement`: Decrement the last number in the URL.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window, explicit=False)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Note you can use `:run-with-count` to have a keybinding with a bigger
scroll increment.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_perc(self, perc: float = None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str = None, bottom_navigate: str = None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/qutebrowser/qutebrowser/issues/701
return
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery()
url_query_str = url.query()
if '&' not in url_query_str and ';' in url_query_str:
url_query.setQueryDelimiters('=', ';')
url_query.setQuery(url_query_str)
for key in dict(url_query.queryItems()):
if key in config.get('general', 'yank-ignored-url-parameters'):
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
caret = self._current_widget().caret
s = caret.selection()
if not caret.has_selection() or not s:
message.info("Nothing to yank")
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.leave(self._win_id, KeyMode.caret, "yank selected",
maybe=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(perc), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(perc), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
if zoom is not None:
try:
zoom = int(zoom.rstrip('%'))
except ValueError:
raise cmdexc.CommandError("zoom: Invalid int value {}"
.format(zoom))
level = count if count is not None else zoom
if level is None:
level = config.get('ui', 'default-zoom')
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(level), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, prev=False, next_=False, force=False):
"""Close all tabs except for the current one.
Args:
prev: Keep tabs before the current.
next_: Keep tabs after the current.
force: Avoid confirmation for pinned tabs.
"""
cmdutils.check_exclusive((prev, next_), 'pn')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
def _to_close(i):
"""Helper method to check if a tab should be closed or not."""
return not (i == cur_idx or
(prev and i < cur_idx) or
(next_ and i > cur_idx))
# Check to see if we are closing any pinned tabs
if not force:
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i) and tab.data.pinned:
self._tab_close_prompt_if_pinned(
tab, force,
lambda: self.tab_only(
prev=prev, next_=next_, force=True))
return
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i):
self._tabbed_browser.close_tab(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open a closed tab."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.get('tabs', 'wrap'):
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.get('tabs', 'wrap'):
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated="Use :open {clipboard}")
def paste(self, sel=False, tab=False, bg=False, window=False):
"""Open a page from the clipboard.
If the pasted text contains newlines, each line gets opened in its own
tab.
Args:
sel: Use the primary selection instead of the clipboard.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in new window.
"""
force_search = False
if not utils.supports_selection():
sel = False
try:
text = utils.get_clipboard(selection=sel)
except utils.ClipboardError as e:
raise cmdexc.CommandError(e)
text_urls = [u for u in text.split('\n') if u.strip()]
if (len(text_urls) > 1 and not urlutils.is_url(text_urls[0]) and
urlutils.get_path_if_valid(
text_urls[0], check_exists=True) is None):
force_search = True
text_urls = [text]
for i, text_url in enumerate(text_urls):
if not window and i > 0:
tab = False
bg = True
try:
url = urlutils.fuzzy_url(text_url, force_search=force_search)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=usertypes.Completion.tab)
def buffer(self, index):
"""Select tab by index or url/title best match.
Focuses window if necessary.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
try:
for part in index_parts:
int(part)
except ValueError:
model = instances.get(usertypes.Completion.tab)
sf = sortfilter.CompletionFilterModel(source=model)
sf.set_pattern(index)
if sf.count() > 0:
index = sf.data(sf.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
window = objreg.window_registry[win_id]
window.activateWindow()
window.raise_()
tabbed_browser.setCurrentIndex(idx-1)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True)
def tab_focus(self, index: typing.Union[str, int]=None, count=None):
"""Select the tab given as argument/[count].
If neither count nor index is given, it behaves like tab-next.
If both are given, use count.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
"""
index = count if count is not None else index
if index == 'last':
self._tab_focus_last()
return
elif index == self._current_index() + 1:
self._tab_focus_last(show_error=False)
return
elif index is None:
self.tab_next()
return
if index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int]=None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
index: `+` or `-` to move relative to the current tab by
count, or a default of 1 space.
A tab index to move to that index.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.get('tabs', 'wrap'):
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
cur_idx = self._current_index()
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
def spawn(self, cmdline, userscript=False, verbose=False, detach=False):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
if userscript:
# ~ expansion is handled by the userscript module.
self._run_userscript(cmd, *args, verbose=verbose)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.get('general', 'startpage')[0])
def _run_userscript(self, cmd, *args, verbose=False):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
tab = self._tabbed_browser.currentWidget()
if tab is not None and tab.caret.has_selection():
env['QUTE_SELECTED_TEXT'] = tab.caret.selection()
try:
env['QUTE_SELECTED_HTML'] = tab.caret.selection(html=True)
except browsertab.UnsupportedOperationError:
pass
# FIXME:qtwebengine: If tab is None, run_async will fail!
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
userscripts.run_async(tab, cmd, *args, win_id=self._win_id,
env=env, verbose=verbose)
except userscripts.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name',
completion=usertypes.Completion.quickmark_by_name)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name',
completion=usertypes.Completion.quickmark_by_name)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there are more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If no url and title are provided, then save the current page as a
bookmark.
If a url and title have been provided, then save the given url as
a bookmark with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If None, use url of current page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if url is None:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}" if was_added else "Removed bookmark {}"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=usertypes.Completion.bookmark_by_url)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=usertypes.Completion.bookmark_by_url)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('dest_old', hide=True)
def download(self, url=None, dest_old=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
The form `:download [url] [dest]` is deprecated, use `:download --dest
[dest] [url]` instead.
Args:
url: The URL to download. If not given, download the current page.
dest_old: (deprecated) Same as dest.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
if dest_old is not None:
message.warning(":download [url] [dest] is deprecated - use "
":download --dest [dest] [url]")
if dest is not None:
raise cmdexc.CommandError("Can't give two destinations for the"
" download.")
dest = dest_old
# FIXME:qtwebengine do this with the QtWebEngine download manager?
download_manager = objreg.get('qtnetwork-download-manager',
scope='window', window=self._win_id)
target = None
if dest is not None:
dest = downloads.transform_path(dest)
if dest is None:
raise cmdexc.CommandError("Invalid target filename")
target = downloads.FileDownloadTarget(dest)
tab = self._current_widget()
user_agent = tab.user_agent()
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
download_manager.get(url, user_agent=user_agent, target=target)
elif mhtml_:
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebEngine:
webengine_download_manager = objreg.get(
'webengine-download-manager')
try:
webengine_download_manager.get_mhtml(tab, target)
except browsertab.UnsupportedOperationError as e:
raise cmdexc.CommandError(e)
else:
download_manager.get_mhtml(tab, target)
else:
qnam = tab.networkaccessmanager()
download_manager.get(self._current_url(), user_agent=user_agent,
qnam=qnam, target=target)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page in a new tab."""
# pylint: disable=no-member
# WORKAROUND for https://bitbucket.org/logilab/pylint/issue/491/
tab = self._current_widget()
if tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
def show_source_cb(source):
"""Show source as soon as it's ready."""
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table',
title='Source for {}'.format(current_url.toDisplayString()))
highlighted = pygments.highlight(source, lexer, formatter)
new_tab = self._tabbed_browser.tabopen()
new_tab.set_html(highlighted)
new_tab.data.viewing_source = True
tab.dump_async(show_source_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', scope='window')
def history(self, tab=True, bg=False, window=False):
"""Show browsing history.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
url = QUrl('qute://history/')
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=usertypes.Completion.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__\->__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif '->' in topic:
parts = topic.split('->')
if len(parts) != 2:
raise cmdexc.CommandError("Invalid help topic {}!".format(
topic))
try:
config.get(*parts)
except configexc.NoSectionError:
raise cmdexc.CommandError("Invalid section {}!".format(
parts[0]))
except configexc.NoOptionError:
raise cmdexc.CommandError("Invalid option {}!".format(
parts[1]))
path = 'settings.html#{}'.format(topic.replace('->', '-'))
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='info', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.value()
if text is None:
message.error("Could not get text from the focused element.")
return
assert isinstance(text, str), text
ed = editor.ExternalEditor(self._tabbed_browser)
ed.editing_finished.connect(functools.partial(
self.on_editing_finished, elem))
ed.edit(text)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`general -> editor` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_editing_finished(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the editor was closed.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_value(text)
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher',
deprecated="Use :insert-text {primary}",
modes=[KeyMode.insert], hide=True, scope='window',
backend=usertypes.Backend.QtWebKit)
def paste_primary(self):
"""Paste the primary selection at cursor position."""
self.insert_text(utils.get_clipboard(selection=True, fallback=True))
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget=
usertypes.ClickTarget.normal,
force_event=False):
"""Click the element matching the given filter.
The given filter needs to result in exactly one element; otherwise an
error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
force_event: Force generating a fake click event.
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found with id {}!".format(value))
return
try:
elem.click(target, force_event=force_event)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
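# i.e. going_up is true exactly when one (but not both) of 'reverse'
# and 'prev' is set.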
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
tab.search.clear()
if not text:
return
options = {
'ignore_case': config.get('general', 'ignore-case'),
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
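# The re-search above already lands on the first match, so it uses up
# one repetition of the count.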
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
Available actions:
http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit)
http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine)
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
for _ in range(count):
try:
tab.action.run_string(action)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, file=False, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int]=None):
"""Evaluate a JavaScript string.
Args:
js_code: The string/file to evaluate.
file: Interpret js-code as a path to a file.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
if file:
try:
with open(js_code, 'r', encoding='utf-8') as f:
js_code = f.read()
except OSError as e:
raise cmdexc.CommandError(str(e))
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
keyinfos = utils.parse_keystring(keystring)
except utils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
for keyinfo in keyinfos:
press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True, backend=usertypes.Backend.QtWebKit)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`general -> editor` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.editing_finished.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False):
"""Open a URL unless it's already open in the tab.
Args:
old_url: The original URL to compare against.
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
"""
if bg or tab or window or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fullscreen(self, leave=False):
"""Toggle fullscreen mode.
Args:
leave: Only leave fullscreen if it was entered by the page.
"""
if leave:
tab = self._current_widget()
try:
tab.action.exit_fullscreen()
except browsertab.UnsupportedOperationError:
pass
return
window = self._tabbed_browser.window()
if window.isFullScreen():
window.showNormal()
else:
window.showFullScreen()
| 1 | 18,061 | Make this a public attribute (remove the leading `_`) if you want to access it from the outside. | qutebrowser-qutebrowser | py |
@@ -116,7 +116,7 @@ define(['layoutManager', 'browser', 'actionsheet', 'css!./emby-select', 'registe
inputId++;
}
- if (!browser.firefox) {
+ if (browser) {
this.classList.add('emby-select-withcolor');
if (layoutManager.tv) { | 1 | define(['layoutManager', 'browser', 'actionsheet', 'css!./emby-select', 'registerElement'], function (layoutManager, browser, actionsheet) {
'use strict';
var EmbySelectPrototype = Object.create(HTMLSelectElement.prototype);
function enableNativeMenu() {
if (browser.edgeUwp || browser.xboxOne) {
return true;
}
// Doesn't seem to work at all
if (browser.tizen || browser.orsay || browser.web0s) {
return false;
}
// Take advantage of the native input methods
if (browser.tv) {
return true;
}
if (layoutManager.tv) {
return false;
}
return true;
}
function triggerChange(select) {
var evt = document.createEvent("HTMLEvents");
evt.initEvent("change", false, true);
select.dispatchEvent(evt);
}
function setValue(select, value) {
select.value = value;
}
function showActionSheet(select) {
var labelElem = getLabel(select);
var title = labelElem ? (labelElem.textContent || labelElem.innerText) : null;
actionsheet.show({
items: select.options,
positionTo: select,
title: title
}).then(function (value) {
setValue(select, value);
triggerChange(select);
});
}
function getLabel(select) {
var elem = select.previousSibling;
while (elem && elem.tagName !== 'LABEL') {
elem = elem.previousSibling;
}
return elem;
}
function onFocus(e) {
var label = getLabel(this);
if (label) {
label.classList.add('selectLabelFocused');
}
}
function onBlur(e) {
var label = getLabel(this);
if (label) {
label.classList.remove('selectLabelFocused');
}
}
function onMouseDown(e) {
// e.button=0 for primary (left) mouse button click
if (!e.button && !enableNativeMenu()) {
e.preventDefault();
showActionSheet(this);
}
}
function onKeyDown(e) {
switch (e.keyCode) {
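// 13 = Enter: fall back to the custom action sheet when the native
// menu is disabled. 37-40 = arrow keys: swallowed on TV layouts so the
// native select does not change its value during directional navigation.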
case 13:
if (!enableNativeMenu()) {
e.preventDefault();
showActionSheet(this);
}
return;
case 37:
case 38:
case 39:
case 40:
if (layoutManager.tv) {
e.preventDefault();
}
return;
default:
break;
}
}
var inputId = 0;
EmbySelectPrototype.createdCallback = function () {
if (!this.id) {
this.id = 'embyselect' + inputId;
inputId++;
}
if (!browser.firefox) {
this.classList.add('emby-select-withcolor');
if (layoutManager.tv) {
this.classList.add('emby-select-tv-withcolor');
}
}
if (layoutManager.tv) {
this.classList.add('emby-select-focusscale');
}
this.addEventListener('mousedown', onMouseDown);
this.addEventListener('keydown', onKeyDown);
this.addEventListener('focus', onFocus);
this.addEventListener('blur', onBlur);
};
EmbySelectPrototype.attachedCallback = function () {
if (this.classList.contains('emby-select')) {
return;
}
this.classList.add('emby-select');
var label = this.ownerDocument.createElement('label');
label.innerHTML = this.getAttribute('label') || '';
label.classList.add('selectLabel');
label.htmlFor = this.id;
this.parentNode.insertBefore(label, this);
if (this.classList.contains('emby-select-withcolor')) {
this.parentNode.insertAdjacentHTML('beforeend', '<div class="selectArrowContainer"><div style="visibility:hidden;">0</div><i class="selectArrow md-icon"></i></div>');
}
};
EmbySelectPrototype.setLabel = function (text) {
var label = this.parentNode.querySelector('label');
label.innerHTML = text;
};
document.registerElement('emby-select', {
prototype: EmbySelectPrototype,
extends: 'select'
});
}); | 1 | 11,386 | this conditional is useless now so it should be removed | jellyfin-jellyfin-web | js |
@@ -262,8 +262,8 @@ void AccessFlatBufferTest(const uint8_t *flatbuf, size_t length,
auto vecofstructs = monster->testarrayofsortedstruct();
if (vecofstructs) { // not filled in monster_test.bfbs
for (size_t i = 0; i < vecofstructs->size()-1; i++) {
- auto left = vecofstructs->Get(i);
- auto right = vecofstructs->Get(i+1);
+ auto left = vecofstructs->Get(static_cast<flatbuffers::uoffset_t>(i));
+ auto right = vecofstructs->Get(static_cast<flatbuffers::uoffset_t>(i+1));
TEST_EQ(true, (left->KeyCompareLessThan(right)));
}
TEST_NOTNULL(vecofstructs->LookupByKey(3)); | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "monster_test_generated.h"
#include "namespace_test/namespace_test1_generated.h"
#include "namespace_test/namespace_test2_generated.h"
#include "union_vector/union_vector_generated.h"
#ifndef FLATBUFFERS_CPP98_STL
#include <random>
#endif
#include "flatbuffers/flexbuffers.h"
using namespace MyGame::Example;
#ifdef __ANDROID__
#include <android/log.h>
#define TEST_OUTPUT_LINE(...) \
__android_log_print(ANDROID_LOG_INFO, "FlatBuffers", __VA_ARGS__)
#define FLATBUFFERS_NO_FILE_TESTS
#else
#define TEST_OUTPUT_LINE(...) \
{ printf(__VA_ARGS__); printf("\n"); }
#endif
int testing_fails = 0;
void TestFail(const char *expval, const char *val, const char *exp,
const char *file, int line) {
TEST_OUTPUT_LINE("TEST FAILED: %s:%d, %s (%s) != %s", file, line,
exp, expval, val);
assert(0);
testing_fails++;
}
void TestEqStr(const char *expval, const char *val, const char *exp,
const char *file, int line) {
if (strcmp(expval, val) != 0) {
TestFail(expval, val, exp, file, line);
}
}
template<typename T, typename U>
void TestEq(T expval, U val, const char *exp, const char *file, int line) {
if (U(expval) != val) {
TestFail(flatbuffers::NumToString(expval).c_str(),
flatbuffers::NumToString(val).c_str(),
exp, file, line);
}
}
#define TEST_EQ(exp, val) TestEq(exp, val, #exp, __FILE__, __LINE__)
#define TEST_NOTNULL(exp) TestEq(exp == NULL, false, #exp, __FILE__, __LINE__)
#define TEST_EQ_STR(exp, val) TestEqStr(exp, val, #exp, __FILE__, __LINE__)
// Include simple random number generator to ensure results will be the
// same cross platform.
// http://en.wikipedia.org/wiki/Park%E2%80%93Miller_random_number_generator
uint32_t lcg_seed = 48271;
uint32_t lcg_rand() {
return lcg_seed = ((uint64_t)lcg_seed * 279470273UL) % 4294967291UL;
}
void lcg_reset() { lcg_seed = 48271; }
// example of how to build up a serialized buffer algorithmically:
flatbuffers::unique_ptr_t CreateFlatBufferTest(std::string &buffer) {
flatbuffers::FlatBufferBuilder builder;
auto vec = Vec3(1, 2, 3, 0, Color_Red, Test(10, 20));
auto name = builder.CreateString("MyMonster");
unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
auto inventory = builder.CreateVector(inv_data, 10);
// Alternatively, create the vector first, and fill in data later:
// unsigned char *inv_buf = nullptr;
// auto inventory = builder.CreateUninitializedVector<unsigned char>(
// 10, &inv_buf);
// memcpy(inv_buf, inv_data, 10);
Test tests[] = { Test(10, 20), Test(30, 40) };
auto testv = builder.CreateVectorOfStructs(tests, 2);
// create monster with very few fields set:
// (same functionality as CreateMonster below, but sets fields manually)
flatbuffers::Offset<Monster> mlocs[3];
auto fred = builder.CreateString("Fred");
auto barney = builder.CreateString("Barney");
auto wilma = builder.CreateString("Wilma");
MonsterBuilder mb1(builder);
mb1.add_name(fred);
mlocs[0] = mb1.Finish();
MonsterBuilder mb2(builder);
mb2.add_name(barney);
mb2.add_hp(1000);
mlocs[1] = mb2.Finish();
MonsterBuilder mb3(builder);
mb3.add_name(wilma);
mlocs[2] = mb3.Finish();
// Create an array of strings. Also test string pooling, and lambdas.
const char *names[] = { "bob", "fred", "bob", "fred" };
auto vecofstrings =
builder.CreateVector<flatbuffers::Offset<flatbuffers::String>>(4,
[&](size_t i) {
return builder.CreateSharedString(names[i]);
});
// Creating vectors of strings in one convenient call.
std::vector<std::string> names2;
names2.push_back("jane");
names2.push_back("mary");
auto vecofstrings2 = builder.CreateVectorOfStrings(names2);
// Create an array of sorted tables, can be used with binary search when read:
auto vecoftables = builder.CreateVectorOfSortedTables(mlocs, 3);
// Create an array of sorted structs,
// can be used with binary search when read:
std::vector<Ability> abilities;
abilities.push_back(Ability(4, 40));
abilities.push_back(Ability(3, 30));
abilities.push_back(Ability(2, 20));
abilities.push_back(Ability(1, 10));
auto vecofstructs = builder.CreateVectorOfSortedStructs(&abilities);
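// CreateVectorOfSortedStructs sorts the vector by its key field before
// serializing, which is what makes LookupByKey()/binary search valid on
// the reading side.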
// shortcut for creating monster with all fields set:
auto mloc = CreateMonster(builder, &vec, 150, 80, name, inventory, Color_Blue,
Any_Monster, mlocs[1].Union(), // Store a union.
testv, vecofstrings, vecoftables, 0, 0, 0, false,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3.14159f, 3.0f, 0.0f,
vecofstrings2, vecofstructs);
FinishMonsterBuffer(builder, mloc);
#ifdef FLATBUFFERS_TEST_VERBOSE
// print byte data for debugging:
auto p = builder.GetBufferPointer();
for (flatbuffers::uoffset_t i = 0; i < builder.GetSize(); i++)
printf("%d ", p[i]);
#endif
// return the buffer for the caller to use.
auto bufferpointer =
reinterpret_cast<const char *>(builder.GetBufferPointer());
buffer.assign(bufferpointer, bufferpointer + builder.GetSize());
return builder.ReleaseBufferPointer();
}
// example of accessing a buffer loaded in memory:
void AccessFlatBufferTest(const uint8_t *flatbuf, size_t length,
bool pooled = true) {
// First, verify the buffers integrity (optional)
flatbuffers::Verifier verifier(flatbuf, length);
TEST_EQ(VerifyMonsterBuffer(verifier), true);
std::vector<uint8_t> test_buff;
test_buff.resize(length * 2);
std::memcpy(&test_buff[0], flatbuf , length);
std::memcpy(&test_buff[length], flatbuf , length);
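// Two adjacent copies of the buffer: the first is verified both truncated
// (which must fail) and at full size; the second checks that verification
// also works at a different (offset) position in memory.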
flatbuffers::Verifier verifierl(&test_buff[0], length - 1);
TEST_EQ(VerifyMonsterBuffer(verifierl), false);
TEST_EQ(verifierl.GetComputedSize(), 0);
flatbuffers::Verifier verifier1(&test_buff[0], length);
TEST_EQ(VerifyMonsterBuffer(verifier1), true);
TEST_EQ(verifier1.GetComputedSize(), length);
flatbuffers::Verifier verifier2(&test_buff[length], length);
TEST_EQ(VerifyMonsterBuffer(verifier2), true);
TEST_EQ(verifier2.GetComputedSize(), length);
TEST_EQ(strcmp(MonsterIdentifier(), "MONS"), 0);
TEST_EQ(MonsterBufferHasIdentifier(flatbuf), true);
TEST_EQ(strcmp(MonsterExtension(), "mon"), 0);
// Access the buffer from the root.
auto monster = GetMonster(flatbuf);
TEST_EQ(monster->hp(), 80);
TEST_EQ(monster->mana(), 150); // default
TEST_EQ_STR(monster->name()->c_str(), "MyMonster");
// Can't access the following field, it is deprecated in the schema,
// which means accessors are not generated:
// monster.friendly()
auto pos = monster->pos();
TEST_NOTNULL(pos);
TEST_EQ(pos->z(), 3);
TEST_EQ(pos->test3().a(), 10);
TEST_EQ(pos->test3().b(), 20);
auto inventory = monster->inventory();
TEST_EQ(VectorLength(inventory), 10UL); // Works even if inventory is null.
TEST_NOTNULL(inventory);
unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
for (auto it = inventory->begin(); it != inventory->end(); ++it)
TEST_EQ(*it, inv_data[it - inventory->begin()]);
TEST_EQ(monster->color(), Color_Blue);
// Example of accessing a union:
TEST_EQ(monster->test_type(), Any_Monster); // First make sure which it is.
auto monster2 = reinterpret_cast<const Monster *>(monster->test());
TEST_NOTNULL(monster2);
TEST_EQ_STR(monster2->name()->c_str(), "Fred");
// Example of accessing a vector of strings:
auto vecofstrings = monster->testarrayofstring();
TEST_EQ(vecofstrings->Length(), 4U);
TEST_EQ_STR(vecofstrings->Get(0)->c_str(), "bob");
TEST_EQ_STR(vecofstrings->Get(1)->c_str(), "fred");
if (pooled) {
// These should have pointer equality because of string pooling.
TEST_EQ(vecofstrings->Get(0)->c_str(), vecofstrings->Get(2)->c_str());
TEST_EQ(vecofstrings->Get(1)->c_str(), vecofstrings->Get(3)->c_str());
}
auto vecofstrings2 = monster->testarrayofstring2();
if (vecofstrings2) {
TEST_EQ(vecofstrings2->Length(), 2U);
TEST_EQ_STR(vecofstrings2->Get(0)->c_str(), "jane");
TEST_EQ_STR(vecofstrings2->Get(1)->c_str(), "mary");
}
// Example of accessing a vector of tables:
auto vecoftables = monster->testarrayoftables();
TEST_EQ(vecoftables->Length(), 3U);
for (auto it = vecoftables->begin(); it != vecoftables->end(); ++it)
TEST_EQ(strlen(it->name()->c_str()) >= 4, true);
TEST_EQ_STR(vecoftables->Get(0)->name()->c_str(), "Barney");
TEST_EQ(vecoftables->Get(0)->hp(), 1000);
TEST_EQ_STR(vecoftables->Get(1)->name()->c_str(), "Fred");
TEST_EQ_STR(vecoftables->Get(2)->name()->c_str(), "Wilma");
TEST_NOTNULL(vecoftables->LookupByKey("Barney"));
TEST_NOTNULL(vecoftables->LookupByKey("Fred"));
TEST_NOTNULL(vecoftables->LookupByKey("Wilma"));
// Test accessing a vector of sorted structs
auto vecofstructs = monster->testarrayofsortedstruct();
if (vecofstructs) { // not filled in monster_test.bfbs
for (size_t i = 0; i < vecofstructs->size()-1; i++) {
auto left = vecofstructs->Get(i);
auto right = vecofstructs->Get(i+1);
TEST_EQ(true, (left->KeyCompareLessThan(right)));
}
TEST_NOTNULL(vecofstructs->LookupByKey(3));
TEST_EQ(static_cast<const Ability*>(nullptr), vecofstructs->LookupByKey(5));
}
// Since Flatbuffers uses explicit mechanisms to override the default
// compiler alignment, double check that the compiler indeed obeys them:
// (Test consists of a short and a byte):
TEST_EQ(flatbuffers::AlignOf<Test>(), 2UL);
TEST_EQ(sizeof(Test), 4UL);
auto tests = monster->test4();
TEST_NOTNULL(tests);
auto test_0 = tests->Get(0);
auto test_1 = tests->Get(1);
TEST_EQ(test_0->a(), 10);
TEST_EQ(test_0->b(), 20);
TEST_EQ(test_1->a(), 30);
TEST_EQ(test_1->b(), 40);
for (auto it = tests->begin(); it != tests->end(); ++it) {
TEST_EQ(it->a() == 10 || it->a() == 30, true); // Just testing iterators.
}
// Checking for presence of fields:
TEST_EQ(flatbuffers::IsFieldPresent(monster, Monster::VT_HP), true);
TEST_EQ(flatbuffers::IsFieldPresent(monster, Monster::VT_MANA), false);
// Obtaining a buffer from a root:
TEST_EQ(GetBufferStartFromRootPointer(monster), flatbuf);
}
// Change a FlatBuffer in-place, after it has been constructed.
void MutateFlatBuffersTest(uint8_t *flatbuf, std::size_t length) {
// Get non-const pointer to root.
auto monster = GetMutableMonster(flatbuf);
// Each of these tests mutates, then tests, then set back to the original,
// so we can test that the buffer in the end still passes our original test.
auto hp_ok = monster->mutate_hp(10);
TEST_EQ(hp_ok, true); // Field was present.
TEST_EQ(monster->hp(), 10);
// Mutate to default value
auto hp_ok_default = monster->mutate_hp(100);
TEST_EQ(hp_ok_default, true); // Field was present.
TEST_EQ(monster->hp(), 100);
// Test that mutate to default above keeps field valid for further mutations
auto hp_ok_2 = monster->mutate_hp(20);
TEST_EQ(hp_ok_2, true);
TEST_EQ(monster->hp(), 20);
monster->mutate_hp(80);
// Monster originally at 150 mana (default value)
auto mana_default_ok = monster->mutate_mana(150); // Mutate to default value.
TEST_EQ(mana_default_ok, true); // Mutation should succeed, because default value.
TEST_EQ(monster->mana(), 150);
auto mana_ok = monster->mutate_mana(10);
TEST_EQ(mana_ok, false); // Field was NOT present, because default value.
TEST_EQ(monster->mana(), 150);
// Mutate structs.
auto pos = monster->mutable_pos();
auto test3 = pos->mutable_test3(); // Struct inside a struct.
test3.mutate_a(50); // Struct fields never fail.
TEST_EQ(test3.a(), 50);
test3.mutate_a(10);
// Mutate vectors.
auto inventory = monster->mutable_inventory();
inventory->Mutate(9, 100);
TEST_EQ(inventory->Get(9), 100);
inventory->Mutate(9, 9);
auto tables = monster->mutable_testarrayoftables();
auto first = tables->GetMutableObject(0);
TEST_EQ(first->hp(), 1000);
first->mutate_hp(0);
TEST_EQ(first->hp(), 0);
first->mutate_hp(1000);
// Run the verifier and the regular test to make sure we didn't trample on
// anything.
AccessFlatBufferTest(flatbuf, length);
}
// Unpack a FlatBuffer into objects.
void ObjectFlatBuffersTest(uint8_t *flatbuf) {
// Optional: we can specify resolver and rehasher functions to turn hashed
// strings into object pointers and back, to implement remote references
// and such.
auto resolver = flatbuffers::resolver_function_t(
[](void **pointer_adr, flatbuffers::hash_value_t hash) {
(void)pointer_adr;
(void)hash;
// Don't actually do anything, leave variable null.
});
auto rehasher = flatbuffers::rehasher_function_t(
[](void *pointer) -> flatbuffers::hash_value_t {
(void)pointer;
return 0;
});
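// Both are deliberately no-ops here; a real application would map hashes
// to live object pointers (resolver) and back (rehasher).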
// Turn a buffer into C++ objects.
auto monster1 = UnPackMonster(flatbuf, &resolver);
// Re-serialize the data.
flatbuffers::FlatBufferBuilder fbb1;
fbb1.Finish(CreateMonster(fbb1, monster1.get(), &rehasher),
MonsterIdentifier());
// Unpack again, and re-serialize again.
auto monster2 = UnPackMonster(fbb1.GetBufferPointer(), &resolver);
flatbuffers::FlatBufferBuilder fbb2;
fbb2.Finish(CreateMonster(fbb2, monster2.get(), &rehasher),
MonsterIdentifier());
// Now we've gone full round-trip, the two buffers should match.
auto len1 = fbb1.GetSize();
auto len2 = fbb2.GetSize();
TEST_EQ(len1, len2);
TEST_EQ(memcmp(fbb1.GetBufferPointer(), fbb2.GetBufferPointer(),
len1), 0);
// Test it with the original buffer test to make sure all data survived.
AccessFlatBufferTest(fbb2.GetBufferPointer(), len2, false);
// Test accessing fields, similar to AccessFlatBufferTest above.
TEST_EQ(monster2->hp, 80);
TEST_EQ(monster2->mana, 150); // default
TEST_EQ_STR(monster2->name.c_str(), "MyMonster");
auto &pos = monster2->pos;
TEST_NOTNULL(pos);
TEST_EQ(pos->z(), 3);
TEST_EQ(pos->test3().a(), 10);
TEST_EQ(pos->test3().b(), 20);
auto &inventory = monster2->inventory;
TEST_EQ(inventory.size(), 10UL);
unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
for (auto it = inventory.begin(); it != inventory.end(); ++it)
TEST_EQ(*it, inv_data[it - inventory.begin()]);
TEST_EQ(monster2->color, Color_Blue);
auto monster3 = monster2->test.AsMonster();
TEST_NOTNULL(monster3);
TEST_EQ_STR(monster3->name.c_str(), "Fred");
auto &vecofstrings = monster2->testarrayofstring;
TEST_EQ(vecofstrings.size(), 4U);
TEST_EQ_STR(vecofstrings[0].c_str(), "bob");
TEST_EQ_STR(vecofstrings[1].c_str(), "fred");
auto &vecofstrings2 = monster2->testarrayofstring2;
TEST_EQ(vecofstrings2.size(), 2U);
TEST_EQ_STR(vecofstrings2[0].c_str(), "jane");
TEST_EQ_STR(vecofstrings2[1].c_str(), "mary");
auto &vecoftables = monster2->testarrayoftables;
TEST_EQ(vecoftables.size(), 3U);
TEST_EQ_STR(vecoftables[0]->name.c_str(), "Barney");
TEST_EQ(vecoftables[0]->hp, 1000);
TEST_EQ_STR(vecoftables[1]->name.c_str(), "Fred");
TEST_EQ_STR(vecoftables[2]->name.c_str(), "Wilma");
auto &tests = monster2->test4;
TEST_EQ(tests[0].a(), 10);
TEST_EQ(tests[0].b(), 20);
TEST_EQ(tests[1].a(), 30);
TEST_EQ(tests[1].b(), 40);
}
// Prefix a FlatBuffer with a size field.
void SizePrefixedTest() {
// Create size prefixed buffer.
flatbuffers::FlatBufferBuilder fbb;
fbb.FinishSizePrefixed(CreateMonster(fbb, 0, 200, 300,
fbb.CreateString("bob")));
// Verify it.
flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
TEST_EQ(verifier.VerifySizePrefixedBuffer<Monster>(nullptr), true);
// Access it.
auto m = flatbuffers::GetSizePrefixedRoot<MyGame::Example::Monster>(
fbb.GetBufferPointer());
TEST_EQ(m->mana(), 200);
TEST_EQ(m->hp(), 300);
TEST_EQ_STR(m->name()->c_str(), "bob");
}
// example of parsing text straight into a buffer, and generating
// text back from it:
void ParseAndGenerateTextTest() {
// load FlatBuffer schema (.fbs) and JSON from disk
std::string schemafile;
std::string jsonfile;
TEST_EQ(flatbuffers::LoadFile(
"tests/monster_test.fbs", false, &schemafile), true);
TEST_EQ(flatbuffers::LoadFile(
"tests/monsterdata_test.golden", false, &jsonfile), true);
// parse schema first, so we can use it to parse the data after
flatbuffers::Parser parser;
const char *include_directories[] = { "tests", nullptr };
TEST_EQ(parser.Parse(schemafile.c_str(), include_directories), true);
TEST_EQ(parser.Parse(jsonfile.c_str(), include_directories), true);
// here, parser.builder_ contains a binary buffer that is the parsed data.
// First, verify it, just in case:
flatbuffers::Verifier verifier(parser.builder_.GetBufferPointer(),
parser.builder_.GetSize());
TEST_EQ(VerifyMonsterBuffer(verifier), true);
// to ensure it is correct, we now generate text back from the binary,
// and compare the two:
std::string jsongen;
auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
if (jsongen != jsonfile) {
printf("%s----------------\n%s", jsongen.c_str(), jsonfile.c_str());
TEST_NOTNULL(NULL);
}
}
void ReflectionTest(uint8_t *flatbuf, size_t length) {
// Load a binary schema.
std::string bfbsfile;
TEST_EQ(flatbuffers::LoadFile(
"tests/monster_test.bfbs", true, &bfbsfile), true);
// Verify it, just in case:
flatbuffers::Verifier verifier(
reinterpret_cast<const uint8_t *>(bfbsfile.c_str()), bfbsfile.length());
TEST_EQ(reflection::VerifySchemaBuffer(verifier), true);
// Make sure the schema is what we expect it to be.
auto &schema = *reflection::GetSchema(bfbsfile.c_str());
auto root_table = schema.root_table();
TEST_EQ_STR(root_table->name()->c_str(), "MyGame.Example.Monster");
auto fields = root_table->fields();
auto hp_field_ptr = fields->LookupByKey("hp");
TEST_NOTNULL(hp_field_ptr);
auto &hp_field = *hp_field_ptr;
TEST_EQ_STR(hp_field.name()->c_str(), "hp");
TEST_EQ(hp_field.id(), 2);
TEST_EQ(hp_field.type()->base_type(), reflection::Short);
auto friendly_field_ptr = fields->LookupByKey("friendly");
TEST_NOTNULL(friendly_field_ptr);
TEST_NOTNULL(friendly_field_ptr->attributes());
TEST_NOTNULL(friendly_field_ptr->attributes()->LookupByKey("priority"));
// Make sure the table index is what we expect it to be.
auto pos_field_ptr = fields->LookupByKey("pos");
TEST_NOTNULL(pos_field_ptr);
TEST_EQ(pos_field_ptr->type()->base_type(), reflection::Obj);
auto pos_table_ptr = schema.objects()->Get(pos_field_ptr->type()->index());
TEST_NOTNULL(pos_table_ptr);
TEST_EQ_STR(pos_table_ptr->name()->c_str(), "MyGame.Example.Vec3");
// Now use it to dynamically access a buffer.
auto &root = *flatbuffers::GetAnyRoot(flatbuf);
// Verify the buffer first using reflection based verification
TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), flatbuf, length),
true);
auto hp = flatbuffers::GetFieldI<uint16_t>(root, hp_field);
TEST_EQ(hp, 80);
// Rather than needing to know the type, we can also get the value of
// any field as an int64_t/double/string, regardless of what it actually is.
auto hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field);
TEST_EQ(hp_int64, 80);
auto hp_double = flatbuffers::GetAnyFieldF(root, hp_field);
TEST_EQ(hp_double, 80.0);
auto hp_string = flatbuffers::GetAnyFieldS(root, hp_field, &schema);
TEST_EQ_STR(hp_string.c_str(), "80");
// Get struct field through reflection
auto pos_struct = flatbuffers::GetFieldStruct(root, *pos_field_ptr);
TEST_NOTNULL(pos_struct);
TEST_EQ(flatbuffers::GetAnyFieldF(
*pos_struct, *pos_table_ptr->fields()->LookupByKey("z")), 3.0f);
auto test3_field = pos_table_ptr->fields()->LookupByKey("test3");
auto test3_struct = flatbuffers::GetFieldStruct(*pos_struct, *test3_field);
TEST_NOTNULL(test3_struct);
auto test3_object = schema.objects()->Get(test3_field->type()->index());
TEST_EQ(flatbuffers::GetAnyFieldF(
*test3_struct, *test3_object->fields()->LookupByKey("a")), 10);
// We can also modify it.
flatbuffers::SetField<uint16_t>(&root, hp_field, 200);
hp = flatbuffers::GetFieldI<uint16_t>(root, hp_field);
TEST_EQ(hp, 200);
// We can also set fields generically:
flatbuffers::SetAnyFieldI(&root, hp_field, 300);
hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field);
TEST_EQ(hp_int64, 300);
flatbuffers::SetAnyFieldF(&root, hp_field, 300.5);
hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field);
TEST_EQ(hp_int64, 300);
flatbuffers::SetAnyFieldS(&root, hp_field, "300");
hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field);
TEST_EQ(hp_int64, 300);
// Test buffer is valid after the modifications
TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), flatbuf, length),
true);
// Reset it, for further tests.
flatbuffers::SetField<uint16_t>(&root, hp_field, 80);
// More advanced functionality: changing the size of items in-line!
// First we put the FlatBuffer inside an std::vector.
std::vector<uint8_t> resizingbuf(flatbuf, flatbuf + length);
// Find the field we want to modify.
auto &name_field = *fields->LookupByKey("name");
// Get the root.
// This time we wrap the result from GetAnyRoot in a smartpointer that
// will keep rroot valid as resizingbuf resizes.
auto rroot = flatbuffers::piv(flatbuffers::GetAnyRoot(resizingbuf.data()),
resizingbuf);
SetString(schema, "totally new string", GetFieldS(**rroot, name_field),
&resizingbuf);
// Here resizingbuf has changed, but rroot is still valid.
TEST_EQ_STR(GetFieldS(**rroot, name_field)->c_str(), "totally new string");
// Now lets extend a vector by 100 elements (10 -> 110).
auto &inventory_field = *fields->LookupByKey("inventory");
auto rinventory = flatbuffers::piv(
flatbuffers::GetFieldV<uint8_t>(**rroot, inventory_field),
resizingbuf);
flatbuffers::ResizeVector<uint8_t>(schema, 110, 50, *rinventory,
&resizingbuf);
// rinventory still valid, so lets read from it.
TEST_EQ(rinventory->Get(10), 50);
// For reflection uses not covered already, there is a more powerful way:
// we can simply generate whatever object we want to add/modify in a
// FlatBuffer of its own, then add that to an existing FlatBuffer:
// As an example, let's add a string to an array of strings.
// First, find our field:
auto &testarrayofstring_field = *fields->LookupByKey("testarrayofstring");
// Find the vector value:
auto rtestarrayofstring = flatbuffers::piv(
flatbuffers::GetFieldV<flatbuffers::Offset<flatbuffers::String>>(
**rroot, testarrayofstring_field),
resizingbuf);
// It's a vector of 2 strings, to which we add one more, initialized to
// offset 0.
flatbuffers::ResizeVector<flatbuffers::Offset<flatbuffers::String>>(
schema, 3, 0, *rtestarrayofstring, &resizingbuf);
// Here we just create a buffer that contains a single string, but this
// could also be any complex set of tables and other values.
flatbuffers::FlatBufferBuilder stringfbb;
stringfbb.Finish(stringfbb.CreateString("hank"));
// Add the contents of it to our existing FlatBuffer.
// We do this last, so the pointer doesn't get invalidated (since it is
// at the end of the buffer):
auto string_ptr = flatbuffers::AddFlatBuffer(resizingbuf,
stringfbb.GetBufferPointer(),
stringfbb.GetSize());
// Finally, set the new value in the vector.
rtestarrayofstring->MutateOffset(2, string_ptr);
TEST_EQ_STR(rtestarrayofstring->Get(0)->c_str(), "bob");
TEST_EQ_STR(rtestarrayofstring->Get(2)->c_str(), "hank");
// Test integrity of all resize operations above.
flatbuffers::Verifier resize_verifier(
reinterpret_cast<const uint8_t *>(resizingbuf.data()),
resizingbuf.size());
TEST_EQ(VerifyMonsterBuffer(resize_verifier), true);
// Test buffer is valid using reflection as well
TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), resizingbuf.data(),
resizingbuf.size()), true);
// As an additional test, also set it on the name field.
// Note: unlike the name change above, this just overwrites the offset,
// rather than changing the string in-place.
SetFieldT(*rroot, name_field, string_ptr);
TEST_EQ_STR(GetFieldS(**rroot, name_field)->c_str(), "hank");
// Using reflection, rather than mutating binary FlatBuffers, we can also copy
// tables and other things out of other FlatBuffers into a FlatBufferBuilder,
// either part or whole.
flatbuffers::FlatBufferBuilder fbb;
auto root_offset = flatbuffers::CopyTable(fbb, schema, *root_table,
*flatbuffers::GetAnyRoot(flatbuf),
true);
fbb.Finish(root_offset, MonsterIdentifier());
// Test that it was copied correctly:
AccessFlatBufferTest(fbb.GetBufferPointer(), fbb.GetSize());
// Test buffer is valid using reflection as well
TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(),
fbb.GetBufferPointer(), fbb.GetSize()), true);
}
// Parse a .proto schema, output as .fbs
void ParseProtoTest() {
// load the .proto and the golden file from disk
std::string protofile;
std::string goldenfile;
TEST_EQ(flatbuffers::LoadFile(
"tests/prototest/test.proto", false, &protofile), true);
TEST_EQ(flatbuffers::LoadFile(
"tests/prototest/test.golden", false, &goldenfile), true);
flatbuffers::IDLOptions opts;
opts.include_dependence_headers = false;
opts.proto_mode = true;
// Parse proto.
flatbuffers::Parser parser(opts);
const char *include_directories[] = { "tests/prototest", nullptr };
TEST_EQ(parser.Parse(protofile.c_str(), include_directories), true);
// Generate fbs.
auto fbs = flatbuffers::GenerateFBS(parser, "test");
// Ensure generated file is parsable.
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse(fbs.c_str(), nullptr), true);
if (fbs != goldenfile) {
printf("%s----------------\n%s", fbs.c_str(), goldenfile.c_str());
TEST_NOTNULL(NULL);
}
}
template<typename T> void CompareTableFieldValue(flatbuffers::Table *table,
flatbuffers::voffset_t voffset,
T val) {
T read = table->GetField(voffset, static_cast<T>(0));
TEST_EQ(read, val);
}
// Low level stress/fuzz test: serialize/deserialize a variety of
// different kinds of data in different combinations
void FuzzTest1() {
// Values we're testing against: chosen to ensure no bits get chopped
// off anywhere, and also be different from each other.
const uint8_t bool_val = true;
const int8_t char_val = -127; // 0x81
const uint8_t uchar_val = 0xFF;
const int16_t short_val = -32222; // 0x8222;
const uint16_t ushort_val = 0xFEEE;
const int32_t int_val = 0x83333333;
const uint32_t uint_val = 0xFDDDDDDD;
const int64_t long_val = 0x8444444444444444LL;
const uint64_t ulong_val = 0xFCCCCCCCCCCCCCCCULL;
const float float_val = 3.14159f;
const double double_val = 3.14159265359;
const int test_values_max = 11;
const flatbuffers::voffset_t fields_per_object = 4;
const int num_fuzz_objects = 10000; // The higher, the more thorough :)
flatbuffers::FlatBufferBuilder builder;
lcg_reset(); // Keep it deterministic.
flatbuffers::uoffset_t objects[num_fuzz_objects];
// Generate num_fuzz_objects random objects each consisting of
// fields_per_object fields, each of a random type.
for (int i = 0; i < num_fuzz_objects; i++) {
auto start = builder.StartTable();
for (flatbuffers::voffset_t f = 0; f < fields_per_object; f++) {
int choice = lcg_rand() % test_values_max;
auto off = flatbuffers::FieldIndexToOffset(f);
switch (choice) {
case 0: builder.AddElement<uint8_t >(off, bool_val, 0); break;
case 1: builder.AddElement<int8_t >(off, char_val, 0); break;
case 2: builder.AddElement<uint8_t >(off, uchar_val, 0); break;
case 3: builder.AddElement<int16_t >(off, short_val, 0); break;
case 4: builder.AddElement<uint16_t>(off, ushort_val, 0); break;
case 5: builder.AddElement<int32_t >(off, int_val, 0); break;
case 6: builder.AddElement<uint32_t>(off, uint_val, 0); break;
case 7: builder.AddElement<int64_t >(off, long_val, 0); break;
case 8: builder.AddElement<uint64_t>(off, ulong_val, 0); break;
case 9: builder.AddElement<float >(off, float_val, 0); break;
case 10: builder.AddElement<double >(off, double_val, 0); break;
}
}
objects[i] = builder.EndTable(start, fields_per_object);
}
builder.PreAlign<flatbuffers::largest_scalar_t>(0); // Align whole buffer.
lcg_reset(); // Reset.
uint8_t *eob = builder.GetCurrentBufferPointer() + builder.GetSize();
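// The table offsets recorded above are distances from the end of the buffer
// (FlatBuffers builds back-to-front), hence the 'eob - objects[i]' math below.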
// Test that all objects we generated are readable and return the
// expected values. We generate random objects in the same order
// so this is deterministic.
for (int i = 0; i < num_fuzz_objects; i++) {
auto table = reinterpret_cast<flatbuffers::Table *>(eob - objects[i]);
for (flatbuffers::voffset_t f = 0; f < fields_per_object; f++) {
int choice = lcg_rand() % test_values_max;
flatbuffers::voffset_t off = flatbuffers::FieldIndexToOffset(f);
switch (choice) {
case 0: CompareTableFieldValue(table, off, bool_val ); break;
case 1: CompareTableFieldValue(table, off, char_val ); break;
case 2: CompareTableFieldValue(table, off, uchar_val ); break;
case 3: CompareTableFieldValue(table, off, short_val ); break;
case 4: CompareTableFieldValue(table, off, ushort_val); break;
case 5: CompareTableFieldValue(table, off, int_val ); break;
case 6: CompareTableFieldValue(table, off, uint_val ); break;
case 7: CompareTableFieldValue(table, off, long_val ); break;
case 8: CompareTableFieldValue(table, off, ulong_val ); break;
case 9: CompareTableFieldValue(table, off, float_val ); break;
case 10: CompareTableFieldValue(table, off, double_val); break;
}
}
}
}
// High level stress/fuzz test: generate a big schema and
// matching json data in random combinations, then parse both,
// generate json back from the binary, and compare with the original.
void FuzzTest2() {
lcg_reset(); // Keep it deterministic.
const int num_definitions = 30;
const int num_struct_definitions = 5; // Subset of num_definitions.
const int fields_per_definition = 15;
const int instances_per_definition = 5;
const int deprecation_rate = 10; // 1 in deprecation_rate fields will
// be deprecated.
std::string schema = "namespace test;\n\n";
struct RndDef {
std::string instances[instances_per_definition];
// Since we're generating schema and corresponding data in tandem,
// this convenience function adds strings to both at once.
static void Add(RndDef (&definitions_l)[num_definitions],
std::string &schema_l,
const int instances_per_definition_l,
const char *schema_add, const char *instance_add,
int definition) {
schema_l += schema_add;
for (int i = 0; i < instances_per_definition_l; i++)
definitions_l[definition].instances[i] += instance_add;
}
};
#define AddToSchemaAndInstances(schema_add, instance_add) \
RndDef::Add(definitions, schema, instances_per_definition, \
schema_add, instance_add, definition)
#define Dummy() \
RndDef::Add(definitions, schema, instances_per_definition, \
"byte", "1", definition)
RndDef definitions[num_definitions];
// We are going to generate num_definitions, the first
// num_struct_definitions will be structs, the rest tables. For each
// generate random fields, some of which may be struct/table types
// referring to previously generated structs/tables.
// Simultaneously, we generate instances_per_definition JSON data
// definitions, which will have identical structure to the schema
// being generated. We generate multiple instances such that when creating
// hierarchy, we get some variety by picking one randomly.
for (int definition = 0; definition < num_definitions; definition++) {
std::string definition_name = "D" + flatbuffers::NumToString(definition);
bool is_struct = definition < num_struct_definitions;
AddToSchemaAndInstances(
((is_struct ? "struct " : "table ") + definition_name + " {\n").c_str(),
"{\n");
for (int field = 0; field < fields_per_definition; field++) {
const bool is_last_field = field == fields_per_definition - 1;
// Deprecate 1 in deprecation_rate fields. Only table fields can be
// deprecated.
// Don't deprecate the last field to avoid dangling commas in JSON.
const bool deprecated = !is_struct &&
!is_last_field &&
(lcg_rand() % deprecation_rate == 0);
std::string field_name = "f" + flatbuffers::NumToString(field);
AddToSchemaAndInstances((" " + field_name + ":").c_str(),
deprecated ? "" : (field_name + ": ").c_str());
// Pick random type:
int base_type = lcg_rand() % (flatbuffers::BASE_TYPE_UNION + 1);
switch (base_type) {
case flatbuffers::BASE_TYPE_STRING:
if (is_struct) {
Dummy(); // No strings in structs.
} else {
AddToSchemaAndInstances("string", deprecated ? "" : "\"hi\"");
}
break;
case flatbuffers::BASE_TYPE_VECTOR:
if (is_struct) {
Dummy(); // No vectors in structs.
}
else {
AddToSchemaAndInstances("[ubyte]",
deprecated ? "" : "[\n0,\n1,\n255\n]");
}
break;
case flatbuffers::BASE_TYPE_NONE:
case flatbuffers::BASE_TYPE_UTYPE:
case flatbuffers::BASE_TYPE_STRUCT:
case flatbuffers::BASE_TYPE_UNION:
if (definition) {
// Pick a random previous definition and random data instance of
// that definition.
int defref = lcg_rand() % definition;
int instance = lcg_rand() % instances_per_definition;
AddToSchemaAndInstances(
("D" + flatbuffers::NumToString(defref)).c_str(),
deprecated
? ""
: definitions[defref].instances[instance].c_str());
} else {
// If this is the first definition, we have no definition we can
// refer to.
Dummy();
}
break;
case flatbuffers::BASE_TYPE_BOOL:
AddToSchemaAndInstances("bool", deprecated
? ""
: (lcg_rand() % 2 ? "true" : "false"));
break;
default:
// All the scalar types.
schema += flatbuffers::kTypeNames[base_type];
if (!deprecated) {
// We want each instance to use its own random value.
for (int inst = 0; inst < instances_per_definition; inst++)
definitions[definition].instances[inst] +=
flatbuffers::NumToString(lcg_rand() % 128).c_str();
}
}
AddToSchemaAndInstances(
deprecated ? "(deprecated);\n" : ";\n",
deprecated ? "" : is_last_field ? "\n" : ",\n");
}
AddToSchemaAndInstances("}\n\n", "}");
}
schema += "root_type D" + flatbuffers::NumToString(num_definitions - 1);
schema += ";\n";
flatbuffers::Parser parser;
// Will not compare against the original if we don't write defaults
parser.builder_.ForceDefaults(true);
// Parse the schema, parse the generated data, then generate text back
// from the binary and compare against the original.
TEST_EQ(parser.Parse(schema.c_str()), true);
const std::string &json =
definitions[num_definitions - 1].instances[0] + "\n";
TEST_EQ(parser.Parse(json.c_str()), true);
std::string jsongen;
parser.opts.indent_step = 0;
auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
if (jsongen != json) {
// These strings are larger than a megabyte, so we show the bytes around
// the first bytes that are different rather than the whole string.
size_t len = std::min(json.length(), jsongen.length());
for (size_t i = 0; i < len; i++) {
if (json[i] != jsongen[i]) {
i -= std::min(static_cast<size_t>(10), i); // show some context;
size_t end = std::min(len, i + 20);
for (; i < end; i++)
printf("at %d: found \"%c\", expected \"%c\"\n",
static_cast<int>(i), jsongen[i], json[i]);
break;
}
}
TEST_NOTNULL(NULL);
}
printf("%dk schema tested with %dk of json\n",
static_cast<int>(schema.length() / 1024),
static_cast<int>(json.length() / 1024));
}
// Test that parser errors are actually generated.
void TestError(const char *src, const char *error_substr,
bool strict_json = false) {
flatbuffers::IDLOptions opts;
opts.strict_json = strict_json;
flatbuffers::Parser parser(opts);
TEST_EQ(parser.Parse(src), false); // Must signal error
// Must be the error we're expecting
TEST_NOTNULL(strstr(parser.error_.c_str(), error_substr));
}
// Test that parsing errors occur as we'd expect.
// Also useful for coverage, making sure these paths are run.
void ErrorTest() {
// In order they appear in idl_parser.cpp
TestError("table X { Y:byte; } root_type X; { Y: 999 }", "bit field");
TestError(".0", "floating point");
TestError("\"\0", "illegal");
TestError("\"\\q", "escape code");
TestError("table ///", "documentation");
TestError("@", "illegal");
TestError("table 1", "expecting");
TestError("table X { Y:[[int]]; }", "nested vector");
TestError("table X { Y:1; }", "illegal type");
TestError("table X { Y:int; Y:int; }", "field already");
TestError("struct X { Y:string; }", "only scalar");
TestError("struct X { Y:int (deprecated); }", "deprecate");
TestError("union Z { X } table X { Y:Z; } root_type X; { Y: {}, A:1 }",
"missing type field");
TestError("union Z { X } table X { Y:Z; } root_type X; { Y_type: 99, Y: {",
"type id");
TestError("table X { Y:int; } root_type X; { Z:", "unknown field");
TestError("table X { Y:int; } root_type X; { Y:", "string constant", true);
TestError("table X { Y:int; } root_type X; { \"Y\":1, }", "string constant",
true);
TestError("struct X { Y:int; Z:int; } table W { V:X; } root_type W; "
"{ V:{ Y:1 } }", "wrong number");
TestError("enum E:byte { A } table X { Y:E; } root_type X; { Y:U }",
"unknown enum value");
TestError("table X { Y:byte; } root_type X; { Y:; }", "starting");
TestError("enum X:byte { Y } enum X {", "enum already");
TestError("enum X:float {}", "underlying");
TestError("enum X:byte { Y, Y }", "value already");
TestError("enum X:byte { Y=2, Z=1 }", "ascending");
TestError("enum X:byte (bit_flags) { Y=8 }", "bit flag out");
TestError("table X { Y:int; } table X {", "datatype already");
TestError("struct X (force_align: 7) { Y:int; }", "force_align");
TestError("{}", "no root");
TestError("table X { Y:byte; } root_type X; { Y:1 } { Y:1 }", "one json");
TestError("root_type X;", "unknown root");
TestError("struct X { Y:int; } root_type X;", "a table");
TestError("union X { Y }", "referenced");
TestError("union Z { X } struct X { Y:int; }", "only tables");
TestError("table X { Y:[int]; YLength:int; }", "clash");
TestError("table X { Y:string = 1; }", "scalar");
TestError("table X { Y:byte; } root_type X; { Y:1, Y:2 }", "more than once");
}
template<typename T> T TestValue(const char *json, const char *type_name) {
flatbuffers::Parser parser;
// Simple schema.
TEST_EQ(parser.Parse(std::string("table X { Y:" + std::string(type_name) +
"; } root_type X;").c_str()), true);
TEST_EQ(parser.Parse(json), true);
auto root = flatbuffers::GetRoot<flatbuffers::Table>(
parser.builder_.GetBufferPointer());
return root->GetField<T>(flatbuffers::FieldIndexToOffset(0), 0);
}
bool FloatCompare(float a, float b) { return fabs(a - b) < 0.001; }
// Additional parser testing not covered elsewhere.
void ValueTest() {
// Test scientific notation numbers.
TEST_EQ(FloatCompare(TestValue<float>("{ Y:0.0314159e+2 }","float"),
(float)3.14159), true);
// Test conversion functions.
TEST_EQ(FloatCompare(TestValue<float>("{ Y:cos(rad(180)) }","float"), -1),
true);
// Test negative hex constant.
TEST_EQ(TestValue<int>("{ Y:-0x80 }","int"), -128);
// Make sure we do unsigned 64bit correctly.
TEST_EQ(TestValue<uint64_t>("{ Y:12335089644688340133 }","ulong"),
12335089644688340133ULL);
}
void EnumStringsTest() {
flatbuffers::Parser parser1;
TEST_EQ(parser1.Parse("enum E:byte { A, B, C } table T { F:[E]; }"
"root_type T;"
"{ F:[ A, B, \"C\", \"A B C\" ] }"), true);
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse("enum E:byte { A, B, C } table T { F:[int]; }"
"root_type T;"
"{ F:[ \"E.C\", \"E.A E.B E.C\" ] }"), true);
}
void IntegerOutOfRangeTest() {
TestError("table T { F:byte; } root_type T; { F:256 }",
"constant does not fit");
TestError("table T { F:byte; } root_type T; { F:-257 }",
"constant does not fit");
TestError("table T { F:ubyte; } root_type T; { F:256 }",
"constant does not fit");
TestError("table T { F:ubyte; } root_type T; { F:-257 }",
"constant does not fit");
TestError("table T { F:short; } root_type T; { F:65536 }",
"constant does not fit");
TestError("table T { F:short; } root_type T; { F:-65537 }",
"constant does not fit");
TestError("table T { F:ushort; } root_type T; { F:65536 }",
"constant does not fit");
TestError("table T { F:ushort; } root_type T; { F:-65537 }",
"constant does not fit");
TestError("table T { F:int; } root_type T; { F:4294967296 }",
"constant does not fit");
TestError("table T { F:int; } root_type T; { F:-4294967297 }",
"constant does not fit");
TestError("table T { F:uint; } root_type T; { F:4294967296 }",
"constant does not fit");
TestError("table T { F:uint; } root_type T; { F:-4294967297 }",
"constant does not fit");
}
void UnicodeTest() {
flatbuffers::Parser parser;
// Without setting allow_non_utf8 = true, we treat \x sequences as byte sequences
// which are then validated as UTF-8.
TEST_EQ(parser.Parse("table T { F:string; }"
"root_type T;"
"{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\xE2\\x82\\xAC\\u0080\\uD83D\\uDE0E\" }"),
true);
std::string jsongen;
parser.opts.indent_step = -1;
auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ(jsongen,
std::string(
"{F: \"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\u20AC\\u0080\\uD83D\\uDE0E\"}"));
}
void UnicodeTestAllowNonUTF8() {
flatbuffers::Parser parser;
parser.opts.allow_non_utf8 = true;
TEST_EQ(parser.Parse("table T { F:string; }"
"root_type T;"
"{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\x01\\x80\\u0080\\uD83D\\uDE0E\" }"), true);
std::string jsongen;
parser.opts.indent_step = -1;
auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ(jsongen,
std::string(
"{F: \"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\u0001\\x80\\u0080\\uD83D\\uDE0E\"}"));
}
void UnicodeTestGenerateTextFailsOnNonUTF8() {
flatbuffers::Parser parser;
// Allow non-UTF-8 initially to model what happens when we load a binary flatbuffer from disk
// which contains non-UTF-8 strings.
parser.opts.allow_non_utf8 = true;
TEST_EQ(parser.Parse("table T { F:string; }"
"root_type T;"
"{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\x01\\x80\\u0080\\uD83D\\uDE0E\" }"), true);
std::string jsongen;
parser.opts.indent_step = -1;
// Now, disallow non-UTF-8 (the default behavior) so GenerateText indicates failure.
parser.opts.allow_non_utf8 = false;
auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, false);
}
void UnicodeSurrogatesTest() {
flatbuffers::Parser parser;
TEST_EQ(
parser.Parse(
"table T { F:string (id: 0); }"
"root_type T;"
"{ F:\"\\uD83D\\uDCA9\"}"), true);
auto root = flatbuffers::GetRoot<flatbuffers::Table>(
parser.builder_.GetBufferPointer());
auto string = root->GetPointer<flatbuffers::String *>(
flatbuffers::FieldIndexToOffset(0));
TEST_EQ(strcmp(string->c_str(), "\xF0\x9F\x92\xA9"), 0);
}
void UnicodeInvalidSurrogatesTest() {
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uD800\"}", "unpaired high surrogate");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uD800abcd\"}", "unpaired high surrogate");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uD800\\n\"}", "unpaired high surrogate");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uD800\\uD800\"}", "multiple high surrogates");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uDC00\"}", "unpaired low surrogate");
}
void InvalidUTF8Test() {
// "1 byte" pattern, under min length of 2 bytes
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\x80\"}", "illegal UTF-8 sequence");
// 2 byte pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xDF\"}", "illegal UTF-8 sequence");
// 3 byte pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xEF\xBF\"}", "illegal UTF-8 sequence");
// 4 byte pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xF7\xBF\xBF\"}", "illegal UTF-8 sequence");
// "5 byte" pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFB\xBF\xBF\xBF\"}", "illegal UTF-8 sequence");
// "6 byte" pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFD\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence");
// "7 byte" pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFE\xBF\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence");
// "5 byte" pattern, over max length of 4 bytes
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFB\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence");
// "6 byte" pattern, over max length of 4 bytes
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFD\xBF\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence");
// "7 byte" pattern, over max length of 4 bytes
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFE\xBF\xBF\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence");
// Three invalid encodings for U+000A (\n, aka NEWLINE)
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xC0\x8A\"}", "illegal UTF-8 sequence");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xE0\x80\x8A\"}", "illegal UTF-8 sequence");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xF0\x80\x80\x8A\"}", "illegal UTF-8 sequence");
// Two invalid encodings for U+00A9 (COPYRIGHT SYMBOL)
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xE0\x81\xA9\"}", "illegal UTF-8 sequence");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xF0\x80\x81\xA9\"}", "illegal UTF-8 sequence");
// Invalid encoding for U+20AC (EURO SYMBOL)
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xF0\x82\x82\xAC\"}", "illegal UTF-8 sequence");
// UTF-16 surrogate values between U+D800 and U+DFFF cannot be encoded in UTF-8
TestError(
"table T { F:string; }"
"root_type T;"
// U+10400 "encoded" as U+D801 U+DC00
"{ F:\"\xED\xA0\x81\xED\xB0\x80\"}", "illegal UTF-8 sequence");
}
void UnknownFieldsTest() {
flatbuffers::IDLOptions opts;
opts.skip_unexpected_fields_in_json = true;
flatbuffers::Parser parser(opts);
TEST_EQ(parser.Parse("table T { str:string; i:int;}"
"root_type T;"
"{ str:\"test\","
"unknown_string:\"test\","
"\"unknown_string\":\"test\","
"unknown_int:10,"
"unknown_float:1.0,"
"unknown_array: [ 1, 2, 3, 4],"
"unknown_object: { i: 10 },"
"\"unknown_object\": { \"i\": 10 },"
"i:10}"), true);
std::string jsongen;
parser.opts.indent_step = -1;
auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ(jsongen == "{str: \"test\",i: 10}", true);
}
void ParseUnionTest() {
// Unions must be parseable with the type field following the object.
flatbuffers::Parser parser;
TEST_EQ(parser.Parse("table T { A:int; }"
"union U { T }"
"table V { X:U; }"
"root_type V;"
"{ X:{ A:1 }, X_type: T }"), true);
// Unions must be parsable with prefixed namespace.
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse("namespace N; table A {} namespace; union U { N.A }"
"table B { e:U; } root_type B;"
"{ e_type: N_A, e: {} }"), true);
}
void UnionVectorTest() {
// load FlatBuffer fbs schema.
// TODO: load a JSON file with such a vector when JSON support is ready.
std::string schemafile;
TEST_EQ(flatbuffers::LoadFile(
"tests/union_vector/union_vector.fbs", false, &schemafile), true);
// parse schema.
flatbuffers::IDLOptions idl_opts;
idl_opts.lang_to_generate |= flatbuffers::IDLOptions::kCpp;
flatbuffers::Parser parser(idl_opts);
const char *include_directories[] = { "tests/union_vector", nullptr };
TEST_EQ(parser.Parse(schemafile.c_str(), include_directories), true);
flatbuffers::FlatBufferBuilder fbb;
// union types.
std::vector<uint8_t> types;
types.push_back(static_cast<uint8_t>(Character_Belle));
types.push_back(static_cast<uint8_t>(Character_Rapunzel));
types.push_back(static_cast<uint8_t>(Character_MuLan));
// union values.
std::vector<flatbuffers::Offset<void>> characters;
characters.push_back(CreateBelle(fbb, /*books_read=*/7).Union());
characters.push_back(CreateRapunzel(fbb, /*hair_length=*/6).Union());
characters.push_back(CreateMuLan(fbb, /*sword_attack_damage=*/5).Union());
// create Movie.
const auto movie_offset =
CreateMovie(fbb, fbb.CreateVector(types), fbb.CreateVector(characters));
FinishMovieBuffer(fbb, movie_offset);
uint8_t *buf = fbb.GetBufferPointer();
flatbuffers::Verifier verifier(buf, fbb.GetSize());
TEST_EQ(VerifyMovieBuffer(verifier), true);
const Movie *movie = GetMovie(buf);
TEST_EQ(movie->characters_type()->size(), 3);
TEST_EQ(
movie->characters_type()->GetEnum<Character>(0) == Character_Belle,
true);
TEST_EQ(
movie->characters_type()->GetEnum<Character>(1) == Character_Rapunzel,
true);
TEST_EQ(
movie->characters_type()->GetEnum<Character>(2) == Character_MuLan,
true);
TEST_EQ(movie->characters()->size(), 3);
const Belle *belle =
reinterpret_cast<const Belle*>(movie->characters()->Get(0));
TEST_EQ(belle->books_read(), 7);
const Rapunzel *rapunzel =
reinterpret_cast<const Rapunzel*>(movie->characters()->Get(1));
TEST_EQ(rapunzel->hair_length(), 6);
const MuLan *mu_lan =
reinterpret_cast<const MuLan*>(movie->characters()->Get(2));
TEST_EQ(mu_lan->sword_attack_damage(), 5);
}
void ConformTest() {
flatbuffers::Parser parser;
TEST_EQ(parser.Parse("table T { A:int; } enum E:byte { A }"), true);
auto test_conform = [&](const char *test, const char *expected_err) {
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse(test), true);
auto err = parser2.ConformTo(parser);
TEST_NOTNULL(strstr(err.c_str(), expected_err));
};
test_conform("table T { A:byte; }", "types differ for field");
test_conform("table T { B:int; A:int; }", "offsets differ for field");
test_conform("table T { A:int = 1; }", "defaults differ for field");
test_conform("table T { B:float; }", "field renamed to different type");
test_conform("enum E:byte { B, A }", "values differ for enum");
}
void FlexBuffersTest() {
flexbuffers::Builder slb(512,
flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);
// Write the equivalent of:
// { vec: [ -100, "Fred", 4.0 ], bar: [ 1, 2, 3 ], foo: 100 }
slb.Map([&]() {
slb.Vector("vec", [&]() {
slb += -100; // Equivalent to slb.Add(-100) or slb.Int(-100);
slb += "Fred";
slb.IndirectFloat(4.0f);
});
int ints[] = { 1, 2, 3 };
slb.Vector("bar", ints, 3);
slb.FixedTypedVector("bar3", ints, 3);
slb.Double("foo", 100);
slb.Map("mymap", [&]() {
slb.String("foo", "Fred"); // Testing key and string reuse.
});
});
slb.Finish();
for (size_t i = 0; i < slb.GetBuffer().size(); i++)
printf("%d ", slb.GetBuffer().data()[i]);
printf("\n");
auto map = flexbuffers::GetRoot(slb.GetBuffer()).AsMap();
TEST_EQ(map.size(), 5);
auto vec = map["vec"].AsVector();
TEST_EQ(vec.size(), 3);
TEST_EQ(vec[0].AsInt64(), -100);
TEST_EQ_STR(vec[1].AsString().c_str(), "Fred");
TEST_EQ(vec[1].AsInt64(), 0); // Number parsing failed.
TEST_EQ(vec[2].AsDouble(), 4.0);
TEST_EQ(vec[2].AsString().IsTheEmptyString(), true); // Wrong Type.
TEST_EQ_STR(vec[2].AsString().c_str(), ""); // This still works though.
TEST_EQ_STR(vec[2].ToString().c_str(), "4"); // Or have it converted.
auto tvec = map["bar"].AsTypedVector();
TEST_EQ(tvec.size(), 3);
TEST_EQ(tvec[2].AsInt8(), 3);
auto tvec3 = map["bar3"].AsFixedTypedVector();
TEST_EQ(tvec3.size(), 3);
TEST_EQ(tvec3[2].AsInt8(), 3);
TEST_EQ(map["foo"].AsUInt8(), 100);
TEST_EQ(map["unknown"].IsNull(), true);
auto mymap = map["mymap"].AsMap();
// These should be equal by pointer equality, since key and value are shared.
TEST_EQ(mymap.Keys()[0].AsKey(), map.Keys()[2].AsKey());
TEST_EQ(mymap.Values()[0].AsString().c_str(), vec[1].AsString().c_str());
// We can mutate values in the buffer.
TEST_EQ(vec[0].MutateInt(-99), true);
TEST_EQ(vec[0].AsInt64(), -99);
TEST_EQ(vec[1].MutateString("John"), true); // Size must match.
TEST_EQ_STR(vec[1].AsString().c_str(), "John");
TEST_EQ(vec[1].MutateString("Alfred"), false); // Too long.
TEST_EQ(vec[2].MutateFloat(2.0f), true);
TEST_EQ(vec[2].AsFloat(), 2.0f);
TEST_EQ(vec[2].MutateFloat(3.14159), false); // Double does not fit in float.
}
int main(int /*argc*/, const char * /*argv*/[]) {
// Run our various test suites:
std::string rawbuf;
auto flatbuf = CreateFlatBufferTest(rawbuf);
AccessFlatBufferTest(reinterpret_cast<const uint8_t *>(rawbuf.c_str()),
rawbuf.length());
AccessFlatBufferTest(flatbuf.get(), rawbuf.length());
MutateFlatBuffersTest(flatbuf.get(), rawbuf.length());
ObjectFlatBuffersTest(flatbuf.get());
SizePrefixedTest();
#ifndef FLATBUFFERS_NO_FILE_TESTS
ParseAndGenerateTextTest();
ReflectionTest(flatbuf.get(), rawbuf.length());
ParseProtoTest();
UnionVectorTest();
#endif
FuzzTest1();
FuzzTest2();
ErrorTest();
ValueTest();
EnumStringsTest();
IntegerOutOfRangeTest();
UnicodeTest();
UnicodeTestAllowNonUTF8();
UnicodeTestGenerateTextFailsOnNonUTF8();
UnicodeSurrogatesTest();
UnicodeInvalidSurrogatesTest();
InvalidUTF8Test();
UnknownFieldsTest();
ParseUnionTest();
ConformTest();
FlexBuffersTest();
if (!testing_fails) {
TEST_OUTPUT_LINE("ALL TESTS PASSED");
return 0;
} else {
TEST_OUTPUT_LINE("%d FAILED TESTS", testing_fails);
return 1;
}
}
| 1 | 11,579 | `vecofstructs->size()` already returns a `uoffset_t`. So if you just change the type of `i`, you don't need any casts. | google-flatbuffers | java |
@@ -197,7 +197,14 @@ public abstract class GapicProductConfig implements ProductConfig {
}
// Toggle on/off proto annotations parsing.
- ProtoParser protoParser = new ProtoParser(getDefaultLanguageFeatureConfig(language, null));
+ ProtoParser protoParser;
+ // TODO(andrealin): Expose command-line option for toggling proto annotations parsing.
+ if (configProto == null) {
+ // By default, enable proto annotations parsing when no GAPIC config is given.
+ protoParser = new ProtoParser(true);
+ } else {
+ protoParser = new ProtoParser(getDefaultLanguageFeatureConfig(language, null));
+ }
if (configProto == null) {
configProto = ConfigProto.getDefaultInstance();
} | 1 | /* Copyright 2016 Google LLC
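A possible follow-up to this hunk is to fold the new ProtoParser selection into the existing configProto null check that immediately follows it, so the condition is only tested once. The sketch below is illustrative only — it assumes the surrounding create(...) method shown later in this file and is not the change made in the diff:

    ProtoParser protoParser;
    if (configProto == null) {
      // By default, enable proto annotations parsing when no GAPIC config is given.
      protoParser = new ProtoParser(true);
      configProto = ConfigProto.getDefaultInstance();
    } else {
      protoParser = new ProtoParser(getDefaultLanguageFeatureConfig(language, null));
    }

Either form leaves configProto non-null for the rest of the method; the merged version simply avoids testing the same condition twice.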
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.Resource;
import com.google.api.ResourceSet;
import com.google.api.codegen.CollectionConfigProto;
import com.google.api.codegen.CollectionOneofProto;
import com.google.api.codegen.ConfigProto;
import com.google.api.codegen.FixedResourceNameValueProto;
import com.google.api.codegen.InterfaceConfigProto;
import com.google.api.codegen.LanguageSettingsProto;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.ReleaseLevel;
import com.google.api.codegen.ResourceNameTreatment;
import com.google.api.codegen.common.TargetLanguage;
import com.google.api.codegen.configgen.mergers.LanguageSettingsMerger;
import com.google.api.codegen.transformer.DefaultFeatureConfig;
import com.google.api.codegen.transformer.FeatureConfig;
import com.google.api.codegen.transformer.csharp.CSharpFeatureConfig;
import com.google.api.codegen.transformer.java.JavaFeatureConfig;
import com.google.api.codegen.transformer.nodejs.NodeJSFeatureConfig;
import com.google.api.codegen.transformer.php.PhpFeatureConfig;
import com.google.api.codegen.transformer.ruby.RubyFeatureConfig;
import com.google.api.codegen.util.LicenseHeaderUtil;
import com.google.api.codegen.util.ProtoParser;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.Model;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.api.tools.framework.model.SymbolTable;
import com.google.api.tools.framework.tools.ToolUtil;
import com.google.auto.value.AutoValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Iterables;
import com.google.protobuf.Api;
import com.google.protobuf.DescriptorProtos;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import org.apache.commons.lang3.StringUtils;
/**
* GapicProductConfig represents client code-gen config for an API product contained in a
* {api}_gapic.yaml configuration file.
*/
@AutoValue
public abstract class GapicProductConfig implements ProductConfig {
public abstract ImmutableMap<String, ? extends InterfaceConfig> getInterfaceConfigMap();
/** Returns the package name. */
@Override
public abstract String getPackageName();
/** Returns the location of the domain layer, if any. */
public abstract String getDomainLayerLocation();
/** Returns the release level, if any. */
public abstract ReleaseLevel getReleaseLevel();
/** Returns the resource name messages configuration. If none was specified, returns null. */
@Nullable
public abstract ResourceNameMessageConfigs getResourceNameMessageConfigs();
/** Returns the lines from the configured copyright file. */
@Override
public abstract ImmutableList<String> getCopyrightLines();
/** Returns the lines from the configured license file. */
@Override
public abstract ImmutableList<String> getLicenseLines();
/** Returns a map from entity names to resource name configs. */
public abstract ImmutableMap<String, ResourceNameConfig> getResourceNameConfigs();
  /** Returns the parser used for reading proto annotations. */
public abstract ProtoParser getProtoParser();
/** Returns the type of transport for the generated client. Defaults to Grpc. */
public abstract TransportProtocol getTransportProtocol();
/**
* Returns a map from fully qualified field names to FieldConfigs for all fields that have a
* resource name type specified. This is the default field config for each field, and should be
* used when not in the context of a particular method or flattening configuration.
*/
public abstract ImmutableMap<String, FieldConfig> getDefaultResourceNameFieldConfigMap();
/** Returns the version of config schema. */
@Nullable
public abstract String getConfigSchemaVersion();
public GapicProductConfig withPackageName(String packageName) {
return new AutoValue_GapicProductConfig(
getInterfaceConfigMap(),
packageName,
getDomainLayerLocation(),
getReleaseLevel(),
getResourceNameMessageConfigs(),
getCopyrightLines(),
getLicenseLines(),
getResourceNameConfigs(),
getProtoParser(),
getTransportProtocol(),
getDefaultResourceNameFieldConfigMap(),
getConfigSchemaVersion());
}
@Nullable
public static GapicProductConfig create(
Model model, ConfigProto configProto, TargetLanguage language) {
return create(model, configProto, null, null, language);
}
/**
* Creates an instance of GapicProductConfig based on ConfigProto, linking up API interface
* configurations with specified interfaces in interfaceConfigMap. On errors, null will be
* returned, and diagnostics are reported to the model.
*
* @param model The protobuf model for which we are creating a config.
* @param configProto The parsed set of config files from input
* @param protoPackage The source proto package, as opposed to imported protos, that we will
* generate clients for.
* @param clientPackage The desired package name for the generated client.
* @param language The language that this config will be used to generate a client in.
*/
@Nullable
public static GapicProductConfig create(
Model model,
@Nullable ConfigProto configProto,
@Nullable String protoPackage,
@Nullable String clientPackage,
TargetLanguage language) {
final String defaultPackage;
SymbolTable symbolTable = model.getSymbolTable();
if (protoPackage != null) {
// Default to using --package option for value of default package and first API protoFile.
defaultPackage = protoPackage;
} else if (configProto != null) {
// Otherwise use configProto to get the proto file containing the first interface listed in
// the config proto, and use it as
// the assigned file for generated resource names, and to get the default message namespace.
ProtoFile file =
symbolTable.lookupInterface(configProto.getInterfaces(0).getName()).getFile();
defaultPackage = file.getProto().getPackage();
} else {
throw new NullPointerException("configProto and protoPackage cannot both be null.");
}
List<ProtoFile> sourceProtos =
model
.getFiles()
.stream()
.filter(f -> f.getProto().getPackage().equals(defaultPackage))
.collect(Collectors.toList());
if (protoPackage != null && configProto == null) {
if (sourceProtos.isEmpty()) {
model
.getDiagReporter()
.getDiagCollector()
.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"There are no source proto files with package %s",
defaultPackage));
}
sourceProtos.forEach(model::addRoot);
}
// Toggle on/off proto annotations parsing.
ProtoParser protoParser = new ProtoParser(getDefaultLanguageFeatureConfig(language, null));
if (configProto == null) {
configProto = ConfigProto.getDefaultInstance();
}
DiagCollector diagCollector = model.getDiagReporter().getDiagCollector();
Map<Resource, ProtoFile> resourceDefs =
protoParser.getResourceDefs(sourceProtos, diagCollector);
Map<ResourceSet, ProtoFile> resourceSetDefs =
protoParser.getResourceSetDefs(sourceProtos, diagCollector);
// Get list of fields from proto
ResourceNameMessageConfigs messageConfigs =
ResourceNameMessageConfigs.createMessageResourceTypesConfig(
sourceProtos, configProto, defaultPackage, resourceDefs, resourceSetDefs, protoParser);
// Update the protoParser with new info.
protoParser = new ProtoParser(getDefaultLanguageFeatureConfig(language, messageConfigs));
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs =
createResourceNameConfigs(
diagCollector,
configProto,
sourceProtos,
language,
resourceDefs,
resourceSetDefs,
protoParser);
if (resourceNameConfigs == null) {
return null;
}
TransportProtocol transportProtocol = TransportProtocol.GRPC;
String clientPackageName;
LanguageSettingsProto settings =
configProto.getLanguageSettingsMap().get(language.toString().toLowerCase());
if (settings == null) {
settings = LanguageSettingsProto.getDefaultInstance();
if (!Strings.isNullOrEmpty(clientPackage)) {
clientPackageName = clientPackage;
} else {
String basePackageName = Optional.ofNullable(protoPackage).orElse(getPackageName(model));
clientPackageName =
LanguageSettingsMerger.getFormattedPackageName(language, basePackageName);
}
} else {
clientPackageName = settings.getPackageName();
}
ImmutableMap<String, Interface> protoInterfaces =
getInterfacesFromProtoFile(diagCollector, sourceProtos, symbolTable);
// Collect the interfaces (clients) and methods that we will generate on the surface.
// Not all methods defined in the protofiles will be generated on the surface.
ImmutableList<GapicInterfaceInput> interfaceInputs;
if (!configProto.equals(ConfigProto.getDefaultInstance())) {
interfaceInputs =
createInterfaceInputsWithGapicConfig(
diagCollector, configProto.getInterfacesList(), protoInterfaces, symbolTable);
} else {
interfaceInputs = createInterfaceInputsWithoutGapicConfig(protoInterfaces.values());
}
if (interfaceInputs == null) {
return null;
}
ImmutableMap<String, InterfaceConfig> interfaceConfigMap =
createInterfaceConfigMap(
diagCollector,
interfaceInputs,
defaultPackage,
settings,
messageConfigs,
resourceNameConfigs,
language,
protoParser);
ImmutableList<String> copyrightLines;
ImmutableList<String> licenseLines;
String configSchemaVersion = null;
try {
LicenseHeaderUtil licenseHeaderUtil =
LicenseHeaderUtil.create(
configProto, settings, model.getDiagReporter().getDiagCollector());
copyrightLines = licenseHeaderUtil.loadCopyrightLines();
licenseLines = licenseHeaderUtil.loadLicenseLines();
} catch (Exception e) {
model
.getDiagReporter()
.getDiagCollector()
.addDiag(Diag.error(SimpleLocation.TOPLEVEL, "Exception: %s", e.getMessage()));
e.printStackTrace(System.err);
throw new RuntimeException(e);
}
if (!configProto.equals(ConfigProto.getDefaultInstance())) {
configSchemaVersion = configProto.getConfigSchemaVersion();
if (Strings.isNullOrEmpty(configSchemaVersion)) {
model
.getDiagReporter()
.getDiagCollector()
.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"config_schema_version field is required in GAPIC yaml."));
}
}
if (interfaceConfigMap == null || copyrightLines == null || licenseLines == null) {
return null;
}
return new AutoValue_GapicProductConfig(
interfaceConfigMap,
clientPackageName,
settings.getDomainLayerLocation(),
settings.getReleaseLevel(),
messageConfigs,
copyrightLines,
licenseLines,
resourceNameConfigs,
protoParser,
transportProtocol,
createResponseFieldConfigMap(messageConfigs, resourceNameConfigs),
configSchemaVersion);
}
public static GapicProductConfig create(
DiscoApiModel model, ConfigProto configProto, TargetLanguage language) {
String defaultPackage =
configProto
.getLanguageSettingsMap()
.get(language.toString().toLowerCase())
.getPackageName();
ResourceNameMessageConfigs messageConfigs =
ResourceNameMessageConfigs.createMessageResourceTypesConfig(
model, configProto, defaultPackage);
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs =
createResourceNameConfigs(model.getDiagCollector(), configProto, language);
TransportProtocol transportProtocol = TransportProtocol.HTTP;
LanguageSettingsProto settings =
configProto.getLanguageSettingsMap().get(language.toString().toLowerCase());
if (settings == null) {
settings = LanguageSettingsProto.getDefaultInstance();
}
ImmutableMap<String, InterfaceConfig> interfaceConfigMap =
createDiscoGapicInterfaceConfigMap(
model, configProto, settings, messageConfigs, resourceNameConfigs, language);
ImmutableList<String> copyrightLines;
ImmutableList<String> licenseLines;
try {
LicenseHeaderUtil licenseHeaderUtil =
LicenseHeaderUtil.create(configProto, settings, model.getDiagCollector());
copyrightLines = licenseHeaderUtil.loadCopyrightLines();
licenseLines = licenseHeaderUtil.loadLicenseLines();
} catch (Exception e) {
model
.getDiagCollector()
.addDiag(Diag.error(SimpleLocation.TOPLEVEL, "Exception: %s", e.getMessage()));
e.printStackTrace(System.err);
throw new RuntimeException(e);
}
String configSchemaVersion = configProto.getConfigSchemaVersion();
if (Strings.isNullOrEmpty(configSchemaVersion)) {
model
.getDiagCollector()
.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"config_schema_version field is required in GAPIC yaml."));
}
return new AutoValue_GapicProductConfig(
interfaceConfigMap,
settings.getPackageName(),
settings.getDomainLayerLocation(),
settings.getReleaseLevel(),
messageConfigs,
copyrightLines,
licenseLines,
resourceNameConfigs,
new ProtoParser(false),
transportProtocol,
createResponseFieldConfigMap(messageConfigs, resourceNameConfigs),
configSchemaVersion);
}
  /** Creates a GapicProductConfig with no content. Exposed for testing. */
@VisibleForTesting
public static GapicProductConfig createDummyInstance() {
return createDummyInstance(ImmutableMap.of(), "", "", null, "1.0.0");
}
  /** Creates a GapicProductConfig with fixed content. Exposed for testing. */
@VisibleForTesting
public static GapicProductConfig createDummyInstance(
ImmutableMap<String, InterfaceConfig> interfaceConfigMap,
String packageName,
String domainLayerLocation,
ResourceNameMessageConfigs messageConfigs) {
return createDummyInstance(
interfaceConfigMap, packageName, domainLayerLocation, messageConfigs, "1.0.0");
}
  /** Creates a GapicProductConfig with fixed content. Exposed for testing. */
private static GapicProductConfig createDummyInstance(
ImmutableMap<String, InterfaceConfig> interfaceConfigMap,
String packageName,
String domainLayerLocation,
ResourceNameMessageConfigs messageConfigs,
String configSchemaVersion) {
return new AutoValue_GapicProductConfig(
interfaceConfigMap,
packageName,
domainLayerLocation,
ReleaseLevel.UNSET_RELEASE_LEVEL,
messageConfigs,
ImmutableList.of(),
ImmutableList.of(),
ImmutableMap.of(),
new ProtoParser(true),
// Default to gRPC.
TransportProtocol.GRPC,
createResponseFieldConfigMap(messageConfigs, ImmutableMap.of()),
configSchemaVersion);
}
/** Return the list of information about clients to be generated. */
private static ImmutableList<GapicInterfaceInput> createInterfaceInputsWithGapicConfig(
DiagCollector diagCollector,
List<InterfaceConfigProto> interfaceConfigProtosList,
ImmutableMap<String, Interface> protoInterfaces,
SymbolTable symbolTable) {
// Maps name of interfaces to found interfaces from proto.
Map<String, Interface> interfaceMap = new LinkedHashMap<>(protoInterfaces);
// Maps name of interfaces to found InterfaceConfigs from config yamls.
Map<String, InterfaceConfigProto> interfaceConfigProtos = new LinkedHashMap<>();
// Parse GAPIC config for interfaceConfigProtos.
for (InterfaceConfigProto interfaceConfigProto : interfaceConfigProtosList) {
Interface apiInterface = symbolTable.lookupInterface(interfaceConfigProto.getName());
if (apiInterface == null || !apiInterface.isReachable()) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"interface not found: %s",
interfaceConfigProto.getName()));
continue;
}
interfaceConfigProtos.put(interfaceConfigProto.getName(), interfaceConfigProto);
interfaceMap.put(interfaceConfigProto.getName(), apiInterface);
}
// Store info about each Interface in a GapicInterfaceInput object.
ImmutableList.Builder<GapicInterfaceInput> interfaceInputs = ImmutableList.builder();
for (Entry<String, Interface> interfaceEntry : interfaceMap.entrySet()) {
String serviceFullName = interfaceEntry.getKey();
Interface apiInterface = interfaceEntry.getValue();
GapicInterfaceInput.Builder interfaceInput =
GapicInterfaceInput.newBuilder().setInterface(apiInterface);
InterfaceConfigProto interfaceConfigProto =
interfaceConfigProtos.getOrDefault(
serviceFullName, InterfaceConfigProto.getDefaultInstance());
interfaceInput.setInterfaceConfigProto(interfaceConfigProto);
Map<Method, MethodConfigProto> methodsToGenerate;
methodsToGenerate =
findMethodsToGenerateWithConfigYaml(apiInterface, interfaceConfigProto, diagCollector);
if (methodsToGenerate == null) {
return null;
}
interfaceInput.setMethodsToGenerate(methodsToGenerate);
interfaceInputs.add(interfaceInput.build());
}
return interfaceInputs.build();
}
private static ImmutableMap<String, Interface> getInterfacesFromProtoFile(
DiagCollector diagCollector, List<ProtoFile> sourceProtos, SymbolTable symbolTable) {
// Maps name of interfaces to found interfaces from proto.
ImmutableMap.Builder<String, Interface> protoInterfaces = ImmutableMap.builder();
// Parse proto file for interfaces.
for (ProtoFile file : sourceProtos) {
for (DescriptorProtos.ServiceDescriptorProto service : file.getProto().getServiceList()) {
String serviceFullName =
String.format("%s.%s", file.getProto().getPackage(), service.getName());
Interface apiInterface = symbolTable.lookupInterface(serviceFullName);
if (apiInterface == null) {
diagCollector.addDiag(
Diag.error(SimpleLocation.TOPLEVEL, "interface not found: %s", service.getName()));
continue;
}
protoInterfaces.put(serviceFullName, apiInterface);
}
}
return protoInterfaces.build();
}
/** Return the list of information about clients to be generated. */
private static ImmutableList<GapicInterfaceInput> createInterfaceInputsWithoutGapicConfig(
Collection<Interface> protoInterfaces) {
// Store info about each Interface in a GapicInterfaceInput object.
ImmutableList.Builder<GapicInterfaceInput> interfaceInputs = ImmutableList.builder();
for (Interface apiInterface : protoInterfaces) {
GapicInterfaceInput.Builder interfaceInput =
GapicInterfaceInput.newBuilder()
.setInterface(apiInterface)
.setInterfaceConfigProto(InterfaceConfigProto.getDefaultInstance())
.setMethodsToGenerate(findMethodsToGenerateWithoutConfigYaml(apiInterface));
interfaceInputs.add(interfaceInput.build());
}
return interfaceInputs.build();
}
/** Find the methods that should be generated on the surface when no GAPIC config was given. */
private static ImmutableMap<Method, MethodConfigProto> findMethodsToGenerateWithoutConfigYaml(
Interface apiInterface) {
ImmutableMap.Builder<Method, MethodConfigProto> methodsToSurface = ImmutableMap.builder();
    // TODO(andrealin): After migration off GAPIC config is complete, generate all methods
// from protofile even if they aren't included in the GAPIC config.
// Just generate all methods defined in the protos.
apiInterface
.getMethods()
.forEach(m -> methodsToSurface.put(m, MethodConfigProto.getDefaultInstance()));
return methodsToSurface.build();
}
/** Find the methods that should be generated on the surface when a GAPIC config was given. */
@Nullable
private static ImmutableMap<Method, MethodConfigProto> findMethodsToGenerateWithConfigYaml(
Interface apiInterface,
InterfaceConfigProto interfaceConfigProto,
DiagCollector diagCollector) {
ImmutableMap.Builder<Method, MethodConfigProto> methodsToSurface = ImmutableMap.builder();
// Get the set of methods defined by the GAPIC config. Only these methods will be generated.
for (MethodConfigProto methodConfigProto : interfaceConfigProto.getMethodsList()) {
Interface targetInterface =
GapicInterfaceConfig.getTargetInterface(
apiInterface, methodConfigProto.getRerouteToGrpcInterface());
Method protoMethod = targetInterface.lookupMethod(methodConfigProto.getName());
if (protoMethod == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL, "method not found: %s", methodConfigProto.getName()));
continue;
}
methodsToSurface.put(protoMethod, methodConfigProto);
}
if (diagCollector.getErrorCount() > 0) {
return null;
}
return methodsToSurface.build();
}
private static ImmutableMap<String, InterfaceConfig> createInterfaceConfigMap(
DiagCollector diagCollector,
List<GapicInterfaceInput> interfaceInputs,
String defaultPackageName,
LanguageSettingsProto languageSettings,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
TargetLanguage language,
ProtoParser protoParser) {
// Return value; maps interface names to their InterfaceConfig.
ImmutableMap.Builder<String, InterfaceConfig> interfaceConfigMap = ImmutableMap.builder();
for (GapicInterfaceInput interfaceInput : interfaceInputs) {
String serviceFullName = interfaceInput.getServiceFullName();
String interfaceNameOverride = languageSettings.getInterfaceNamesMap().get(serviceFullName);
GapicInterfaceConfig interfaceConfig =
GapicInterfaceConfig.createInterfaceConfig(
diagCollector,
language,
defaultPackageName,
interfaceInput,
interfaceNameOverride,
messageConfigs,
resourceNameConfigs,
protoParser);
if (interfaceConfig == null) {
continue;
}
interfaceConfigMap.put(serviceFullName, interfaceConfig);
}
if (diagCollector.getErrorCount() > 0) {
return null;
} else {
return interfaceConfigMap.build();
}
}
private static ImmutableMap<String, InterfaceConfig> createDiscoGapicInterfaceConfigMap(
DiscoApiModel model,
ConfigProto configProto,
LanguageSettingsProto languageSettings,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
TargetLanguage language) {
ImmutableMap.Builder<String, InterfaceConfig> interfaceConfigMap = ImmutableMap.builder();
for (InterfaceConfigProto interfaceConfigProto : configProto.getInterfacesList()) {
String interfaceNameOverride =
languageSettings.getInterfaceNamesMap().get(interfaceConfigProto.getName());
DiscoGapicInterfaceConfig interfaceConfig =
DiscoGapicInterfaceConfig.createInterfaceConfig(
model,
language,
interfaceConfigProto,
interfaceNameOverride,
messageConfigs,
resourceNameConfigs);
if (interfaceConfig == null) {
continue;
}
interfaceConfigMap.put(interfaceConfigProto.getName(), interfaceConfig);
}
if (model.getDiagCollector().getErrorCount() > 0) {
return null;
} else {
return interfaceConfigMap.build();
}
}
private static ImmutableMap<String, ResourceNameConfig> createResourceNameConfigs(
DiagCollector diagCollector, ConfigProto configProto, TargetLanguage language) {
return createResourceNameConfigs(
diagCollector, configProto, null, language, ImmutableMap.of(), ImmutableMap.of(), null);
}
/**
* Create all the ResourceNameOneofConfig from the protofile and GAPIC config, and let the GAPIC
   * config resourceNames override the protofile resourceNames in the event of clashes.
*/
@VisibleForTesting
@Nullable
static ImmutableMap<String, ResourceNameConfig> createResourceNameConfigs(
DiagCollector diagCollector,
ConfigProto configProto,
@Nullable List<ProtoFile> protoFiles,
TargetLanguage language,
Map<Resource, ProtoFile> resourceDefs,
Map<ResourceSet, ProtoFile> resourceSetDefs,
ProtoParser protoParser) {
ProtoFile file = null;
if (protoFiles != null) {
file = protoFiles.get(0);
}
ImmutableMap<String, SingleResourceNameConfig> singleResourceNameConfigsFromGapicConfig =
createSingleResourceNameConfigs(diagCollector, configProto, protoFiles, language);
ImmutableMap<String, FixedResourceNameConfig> fixedResourceNameConfigs =
createFixedResourceNameConfigs(
diagCollector, configProto.getFixedResourceNameValuesList(), file);
ImmutableMap<String, ResourceNameOneofConfig> resourceNameOneofConfigsFromGapicConfig =
createResourceNameOneofConfigs(
diagCollector,
configProto.getCollectionOneofsList(),
singleResourceNameConfigsFromGapicConfig,
fixedResourceNameConfigs,
file);
if (diagCollector.getErrorCount() > 0) {
ToolUtil.reportDiags(diagCollector, true);
return null;
}
ImmutableMap<String, SingleResourceNameConfig>
fullyQualifiedSingleResourceNameConfigsFromProtoFile =
createSingleResourceNameConfigsFromProtoFile(diagCollector, resourceDefs, protoParser);
ImmutableMap<String, ResourceNameOneofConfig> resourceNameOneofConfigsFromProtoFile =
createResourceNameOneofConfigsFromProtoFile(
diagCollector,
fullyQualifiedSingleResourceNameConfigsFromProtoFile,
resourceSetDefs,
protoParser);
// Populate a SingleResourceNameConfigs map, using just the unqualified names.
Map<String, SingleResourceNameConfig> singleResourceConfigsFromProtoFile = new HashMap<>();
for (String fullName : fullyQualifiedSingleResourceNameConfigsFromProtoFile.keySet()) {
int periodIndex = fullName.lastIndexOf('.');
SingleResourceNameConfig config =
fullyQualifiedSingleResourceNameConfigsFromProtoFile.get(fullName);
singleResourceConfigsFromProtoFile.put(fullName.substring(periodIndex + 1), config);
}
// Combine the ResourceNameConfigs from the GAPIC and protofile.
Map<String, SingleResourceNameConfig> finalSingleResourceNameConfigs =
mergeResourceNameConfigs(
diagCollector,
singleResourceNameConfigsFromGapicConfig,
singleResourceConfigsFromProtoFile);
Map<String, ResourceNameOneofConfig> finalResourceOneofNameConfigs =
mergeResourceNameConfigs(
diagCollector,
resourceNameOneofConfigsFromGapicConfig,
resourceNameOneofConfigsFromProtoFile);
ImmutableMap.Builder<String, ResourceNameConfig> resourceNameConfigs =
new ImmutableSortedMap.Builder<>(Comparator.naturalOrder());
resourceNameConfigs.putAll(finalSingleResourceNameConfigs);
resourceNameConfigs.putAll(fixedResourceNameConfigs);
resourceNameConfigs.putAll(finalResourceOneofNameConfigs);
return resourceNameConfigs.build();
}
// Return map of fully qualified SingleResourceNameConfig name to its derived config.
private static ImmutableMap<String, SingleResourceNameConfig>
createSingleResourceNameConfigsFromProtoFile(
DiagCollector diagCollector,
Map<Resource, ProtoFile> resourceDefs,
ProtoParser protoParser) {
// Map of fully qualified Resource name to its derived config.
LinkedHashMap<String, SingleResourceNameConfig> fullyQualifiedSingleResources =
new LinkedHashMap<>();
// Create the SingleResourceNameConfigs.
for (Resource resource : resourceDefs.keySet()) {
String resourcePath = resource.getPath();
ProtoFile protoFile = resourceDefs.get(resource);
createSingleResourceNameConfig(
diagCollector,
resource,
protoFile,
resourcePath,
protoParser,
fullyQualifiedSingleResources);
}
if (diagCollector.getErrorCount() > 0) {
ToolUtil.reportDiags(diagCollector, true);
return null;
}
return ImmutableMap.copyOf(fullyQualifiedSingleResources);
}
// Return map of fully qualified ResourceNameOneofConfig name to its derived config.
private static ImmutableMap<String, ResourceNameOneofConfig>
createResourceNameOneofConfigsFromProtoFile(
DiagCollector diagCollector,
ImmutableMap<String, SingleResourceNameConfig> fullyQualifiedSingleResourcesFromProtoFile,
Map<ResourceSet, ProtoFile> resourceSetDefs,
ProtoParser protoParser) {
// Map of fully qualified ResourceSet name to its derived config.
ImmutableMap.Builder<String, ResourceNameOneofConfig> resourceOneOfConfigsFromProtoFile =
ImmutableMap.builder();
// Create the ResourceNameOneOfConfigs.
for (ResourceSet resourceSet : resourceSetDefs.keySet()) {
ProtoFile protoFile = resourceSetDefs.get(resourceSet);
String resourceSetName = resourceSet.getName();
ResourceNameOneofConfig resourceNameOneofConfig =
ResourceNameOneofConfig.createResourceNameOneof(
diagCollector,
resourceSet,
resourceSetName,
fullyQualifiedSingleResourcesFromProtoFile,
protoParser,
protoFile);
if (resourceNameOneofConfig == null) {
return null;
}
resourceOneOfConfigsFromProtoFile.put(resourceSetName, resourceNameOneofConfig);
}
if (diagCollector.getErrorCount() > 0) {
ToolUtil.reportDiags(diagCollector, true);
return null;
}
return resourceOneOfConfigsFromProtoFile.build();
}
private static <T extends ResourceNameConfig> ImmutableMap<String, T> mergeResourceNameConfigs(
DiagCollector diagCollector,
Map<String, T> configsFromGapicConfig,
Map<String, T> configsFromProtoFile) {
Map<String, T> mergedResourceNameConfigs = new HashMap<>(configsFromProtoFile);
// If protofile annotations clash with the configs from configProto, use the configProto.
for (T resourceFromGapicConfig : configsFromGapicConfig.values()) {
if (configsFromProtoFile.containsKey(resourceFromGapicConfig.getEntityId())) {
diagCollector.addDiag(
Diag.warning(
SimpleLocation.TOPLEVEL,
"Resource[Set] entity %s from protofile clashes with a"
+ " Resource[Set] of the same name from the GAPIC config."
+ " Using the GAPIC config entity.",
resourceFromGapicConfig.getEntityId()));
}
// Add the protofile resourceNameConfigs to the map of resourceNameConfigs.
mergedResourceNameConfigs.put(resourceFromGapicConfig.getEntityId(), resourceFromGapicConfig);
}
return ImmutableMap.copyOf(mergedResourceNameConfigs);
}
private static ImmutableMap<String, SingleResourceNameConfig> createSingleResourceNameConfigs(
DiagCollector diagCollector,
ConfigProto configProto,
@Nullable List<ProtoFile> sourceProtos,
TargetLanguage language) {
ProtoFile file = null;
if (sourceProtos != null) {
file = sourceProtos.get(0);
}
LinkedHashMap<String, SingleResourceNameConfig> singleResourceNameConfigsMap =
new LinkedHashMap<>();
for (CollectionConfigProto collectionConfigProto : configProto.getCollectionsList()) {
createSingleResourceNameConfig(
diagCollector, collectionConfigProto, singleResourceNameConfigsMap, file, language);
}
for (InterfaceConfigProto interfaceConfigProto : configProto.getInterfacesList()) {
for (CollectionConfigProto collectionConfigProto :
interfaceConfigProto.getCollectionsList()) {
createSingleResourceNameConfig(
diagCollector, collectionConfigProto, singleResourceNameConfigsMap, file, language);
}
}
if (diagCollector.getErrorCount() > 0) {
return null;
} else {
return ImmutableMap.copyOf(singleResourceNameConfigsMap);
}
}
private static void createSingleResourceNameConfig(
DiagCollector diagCollector,
CollectionConfigProto collectionConfigProto,
LinkedHashMap<String, SingleResourceNameConfig> singleResourceNameConfigsMap,
@Nullable ProtoFile file,
TargetLanguage language) {
SingleResourceNameConfig singleResourceNameConfig =
SingleResourceNameConfig.createSingleResourceName(
diagCollector, collectionConfigProto, file, language);
if (singleResourceNameConfig == null) {
return;
}
if (singleResourceNameConfigsMap.containsKey(singleResourceNameConfig.getEntityId())) {
SingleResourceNameConfig otherConfig =
singleResourceNameConfigsMap.get(singleResourceNameConfig.getEntityId());
if (!singleResourceNameConfig.getNamePattern().equals(otherConfig.getNamePattern())) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Inconsistent collection configs across interfaces. Entity name: "
+ singleResourceNameConfig.getEntityId()));
}
} else {
singleResourceNameConfigsMap.put(
singleResourceNameConfig.getEntityId(), singleResourceNameConfig);
}
}
// Construct a new SingleResourceNameConfig from the given Resource, and add the newly
// created config as a value to the map param, keyed on the package-qualified entity_id.
private static void createSingleResourceNameConfig(
DiagCollector diagCollector,
Resource resource,
ProtoFile file,
String pathTemplate,
ProtoParser protoParser,
LinkedHashMap<String, SingleResourceNameConfig> singleResourceNameConfigsMap) {
SingleResourceNameConfig singleResourceNameConfig =
SingleResourceNameConfig.createSingleResourceName(
resource, pathTemplate, file, diagCollector);
if (singleResourceNameConfigsMap.containsKey(singleResourceNameConfig.getEntityId())) {
SingleResourceNameConfig otherConfig =
singleResourceNameConfigsMap.get(singleResourceNameConfig.getEntityId());
if (!singleResourceNameConfig.getNamePattern().equals(otherConfig.getNamePattern())) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Inconsistent collection configs across interfaces. Entity name: "
+ singleResourceNameConfig.getEntityId()));
}
} else {
String fullyQualifiedName = singleResourceNameConfig.getEntityId();
fullyQualifiedName =
StringUtils.prependIfMissing(fullyQualifiedName, protoParser.getProtoPackage(file) + ".");
singleResourceNameConfigsMap.put(fullyQualifiedName, singleResourceNameConfig);
}
}
private static ImmutableMap<String, FixedResourceNameConfig> createFixedResourceNameConfigs(
DiagCollector diagCollector,
Iterable<FixedResourceNameValueProto> fixedConfigProtos,
@Nullable ProtoFile file) {
ImmutableMap.Builder<String, FixedResourceNameConfig> fixedConfigBuilder =
ImmutableMap.builder();
for (FixedResourceNameValueProto fixedConfigProto : fixedConfigProtos) {
FixedResourceNameConfig fixedConfig =
FixedResourceNameConfig.createFixedResourceNameConfig(
diagCollector, fixedConfigProto, file);
if (fixedConfig == null) {
continue;
}
fixedConfigBuilder.put(fixedConfig.getEntityId(), fixedConfig);
}
return fixedConfigBuilder.build();
}
private static ImmutableMap<String, ResourceNameOneofConfig> createResourceNameOneofConfigs(
DiagCollector diagCollector,
Iterable<CollectionOneofProto> oneofConfigProtos,
ImmutableMap<String, SingleResourceNameConfig> singleResourceNameConfigs,
ImmutableMap<String, FixedResourceNameConfig> fixedResourceNameConfigs,
@Nullable ProtoFile file) {
ImmutableMap.Builder<String, ResourceNameOneofConfig> oneofConfigBuilder =
ImmutableMap.builder();
for (CollectionOneofProto oneofProto : oneofConfigProtos) {
ResourceNameOneofConfig oneofConfig =
ResourceNameOneofConfig.createResourceNameOneof(
diagCollector, oneofProto, singleResourceNameConfigs, fixedResourceNameConfigs, file);
if (oneofConfig == null) {
continue;
}
oneofConfigBuilder.put(oneofConfig.getEntityName(), oneofConfig);
}
return oneofConfigBuilder.build();
}
private static ImmutableMap<String, FieldConfig> createResponseFieldConfigMap(
ResourceNameMessageConfigs messageConfig,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs) {
ImmutableMap.Builder<String, FieldConfig> builder = ImmutableMap.builder();
if (messageConfig == null) {
return builder.build();
}
Map<String, FieldConfig> map = new HashMap<>();
for (FieldModel field : messageConfig.getFieldsWithResourceNamesByMessage().values()) {
map.put(
field.getFullName(),
FieldConfig.createMessageFieldConfig(
messageConfig, resourceNameConfigs, field, ResourceNameTreatment.STATIC_TYPES));
}
builder.putAll(map);
return builder.build();
}
/** Returns the GapicInterfaceConfig for the given API interface. */
public GapicInterfaceConfig getInterfaceConfig(Interface apiInterface) {
return (GapicInterfaceConfig) getInterfaceConfigMap().get(apiInterface.getFullName());
}
/** Returns the GapicInterfaceConfig for the given API interface. */
@Override
public InterfaceConfig getInterfaceConfig(InterfaceModel apiInterface) {
return getInterfaceConfigMap().get(apiInterface.getFullName());
}
/** Returns the GapicInterfaceConfig for the given API method. */
public InterfaceConfig getInterfaceConfig(String fullName) {
return getInterfaceConfigMap().get(fullName);
}
public Iterable<SingleResourceNameConfig> getSingleResourceNameConfigs() {
return Iterables.filter(getResourceNameConfigs().values(), SingleResourceNameConfig.class);
}
/**
* Returns a SingleResourceNameConfig object for the given entity name. If the entityName
* corresponds to a ResourceNameOneofConfig which contains at least one SingleResourceNameConfig,
   * then the first of those SingleResourceNameConfigs is returned. If the entityName is neither a
   * SingleResourceNameConfig nor a ResourceNameOneofConfig containing a SingleResourceNameConfig, then
* returns null.
*/
public SingleResourceNameConfig getSingleResourceNameConfig(String entityName) {
ResourceNameConfig resourceNameConfig = getResourceNameConfigs().get(entityName);
if (resourceNameConfig instanceof SingleResourceNameConfig) {
return (SingleResourceNameConfig) resourceNameConfig;
}
if (resourceNameConfig instanceof ResourceNameOneofConfig) {
ResourceNameOneofConfig oneofConfig = (ResourceNameOneofConfig) resourceNameConfig;
if (Iterables.size(oneofConfig.getSingleResourceNameConfigs()) > 0) {
return Iterables.get(oneofConfig.getSingleResourceNameConfigs(), 0);
}
}
return null;
}
/** Returns a base package name for an API's client. */
@Nullable
public static String getPackageName(Model model) {
if (model.getServiceConfig().getApisCount() > 0) {
Api api = model.getServiceConfig().getApis(0);
Interface apiInterface = model.getSymbolTable().lookupInterface(api.getName());
if (apiInterface != null) {
return apiInterface.getFile().getFullName();
}
}
return null;
}
private static FeatureConfig getDefaultLanguageFeatureConfig(
TargetLanguage targetLanguage, ResourceNameMessageConfigs resourceNameMessageConfigs) {
switch (targetLanguage) {
case JAVA:
return JavaFeatureConfig.newBuilder()
.enableStringFormatFunctions(
resourceNameMessageConfigs == null || resourceNameMessageConfigs.isEmpty())
.build();
case CSHARP:
return new CSharpFeatureConfig();
case NODEJS:
return new NodeJSFeatureConfig();
case PHP:
return new PhpFeatureConfig();
case RUBY:
return new RubyFeatureConfig();
default:
return new DefaultFeatureConfig();
}
}
}
| 1 | 27,967 | Nit: Maybe merge this with the configProto check on line 208? Don't feel that strongly though, please keep separate if you prefer. | googleapis-gapic-generator | java |
@@ -138,7 +138,7 @@ describe.skip('Buffering Proxy', function() {
if (die) {
request.connection.destroy();
} else {
- if (doc.ismaster) {
+ if (doc.ismaster || doc.hello) {
request.reply(primary[currentIsMasterIndex]);
} else if (doc.insert) {
request.reply({ ok: 1, n: 1 }); | 1 | 'use strict';
var test = require('./shared').assert;
var co = require('co');
var mock = require('mongodb-mock-server');
// Shallow-merges the properties of `fields` over a copy of `template`.
var extend = function(template, fields) {
var object = {};
for (var name in template) {
object[name] = template[name];
}
for (var fieldName in fields) {
    object[fieldName] = fields[fieldName];
}
return object;
};
describe.skip('Buffering Proxy', function() {
afterEach(() => mock.cleanup());
it('successfully handle buffering store execution for primary server', {
metadata: {
requires: {
generators: true,
topology: 'single'
}
},
test: function(done) {
var configuration = this.configuration,
ObjectId = configuration.require.ObjectId,
ReadPreference = configuration.require.ReadPreference;
var currentIsMasterIndex = 0;
var electionIds = [new ObjectId(0), new ObjectId(1)];
// Default message fields
var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, {
setName: 'rs',
setVersion: 1,
electionId: electionIds[0],
hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002'],
arbiters: ['localhost:32002']
});
// Primary server states
var primary = [
extend(defaultFields, {
ismaster: true,
secondary: false,
me: 'localhost:32000',
primary: 'localhost:32000',
tags: { loc: 'ny' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32000',
primary: 'localhost:32000',
tags: { loc: 'ny' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32000',
primary: 'localhost:32001',
tags: { loc: 'ny' },
electionId: electionIds[1]
})
];
      // First secondary server states
var firstSecondary = [
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32001',
primary: 'localhost:32000',
tags: { loc: 'sf' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32001',
primary: 'localhost:32000',
tags: { loc: 'sf' }
}),
extend(defaultFields, {
ismaster: true,
secondary: false,
me: 'localhost:32001',
primary: 'localhost:32001',
tags: { loc: 'ny' },
electionId: electionIds[1]
})
];
      // Second secondary server states
var secondSecondary = [
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32002',
primary: 'localhost:32000',
tags: { loc: 'sf' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32002',
primary: 'localhost:32000',
tags: { loc: 'sf' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32002',
primary: 'localhost:32001',
tags: { loc: 'ny' },
electionId: electionIds[1]
})
];
// Die
var die = false;
var dieSecondary = false;
// Boot the mock
co(function*() {
const primaryServer = yield mock.createServer(32000, 'localhost');
const firstSecondaryServer = yield mock.createServer(32001, 'localhost');
const secondSecondaryServer = yield mock.createServer(32002, 'localhost');
primaryServer.setMessageHandler(request => {
var doc = request.document;
if (die) {
request.connection.destroy();
} else {
if (doc.ismaster) {
request.reply(primary[currentIsMasterIndex]);
} else if (doc.insert) {
request.reply({ ok: 1, n: 1 });
} else if (doc.aggregate) {
request.reply({ ok: 1, n: 1 });
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
}
});
firstSecondaryServer.setMessageHandler(request => {
var doc = request.document;
if (die || dieSecondary) {
request.connection.destroy();
} else {
if (doc.ismaster) {
request.reply(firstSecondary[currentIsMasterIndex]);
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
}
});
secondSecondaryServer.setMessageHandler(request => {
var doc = request.document;
if (die || dieSecondary) {
request.connection.destroy();
} else {
if (doc.ismaster) {
request.reply(secondSecondary[currentIsMasterIndex]);
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
}
});
const client = configuration.newClient(
'mongodb://localhost:32000,localhost:32001,localhost:32002/test?replicaSet=rs',
{
socketTimeoutMS: 2000,
haInterval: 1000
}
);
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
var results = [];
setTimeout(function() {
die = true;
dieSecondary = true;
setTimeout(function() {
db.collection('test').insertOne({ a: 1 }, function(err) {
test.equal(null, err);
results.push('insertOne');
});
db.command(
{ count: 'test', query: {} },
{ readPreference: new ReadPreference(ReadPreference.SECONDARY) },
function(err) {
test.equal(null, err);
results.push('count');
}
);
db.collection('test')
.aggregate([{ $match: {} }])
.toArray(function(err) {
test.equal(null, err);
results.push('aggregate');
});
db.collection('test')
.find({})
.setReadPreference(new ReadPreference(ReadPreference.SECONDARY))
.toArray(function(err) {
test.equal(null, err);
results.push('find');
});
setTimeout(function() {
die = false;
setTimeout(function() {
test.deepEqual(['insertOne', 'aggregate'].sort(), results.sort());
client.close(done);
}, 1000);
}, 1000);
}, 3000);
}, 1000);
});
});
}
});
it('successfully handle buffering store execution for secondary server', {
metadata: {
requires: {
generators: true,
topology: 'single'
}
},
test: function(done) {
var configuration = this.configuration,
ObjectId = configuration.require.ObjectId,
ReadPreference = configuration.require.ReadPreference;
var currentIsMasterIndex = 0;
var electionIds = [new ObjectId(0), new ObjectId(1)];
// Default message fields
var defaultFields = Object.assign({}, mock.DEFAULT_ISMASTER, {
setName: 'rs',
setVersion: 1,
electionId: electionIds[0],
hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002'],
arbiters: ['localhost:32002']
});
// Primary server states
var primary = [
extend(defaultFields, {
ismaster: true,
secondary: false,
me: 'localhost:32000',
primary: 'localhost:32000',
tags: { loc: 'ny' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32000',
primary: 'localhost:32000',
tags: { loc: 'ny' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32000',
primary: 'localhost:32001',
tags: { loc: 'ny' },
electionId: electionIds[1]
})
];
      // First secondary server states
var firstSecondary = [
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32001',
primary: 'localhost:32000',
tags: { loc: 'sf' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32001',
primary: 'localhost:32000',
tags: { loc: 'sf' }
}),
extend(defaultFields, {
ismaster: true,
secondary: false,
me: 'localhost:32001',
primary: 'localhost:32001',
tags: { loc: 'ny' },
electionId: electionIds[1]
})
];
      // Second secondary server states
var secondSecondary = [
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32002',
primary: 'localhost:32000',
tags: { loc: 'sf' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32002',
primary: 'localhost:32000',
tags: { loc: 'sf' }
}),
extend(defaultFields, {
ismaster: false,
secondary: true,
me: 'localhost:32002',
primary: 'localhost:32001',
tags: { loc: 'ny' },
electionId: electionIds[1]
})
];
// Die
var die = false;
var diePrimary = false;
// Boot the mock
co(function*() {
const primaryServer = yield mock.createServer(32000, 'localhost');
const firstSecondaryServer = yield mock.createServer(32001, 'localhost');
const secondSecondaryServer = yield mock.createServer(32002, 'localhost');
primaryServer.setMessageHandler(request => {
var doc = request.document;
if (die || diePrimary) {
request.connection.destroy();
} else {
if (doc.ismaster) {
request.reply(primary[currentIsMasterIndex]);
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
}
});
firstSecondaryServer.setMessageHandler(request => {
var doc = request.document;
if (die) {
request.connection.destroy();
} else {
if (doc.ismaster) {
request.reply(firstSecondary[currentIsMasterIndex]);
} else if (doc.count) {
request.reply({ ok: 1, n: 10 });
} else if (doc.find) {
request.reply({ ok: 1, n: 10 });
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
}
});
secondSecondaryServer.setMessageHandler(request => {
var doc = request.document;
if (die) {
request.connection.destroy();
} else {
if (doc.ismaster) {
request.reply(secondSecondary[currentIsMasterIndex]);
} else if (doc.count) {
request.reply({ ok: 1, n: 10 });
} else if (doc.find) {
request.reply({ ok: 1, n: 10 });
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
}
});
const client = configuration.newClient(
'mongodb://localhost:32000,localhost:32001,localhost:32002/test?replicaSet=rs',
{
socketTimeoutMS: 2000,
haInterval: 1000
}
);
client.connect(function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
setTimeout(function() {
die = true;
diePrimary = true;
setTimeout(function() {
var results = [];
db.collection('test').insertOne({ a: 1 }, function(err) {
test.equal(null, err);
results.push('insertOne');
});
db.command(
{ count: 'test', query: {} },
{ readPreference: new ReadPreference(ReadPreference.SECONDARY) },
function(err) {
test.equal(null, err);
results.push('count');
}
);
db.collection('test')
.aggregate([{ $match: {} }])
.toArray(function(err) {
test.equal(null, err);
results.push('aggregate');
});
db.collection('test')
.find({})
.setReadPreference(new ReadPreference(ReadPreference.SECONDARY))
.toArray(function(err) {
test.equal(null, err);
results.push('find');
});
setTimeout(function() {
die = false;
setTimeout(function() {
test.deepEqual(['count', 'find'].sort(), results.sort());
client.close(done);
}, 1500);
}, 1000);
}, 3000);
}, 1000);
});
});
}
});
});
| 1 | 20,069 | I know we already did this in 4.0 so carry on, but I'm facepalming here we should've made an overrided setMessageHandler that sets ismaster to true when hello is present. Esp in 4.0 where we already have some overrides written. We can keep going here and maybe open a ticket to some effect of that solution | mongodb-node-mongodb-native | js |
@@ -7,6 +7,10 @@
<%= json_api_link_tag %>
<% end %>
+<% content_for(:skip_links) do -%>
+ <a href="#documents" class="element-invisible element-focusable" data-turbolinks="false">Skip to first result</a>
+<% end %>
+
<% content_for(:container_header) do -%>
<h1 class="sr-only top-content-title"><%= t('blacklight.search.header') %></h1>
| 1 | <% @page_title = t('blacklight.search.page_title.title', :constraints => render_search_to_page_title(params), :application_name => application_name) %>
<% content_for(:head) do -%>
<%= render_opensearch_response_metadata %>
<%= rss_feed_link_tag %>
<%= atom_feed_link_tag %>
<%= json_api_link_tag %>
<% end %>
<% content_for(:container_header) do -%>
<h1 class="sr-only top-content-title"><%= t('blacklight.search.header') %></h1>
<%= render 'constraints' %>
<% end %>
<%= render 'search_header' %>
<h2 class="sr-only"><%= t('blacklight.search.search_results') %></h2>
<%- if @response.empty? %>
<%= render "zero_results" %>
<%- elsif render_grouped_response? %>
<%= render_grouped_document_index %>
<%- else %>
<%= render_document_index %>
<%- end %>
<%= render 'results_pagination' %>
| 1 | 8,413 | I believe we could use `link_to` here (although not super important). Also, we should internationalize this value. I'm not 100% sure what our process is for adding new UI strings these days (e.g. skip the specs that will fail if we add a new key to only one language, use Google Translate to translate the value for us, some other option). | projectblacklight-blacklight | rb |
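A minimal sketch of the reviewer's suggestion, assuming a hypothetical blacklight.search.skip_links.first_result translation key (the actual key name and the project's process for adding new UI strings are left open in the comment):

<% content_for(:skip_links) do -%>
  <%= link_to t('blacklight.search.skip_links.first_result', default: 'Skip to first result'),
              '#documents',
              class: 'element-invisible element-focusable',
              data: { turbolinks: false } %>
<% end %>

link_to renders the same anchor markup as the hand-written <a> tag in the patch, and the data: { turbolinks: false } hash produces the data-turbolinks="false" attribute.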
@@ -1323,11 +1323,9 @@ VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalD
instance_data->instance_dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);
lock.lock();
- if (result == VK_SUCCESS) {
- if (pProperties) {
- for (uint32_t i = 0; i < *pPropertyCount; ++i) {
- CreateObject(physicalDevice, pProperties[i].display, kVulkanObjectTypeDisplayKHR, nullptr);
- }
+ if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
+ for (uint32_t i = 0; i < *pPropertyCount; ++i) {
+ CreateObject(physicalDevice, pProperties[i].display, kVulkanObjectTypeDisplayKHR, nullptr);
}
}
lock.unlock(); | 1 | /* Copyright (c) 2015-2018 The Khronos Group Inc.
* Copyright (c) 2015-2018 Valve Corporation
* Copyright (c) 2015-2018 LunarG, Inc.
* Copyright (C) 2015-2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <[email protected]>
* Author: Jon Ashburn <[email protected]>
* Author: Tobin Ehlis <[email protected]>
*/
#include "object_tracker.h"
namespace object_tracker {
std::unordered_map<void *, layer_data *> layer_data_map;
std::mutex global_lock;
uint64_t object_track_index = 0;
uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
void InitObjectTracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
layer_debug_report_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
layer_debug_messenger_actions(my_data->report_data, my_data->logging_messenger, pAllocator, "lunarg_object_tracker");
}
// Add new queue to head of global queue list
void AddQueueInfo(VkDevice device, uint32_t queue_node_index, VkQueue queue) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto queueItem = device_data->queue_info_map.find(queue);
if (queueItem == device_data->queue_info_map.end()) {
ObjTrackQueueInfo *p_queue_info = new ObjTrackQueueInfo;
if (p_queue_info != NULL) {
memset(p_queue_info, 0, sizeof(ObjTrackQueueInfo));
p_queue_info->queue = queue;
p_queue_info->queue_node_index = queue_node_index;
device_data->queue_info_map[queue] = p_queue_info;
} else {
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(queue), kVUID_ObjectTracker_InternalError,
"ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
}
}
}
// Destroy memRef lists and free all memory
void DestroyQueueDataStructures(VkDevice device) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
for (auto queue_item : device_data->queue_info_map) {
delete queue_item.second;
}
device_data->queue_info_map.clear();
// Destroy the items in the queue map
auto queue = device_data->object_map[kVulkanObjectTypeQueue].begin();
while (queue != device_data->object_map[kVulkanObjectTypeQueue].end()) {
uint32_t obj_index = queue->second->object_type;
assert(device_data->num_total_objects > 0);
device_data->num_total_objects--;
assert(device_data->num_objects[obj_index] > 0);
device_data->num_objects[obj_index]--;
log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
queue->second->handle, kVUID_ObjectTracker_Info,
"OBJ_STAT Destroy Queue obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " Queue objs).",
queue->second->handle, device_data->num_total_objects, device_data->num_objects[obj_index]);
delete queue->second;
queue = device_data->object_map[kVulkanObjectTypeQueue].erase(queue);
}
}
// Check Queue type flags for selected queue operations
void ValidateQueueFlags(VkQueue queue, const char *function) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
auto queue_item = device_data->queue_info_map.find(queue);
if (queue_item != device_data->queue_info_map.end()) {
ObjTrackQueueInfo *pQueueInfo = queue_item->second;
if (pQueueInfo != NULL) {
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(device_data->physical_device), layer_data_map);
if ((instance_data->queue_family_properties[pQueueInfo->queue_node_index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) ==
0) {
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(queue), "VUID-vkQueueBindSparse-queuetype",
"Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.", function);
}
}
}
}
// Look for this device object in any of the instance child devices lists.
// NOTE: This is of dubious value. In most circumstances Vulkan will die a flaming death if a dispatchable object is invalid.
// However, if this layer is loaded first and GetProcAddress is used to make API calls, it will detect bad DOs.
bool ValidateDeviceObject(uint64_t device_handle, const std::string &invalid_handle_code, const std::string &wrong_device_code) {
VkInstance last_instance = nullptr;
for (auto layer_data : layer_data_map) {
for (auto object : layer_data.second->object_map[kVulkanObjectTypeDevice]) {
// Grab last instance to use for possible error message
last_instance = layer_data.second->instance;
if (object.second->handle == device_handle) return false;
}
}
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(last_instance), layer_data_map);
return log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device_handle,
invalid_handle_code, "Invalid Device Object 0x%" PRIxLEAST64 ".", device_handle);
}
void AllocateCommandBuffer(VkDevice device, const VkCommandPool command_pool, const VkCommandBuffer command_buffer,
VkCommandBufferLevel level) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), kVUID_ObjectTracker_Info, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT", HandleToUint64(command_buffer));
ObjTrackState *pNewObjNode = new ObjTrackState;
pNewObjNode->object_type = kVulkanObjectTypeCommandBuffer;
pNewObjNode->handle = HandleToUint64(command_buffer);
pNewObjNode->parent_object = HandleToUint64(command_pool);
if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
} else {
pNewObjNode->status = OBJSTATUS_NONE;
}
device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)] = pNewObjNode;
device_data->num_objects[kVulkanObjectTypeCommandBuffer]++;
device_data->num_total_objects++;
}
bool ValidateCommandBuffer(VkDevice device, VkCommandPool command_pool, VkCommandBuffer command_buffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
uint64_t object_handle = HandleToUint64(command_buffer);
if (device_data->object_map[kVulkanObjectTypeCommandBuffer].find(object_handle) !=
device_data->object_map[kVulkanObjectTypeCommandBuffer].end()) {
ObjTrackState *pNode = device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)];
if (pNode->parent_object != HandleToUint64(command_pool)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
object_handle, "VUID-vkFreeCommandBuffers-pCommandBuffers-parent",
"FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
" belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
HandleToUint64(command_buffer), pNode->parent_object, HandleToUint64(command_pool));
}
} else {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
object_handle, "VUID-vkFreeCommandBuffers-pCommandBuffers-00048", "Invalid %s Object 0x%" PRIxLEAST64 ".",
object_string[kVulkanObjectTypeCommandBuffer], object_handle);
}
return skip;
}
void AllocateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(descriptor_set), kVUID_ObjectTracker_Info, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT", HandleToUint64(descriptor_set));
ObjTrackState *pNewObjNode = new ObjTrackState;
pNewObjNode->object_type = kVulkanObjectTypeDescriptorSet;
pNewObjNode->status = OBJSTATUS_NONE;
pNewObjNode->handle = HandleToUint64(descriptor_set);
pNewObjNode->parent_object = HandleToUint64(descriptor_pool);
device_data->object_map[kVulkanObjectTypeDescriptorSet][HandleToUint64(descriptor_set)] = pNewObjNode;
device_data->num_objects[kVulkanObjectTypeDescriptorSet]++;
device_data->num_total_objects++;
}
bool ValidateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
uint64_t object_handle = HandleToUint64(descriptor_set);
auto dsItem = device_data->object_map[kVulkanObjectTypeDescriptorSet].find(object_handle);
if (dsItem != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
ObjTrackState *pNode = dsItem->second;
if (pNode->parent_object != HandleToUint64(descriptor_pool)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
object_handle, "VUID-vkFreeDescriptorSets-pDescriptorSets-parent",
"FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
" belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
HandleToUint64(descriptor_set), pNode->parent_object, HandleToUint64(descriptor_pool));
}
} else {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
object_handle, "VUID-vkFreeDescriptorSets-pDescriptorSets-00310", "Invalid %s Object 0x%" PRIxLEAST64 ".",
object_string[kVulkanObjectTypeDescriptorSet], object_handle);
}
return skip;
}
template <typename DispObj>
static bool ValidateDescriptorWrite(DispObj disp, VkWriteDescriptorSet const *desc, bool isPush) {
bool skip = false;
if (!isPush && desc->dstSet) {
skip |= ValidateObject(disp, desc->dstSet, kVulkanObjectTypeDescriptorSet, false, "VUID-VkWriteDescriptorSet-dstSet-00320",
"VUID-VkWriteDescriptorSet-commonparent");
}
if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) ||
(desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)) {
for (uint32_t idx2 = 0; idx2 < desc->descriptorCount; ++idx2) {
skip |= ValidateObject(disp, desc->pTexelBufferView[idx2], kVulkanObjectTypeBufferView, false,
"VUID-VkWriteDescriptorSet-descriptorType-00323", "VUID-VkWriteDescriptorSet-commonparent");
}
}
if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
(desc->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) ||
(desc->descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) {
for (uint32_t idx3 = 0; idx3 < desc->descriptorCount; ++idx3) {
skip |= ValidateObject(disp, desc->pImageInfo[idx3].imageView, kVulkanObjectTypeImageView, false,
"VUID-VkWriteDescriptorSet-descriptorType-00326", "VUID-VkDescriptorImageInfo-commonparent");
}
}
if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
(desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
(desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
(desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
for (uint32_t idx4 = 0; idx4 < desc->descriptorCount; ++idx4) {
if (desc->pBufferInfo[idx4].buffer) {
skip |= ValidateObject(disp, desc->pBufferInfo[idx4].buffer, kVulkanObjectTypeBuffer, false,
"VUID-VkDescriptorBufferInfo-buffer-parameter", kVUIDUndefined);
}
}
}
return skip;
}
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |=
ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false,
"VUID-vkCmdPushDescriptorSetKHR-commandBuffer-parameter", "VUID-vkCmdPushDescriptorSetKHR-commonparent");
skip |= ValidateObject(commandBuffer, layout, kVulkanObjectTypePipelineLayout, false,
"VUID-vkCmdPushDescriptorSetKHR-layout-parameter", "VUID-vkCmdPushDescriptorSetKHR-commonparent");
if (pDescriptorWrites) {
for (uint32_t index0 = 0; index0 < descriptorWriteCount; ++index0) {
skip |= ValidateDescriptorWrite(commandBuffer, &pDescriptorWrites[index0], true);
}
}
}
if (skip) return;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
device_data->device_dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
pDescriptorWrites);
}
void CreateQueue(VkDevice device, VkQueue vkObj) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(vkObj), kVUID_ObjectTracker_Info, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT", HandleToUint64(vkObj));
ObjTrackState *p_obj_node = NULL;
auto queue_item = device_data->object_map[kVulkanObjectTypeQueue].find(HandleToUint64(vkObj));
if (queue_item == device_data->object_map[kVulkanObjectTypeQueue].end()) {
p_obj_node = new ObjTrackState;
device_data->object_map[kVulkanObjectTypeQueue][HandleToUint64(vkObj)] = p_obj_node;
device_data->num_objects[kVulkanObjectTypeQueue]++;
device_data->num_total_objects++;
} else {
p_obj_node = queue_item->second;
}
p_obj_node->object_type = kVulkanObjectTypeQueue;
p_obj_node->status = OBJSTATUS_NONE;
p_obj_node->handle = HandleToUint64(vkObj);
}
void CreateSwapchainImageObject(VkDevice dispatchable_object, VkImage swapchain_image, VkSwapchainKHR swapchain) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(dispatchable_object), layer_data_map);
log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(swapchain_image), kVUID_ObjectTracker_Info, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
object_track_index++, "SwapchainImage", HandleToUint64(swapchain_image));
ObjTrackState *pNewObjNode = new ObjTrackState;
pNewObjNode->object_type = kVulkanObjectTypeImage;
pNewObjNode->status = OBJSTATUS_NONE;
pNewObjNode->handle = HandleToUint64(swapchain_image);
pNewObjNode->parent_object = HandleToUint64(swapchain);
device_data->swapchainImageMap[HandleToUint64(swapchain_image)] = pNewObjNode;
}
void DeviceReportUndestroyedObjects(VkDevice device, VulkanObjectType object_type, const std::string &error_code) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
for (const auto &item : device_data->object_map[object_type]) {
const ObjTrackState *object_info = item.second;
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], object_info->handle,
error_code, "OBJ ERROR : For device 0x%" PRIxLEAST64 ", %s object 0x%" PRIxLEAST64 " has not been destroyed.",
HandleToUint64(device), object_string[object_type], object_info->handle);
}
}
void DeviceDestroyUndestroyedObjects(VkDevice device, VulkanObjectType object_type) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
while (!device_data->object_map[object_type].empty()) {
auto item = device_data->object_map[object_type].begin();
ObjTrackState *object_info = item->second;
DestroyObjectSilently(device, object_info->handle, object_type);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
std::unique_lock<std::mutex> lock(global_lock);
dispatch_key key = get_dispatch_key(instance);
layer_data *instance_data = GetLayerDataPtr(key, layer_data_map);
// Enable the temporary callback(s) here to catch cleanup issues:
if (instance_data->num_tmp_debug_messengers > 0) {
layer_enable_tmp_debug_messengers(instance_data->report_data, instance_data->num_tmp_debug_messengers,
instance_data->tmp_messenger_create_infos, instance_data->tmp_debug_messengers);
}
if (instance_data->num_tmp_report_callbacks > 0) {
layer_enable_tmp_report_callbacks(instance_data->report_data, instance_data->num_tmp_report_callbacks,
instance_data->tmp_report_create_infos, instance_data->tmp_report_callbacks);
}
// TODO: The instance handle can not be validated here. The loader will likely have to validate it.
ValidateObject(instance, instance, kVulkanObjectTypeInstance, true, "VUID-vkDestroyInstance-instance-parameter",
kVUIDUndefined);
// Destroy physical devices
for (auto iit = instance_data->object_map[kVulkanObjectTypePhysicalDevice].begin();
iit != instance_data->object_map[kVulkanObjectTypePhysicalDevice].end();) {
ObjTrackState *pNode = iit->second;
VkPhysicalDevice physical_device = reinterpret_cast<VkPhysicalDevice>(pNode->handle);
DestroyObject(instance, physical_device, kVulkanObjectTypePhysicalDevice, nullptr, kVUIDUndefined, kVUIDUndefined);
iit = instance_data->object_map[kVulkanObjectTypePhysicalDevice].begin();
}
// Destroy child devices
for (auto iit = instance_data->object_map[kVulkanObjectTypeDevice].begin();
iit != instance_data->object_map[kVulkanObjectTypeDevice].end();) {
ObjTrackState *pNode = iit->second;
VkDevice device = reinterpret_cast<VkDevice>(pNode->handle);
VkDebugReportObjectTypeEXT debug_object_type = get_debug_report_enum[pNode->object_type];
log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, debug_object_type, pNode->handle,
kVUID_ObjectTracker_ObjectLeak, "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.",
string_VkDebugReportObjectTypeEXT(debug_object_type), pNode->handle);
// Report any remaining objects in LL
ReportUndestroyedObjects(device, "VUID-vkDestroyInstance-instance-00629");
DestroyUndestroyedObjects(device);
DestroyObject(instance, device, kVulkanObjectTypeDevice, pAllocator, "VUID-vkDestroyInstance-instance-00630",
"VUID-vkDestroyInstance-instance-00631");
iit = instance_data->object_map[kVulkanObjectTypeDevice].begin();
}
instance_data->object_map[kVulkanObjectTypeDevice].clear();
instance_data->instance_dispatch_table.DestroyInstance(instance, pAllocator);
// Disable and cleanup the temporary callback(s):
layer_disable_tmp_debug_messengers(instance_data->report_data, instance_data->num_tmp_debug_messengers,
instance_data->tmp_debug_messengers);
layer_disable_tmp_report_callbacks(instance_data->report_data, instance_data->num_tmp_report_callbacks,
instance_data->tmp_report_callbacks);
if (instance_data->num_tmp_debug_messengers > 0) {
layer_free_tmp_debug_messengers(instance_data->tmp_messenger_create_infos, instance_data->tmp_debug_messengers);
instance_data->num_tmp_debug_messengers = 0;
}
if (instance_data->num_tmp_report_callbacks > 0) {
layer_free_tmp_report_callbacks(instance_data->tmp_report_create_infos, instance_data->tmp_report_callbacks);
instance_data->num_tmp_report_callbacks = 0;
}
// Clean up logging callback, if any
while (instance_data->logging_messenger.size() > 0) {
VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
instance_data->logging_messenger.pop_back();
}
while (instance_data->logging_callback.size() > 0) {
VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
instance_data->logging_callback.pop_back();
}
DestroyObject(instance, instance, kVulkanObjectTypeInstance, pAllocator, "VUID-vkDestroyInstance-instance-00630",
"VUID-vkDestroyInstance-instance-00631");
layer_debug_utils_destroy_instance(instance_data->report_data);
FreeLayerDataPtr(key, layer_data_map);
lock.unlock();
}
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
std::unique_lock<std::mutex> lock(global_lock);
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
ValidateObject(device, device, kVulkanObjectTypeDevice, true, "VUID-vkDestroyDevice-device-parameter", kVUIDUndefined);
DestroyObject(device_data->instance, device, kVulkanObjectTypeDevice, pAllocator, "VUID-vkDestroyDevice-device-00379",
"VUID-vkDestroyDevice-device-00380");
// Report any remaining objects associated with this VkDevice object in LL
ReportUndestroyedObjects(device, "VUID-vkDestroyDevice-device-00378");
DestroyUndestroyedObjects(device);
// Clean up Queue's MemRef Linked Lists
DestroyQueueDataStructures(device);
lock.unlock();
dispatch_key key = get_dispatch_key(device);
device_data->device_dispatch_table.DestroyDevice(device, pAllocator);
FreeLayerDataPtr(key, layer_data_map);
}
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
std::unique_lock<std::mutex> lock(global_lock);
ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetDeviceQueue-device-parameter", kVUIDUndefined);
lock.unlock();
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
device_data->device_dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
lock.lock();
CreateQueue(device, *pQueue);
AddQueueInfo(device, queueFamilyIndex, *pQueue);
}
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
std::unique_lock<std::mutex> lock(global_lock);
ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetDeviceQueue2-device-parameter", kVUIDUndefined);
lock.unlock();
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
device_data->device_dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);
lock.lock();
if (*pQueue != VK_NULL_HANDLE) {
CreateQueue(device, *pQueue);
AddQueueInfo(device, pQueueInfo->queueFamilyIndex, *pQueue);
}
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkUpdateDescriptorSets-device-parameter",
kVUIDUndefined);
if (pDescriptorCopies) {
for (uint32_t idx0 = 0; idx0 < descriptorCopyCount; ++idx0) {
if (pDescriptorCopies[idx0].dstSet) {
skip |= ValidateObject(device, pDescriptorCopies[idx0].dstSet, kVulkanObjectTypeDescriptorSet, false,
"VUID-VkCopyDescriptorSet-dstSet-parameter", "VUID-VkCopyDescriptorSet-commonparent");
}
if (pDescriptorCopies[idx0].srcSet) {
skip |= ValidateObject(device, pDescriptorCopies[idx0].srcSet, kVulkanObjectTypeDescriptorSet, false,
"VUID-VkCopyDescriptorSet-srcSet-parameter", "VUID-VkCopyDescriptorSet-commonparent");
}
}
}
if (pDescriptorWrites) {
for (uint32_t idx1 = 0; idx1 < descriptorWriteCount; ++idx1) {
skip |= ValidateDescriptorWrite(device, &pDescriptorWrites[idx1], false);
}
}
}
if (skip) {
return;
}
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
device_data->device_dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkCreateComputePipelines-device-parameter",
kVUIDUndefined);
if (pCreateInfos) {
for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
if (pCreateInfos[idx0].basePipelineHandle) {
skip |=
ValidateObject(device, pCreateInfos[idx0].basePipelineHandle, kVulkanObjectTypePipeline, true,
"VUID-VkComputePipelineCreateInfo-flags-00697", "VUID-VkComputePipelineCreateInfo-commonparent");
}
if (pCreateInfos[idx0].layout) {
skip |= ValidateObject(device, pCreateInfos[idx0].layout, kVulkanObjectTypePipelineLayout, false,
"VUID-VkComputePipelineCreateInfo-layout-parameter",
"VUID-VkComputePipelineCreateInfo-commonparent");
}
if (pCreateInfos[idx0].stage.module) {
skip |= ValidateObject(device, pCreateInfos[idx0].stage.module, kVulkanObjectTypeShaderModule, false,
"VUID-VkPipelineShaderStageCreateInfo-module-parameter", kVUIDUndefined);
}
}
}
if (pipelineCache) {
skip |= ValidateObject(device, pipelineCache, kVulkanObjectTypePipelineCache, true,
"VUID-vkCreateComputePipelines-pipelineCache-parameter",
"VUID-vkCreateComputePipelines-pipelineCache-parent");
}
lock.unlock();
if (skip) {
for (uint32_t i = 0; i < createInfoCount; i++) {
pPipelines[i] = VK_NULL_HANDLE;
}
return VK_ERROR_VALIDATION_FAILED_EXT;
}
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = device_data->device_dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount,
pCreateInfos, pAllocator, pPipelines);
lock.lock();
for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
if (pPipelines[idx1] != VK_NULL_HANDLE) {
CreateObject(device, pPipelines[idx1], kVulkanObjectTypePipeline, pAllocator);
}
}
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) {
bool skip = false;
std::unique_lock<std::mutex> lock(global_lock);
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkResetDescriptorPool-device-parameter",
kVUIDUndefined);
skip |=
ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, false,
"VUID-vkResetDescriptorPool-descriptorPool-parameter", "VUID-vkResetDescriptorPool-descriptorPool-parent");
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
// A DescriptorPool's descriptor sets are implicitly deleted when the pool is reset.
// Remove this pool's descriptor sets from our descriptorSet map.
auto itr = device_data->object_map[kVulkanObjectTypeDescriptorSet].begin();
while (itr != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
ObjTrackState *pNode = (*itr).second;
auto del_itr = itr++;
if (pNode->parent_object == HandleToUint64(descriptorPool)) {
DestroyObject(device, (VkDescriptorSet)((*del_itr).first), kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined,
kVUIDUndefined);
}
}
lock.unlock();
VkResult result = device_data->device_dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer command_buffer, const VkCommandBufferBeginInfo *begin_info) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(command_buffer), layer_data_map);
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |= ValidateObject(command_buffer, command_buffer, kVulkanObjectTypeCommandBuffer, false,
"VUID-vkBeginCommandBuffer-commandBuffer-parameter", kVUIDUndefined);
if (begin_info) {
ObjTrackState *pNode = device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)];
if ((begin_info->pInheritanceInfo) && (pNode->status & OBJSTATUS_COMMAND_BUFFER_SECONDARY) &&
(begin_info->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |=
ValidateObject(command_buffer, begin_info->pInheritanceInfo->framebuffer, kVulkanObjectTypeFramebuffer, true,
"VUID-VkCommandBufferBeginInfo-flags-00055", "VUID-VkCommandBufferInheritanceInfo-commonparent");
skip |=
ValidateObject(command_buffer, begin_info->pInheritanceInfo->renderPass, kVulkanObjectTypeRenderPass, false,
"VUID-VkCommandBufferBeginInfo-flags-00053", "VUID-VkCommandBufferInheritanceInfo-commonparent");
}
}
}
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = device_data->device_dispatch_table.BeginCommandBuffer(command_buffer, begin_info);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugReportCallbackEXT *pCallback) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
VkResult result =
instance_data->instance_dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback);
if (VK_SUCCESS == result) {
result = layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pCallback);
CreateObject(instance, *pCallback, kVulkanObjectTypeDebugReportCallbackEXT, pAllocator);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
const VkAllocationCallbacks *pAllocator) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
instance_data->instance_dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
DestroyObject(instance, msgCallback, kVulkanObjectTypeDebugReportCallbackEXT, pAllocator,
"VUID-vkDestroyDebugReportCallbackEXT-instance-01242", "VUID-vkDestroyDebugReportCallbackEXT-instance-01243");
}
VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
instance_data->instance_dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
pMsg);
}
// VK_EXT_debug_utils commands
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, kVUIDUndefined, kVUIDUndefined);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (pNameInfo->pObjectName) {
lock.lock();
dev_data->report_data->debugUtilsObjectNameMap->insert(
std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
lock.unlock();
} else {
lock.lock();
dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
lock.unlock();
}
VkResult result = dev_data->device_dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, kVUIDUndefined, kVUIDUndefined);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
return result;
}
VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
lock.unlock();
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
if (!skip) {
lock.lock();
BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
lock.unlock();
dev_data->device_dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
}
}
VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
lock.unlock();
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
if (!skip) {
dev_data->device_dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
lock.lock();
EndQueueDebugUtilsLabel(dev_data->report_data, queue);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
lock.unlock();
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
if (!skip) {
lock.lock();
InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
lock.unlock();
dev_data->device_dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
}
}
VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
lock.unlock();
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
if (!skip) {
lock.lock();
BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
lock.unlock();
dev_data->device_dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
}
}
VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
lock.unlock();
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
if (!skip) {
dev_data->device_dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
lock.lock();
EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
lock.unlock();
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
if (!skip) {
lock.lock();
InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
lock.unlock();
dev_data->device_dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugUtilsMessengerEXT *pMessenger) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
VkResult result =
instance_data->instance_dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
if (VK_SUCCESS == result) {
result = layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
CreateObject(instance, *pMessenger, kVulkanObjectTypeDebugUtilsMessengerEXT, pAllocator);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
const VkAllocationCallbacks *pAllocator) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
instance_data->instance_dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
DestroyObject(instance, messenger, kVulkanObjectTypeDebugUtilsMessengerEXT, pAllocator, kVUIDUndefined, kVUIDUndefined);
}
VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageTypes,
const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
instance_data->instance_dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
}
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION},
{VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_EXT_DEBUG_UTILS_SPEC_VERSION}};
static const VkLayerProperties globalLayerProps = {"VK_LAYER_LUNARG_object_tracker",
VK_LAYER_API_VERSION, // specVersion
1, // implementationVersion
"LunarG Validation Layer"};
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
return VK_ERROR_LAYER_NOT_PRESENT;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
uint32_t *pCount, VkExtensionProperties *pProperties) {
if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
return util_GetExtensionProperties(0, nullptr, pCount, pProperties);
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
return instance_data->instance_dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
std::lock_guard<std::mutex> lock(global_lock);
bool skip = ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
"VUID-vkCreateDevice-physicalDevice-parameter", kVUIDUndefined);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *phy_dev_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(phy_dev_data->instance, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
// Advance the link info for the next element on the chain
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
VkResult result = fpCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);
if (result != VK_SUCCESS) {
return result;
}
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
device_data->report_data = layer_debug_utils_create_device(phy_dev_data->report_data, *pDevice);
layer_init_device_dispatch_table(*pDevice, &device_data->device_dispatch_table, fpGetDeviceProcAddr);
// Save pCreateInfo device extension list for GetDeviceProcAddr()
for (uint32_t extn = 0; extn < pCreateInfo->enabledExtensionCount; extn++) {
device_data->device_extension_set.insert(pCreateInfo->ppEnabledExtensionNames[extn]);
}
// Add link back to physDev
device_data->physical_device = physicalDevice;
device_data->instance = phy_dev_data->instance;
CreateObject(phy_dev_data->instance, *pDevice, kVulkanObjectTypeDevice, pAllocator);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages) {
bool skip = false;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetSwapchainImagesKHR-device-parameter",
kVUIDUndefined);
skip |= ValidateObject(device, swapchain, kVulkanObjectTypeSwapchainKHR, false,
"VUID-vkGetSwapchainImagesKHR-swapchain-parameter", kVUIDUndefined);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result =
device_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
if (pSwapchainImages != NULL) {
lock.lock();
for (uint32_t i = 0; i < *pSwapchainImageCount; i++) {
CreateSwapchainImageObject(device, pSwapchainImages[i], swapchain);
}
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkCreateDescriptorSetLayout-device-parameter",
kVUIDUndefined);
if (pCreateInfo) {
if (pCreateInfo->pBindings) {
for (uint32_t binding_index = 0; binding_index < pCreateInfo->bindingCount; ++binding_index) {
const VkDescriptorSetLayoutBinding &binding = pCreateInfo->pBindings[binding_index];
const bool is_sampler_type = binding.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
binding.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
if (binding.pImmutableSamplers && is_sampler_type) {
for (uint32_t index2 = 0; index2 < binding.descriptorCount; ++index2) {
const VkSampler sampler = binding.pImmutableSamplers[index2];
skip |= ValidateObject(device, sampler, kVulkanObjectTypeSampler, false,
"VUID-VkDescriptorSetLayoutBinding-descriptorType-00282", kVUIDUndefined);
}
}
}
}
}
}
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = device_data->device_dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
if (VK_SUCCESS == result) {
std::lock_guard<std::mutex> lock(global_lock);
CreateObject(device, *pSetLayout, kVulkanObjectTypeDescriptorSetLayout, pAllocator);
}
return result;
}
static inline bool ValidateSamplerObjects(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo) {
bool skip = false;
if (pCreateInfo->pBindings) {
for (uint32_t index1 = 0; index1 < pCreateInfo->bindingCount; ++index1) {
for (uint32_t index2 = 0; index2 < pCreateInfo->pBindings[index1].descriptorCount; ++index2) {
if (pCreateInfo->pBindings[index1].pImmutableSamplers) {
skip |=
ValidateObject(device, pCreateInfo->pBindings[index1].pImmutableSamplers[index2], kVulkanObjectTypeSampler,
true, "VUID-VkDescriptorSetLayoutBinding-descriptorType-00282", kVUIDUndefined);
}
}
}
}
return skip;
}
VKAPI_ATTR void VKAPI_CALL GetDescriptorSetLayoutSupport(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
VkDescriptorSetLayoutSupport *pSupport) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false,
"VUID-vkGetDescriptorSetLayoutSupport-device-parameter", kVUIDUndefined);
if (pCreateInfo) {
skip |= ValidateSamplerObjects(device, pCreateInfo);
}
}
if (skip) return;
GetLayerDataPtr(get_dispatch_key(device), layer_data_map)
->device_dispatch_table.GetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport);
}
VKAPI_ATTR void VKAPI_CALL GetDescriptorSetLayoutSupportKHR(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
VkDescriptorSetLayoutSupport *pSupport) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false,
"VUID-vkGetDescriptorSetLayoutSupportKHR-device-parameter", kVUIDUndefined);
if (pCreateInfo) {
skip |= ValidateSamplerObjects(device, pCreateInfo);
}
}
if (skip) return;
GetLayerDataPtr(get_dispatch_key(device), layer_data_map)
->device_dispatch_table.GetDescriptorSetLayoutSupportKHR(device, pCreateInfo, pSupport);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties *pQueueFamilyProperties) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
"VUID-vkGetPhysicalDeviceQueueFamilyProperties-physicalDevice-parameter", kVUIDUndefined);
}
if (skip) {
return;
}
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
std::lock_guard<std::mutex> lock(global_lock);
if (pQueueFamilyProperties != NULL) {
if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
}
for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
instance_data->queue_family_properties[i] = pQueueFamilyProperties[i];
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
// Advance the link info for the next element on the chain
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
if (result != VK_SUCCESS) {
return result;
}
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
instance_data->instance = *pInstance;
layer_init_instance_dispatch_table(*pInstance, &instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
// Look for one or more debug report create info structures, and copy the
// callback(s) for each one found (for use by vkDestroyInstance)
layer_copy_tmp_debug_messengers(pCreateInfo->pNext, &instance_data->num_tmp_debug_messengers,
&instance_data->tmp_messenger_create_infos, &instance_data->tmp_debug_messengers);
layer_copy_tmp_report_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_report_callbacks,
&instance_data->tmp_report_create_infos, &instance_data->tmp_report_callbacks);
instance_data->report_data =
debug_utils_create_instance(&instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
pCreateInfo->ppEnabledExtensionNames);
InitObjectTracker(instance_data, pAllocator);
CreateObject(*pInstance, *pInstance, kVulkanObjectTypeInstance, pAllocator);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
VkPhysicalDevice *pPhysicalDevices) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(instance, instance, kVulkanObjectTypeInstance, false,
"VUID-vkEnumeratePhysicalDevices-instance-parameter", kVUIDUndefined);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
VkResult result =
instance_data->instance_dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
lock.lock();
if (result == VK_SUCCESS) {
if (pPhysicalDevices) {
for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
CreateObject(instance, pPhysicalDevices[i], kVulkanObjectTypePhysicalDevice, nullptr);
}
}
}
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
VkCommandBuffer *pCommandBuffers) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkAllocateCommandBuffers-device-parameter",
kVUIDUndefined);
skip |= ValidateObject(device, pAllocateInfo->commandPool, kVulkanObjectTypeCommandPool, false,
"VUID-VkCommandBufferAllocateInfo-commandPool-parameter", kVUIDUndefined);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = device_data->device_dispatch_table.AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
lock.lock();
for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
AllocateCommandBuffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], pAllocateInfo->level);
}
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkAllocateDescriptorSets-device-parameter",
kVUIDUndefined);
skip |= ValidateObject(device, pAllocateInfo->descriptorPool, kVulkanObjectTypeDescriptorPool, false,
"VUID-VkDescriptorSetAllocateInfo-descriptorPool-parameter",
"VUID-VkDescriptorSetAllocateInfo-commonparent");
for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
skip |= ValidateObject(device, pAllocateInfo->pSetLayouts[i], kVulkanObjectTypeDescriptorSetLayout, false,
"VUID-VkDescriptorSetAllocateInfo-pSetLayouts-parameter",
"VUID-VkDescriptorSetAllocateInfo-commonparent");
}
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = device_data->device_dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
if (VK_SUCCESS == result) {
lock.lock();
for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
AllocateDescriptorSet(device, pAllocateInfo->descriptorPool, pDescriptorSets[i]);
}
lock.unlock();
}
return result;
}
VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) {
bool skip = false;
std::unique_lock<std::mutex> lock(global_lock);
ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkFreeCommandBuffers-device-parameter", kVUIDUndefined);
ValidateObject(device, commandPool, kVulkanObjectTypeCommandPool, false, "VUID-vkFreeCommandBuffers-commandPool-parameter",
"VUID-vkFreeCommandBuffers-commandPool-parent");
for (uint32_t i = 0; i < commandBufferCount; i++) {
if (pCommandBuffers[i] != VK_NULL_HANDLE) {
skip |= ValidateCommandBuffer(device, commandPool, pCommandBuffers[i]);
}
}
for (uint32_t i = 0; i < commandBufferCount; i++) {
DestroyObject(device, pCommandBuffers[i], kVulkanObjectTypeCommandBuffer, nullptr, kVUIDUndefined, kVUIDUndefined);
}
lock.unlock();
if (!skip) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
device_data->device_dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
}
VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
// A swapchain's images are implicitly deleted when the swapchain is deleted.
// Remove this swapchain's images from our map of such images.
std::unordered_map<uint64_t, ObjTrackState *>::iterator itr = device_data->swapchainImageMap.begin();
while (itr != device_data->swapchainImageMap.end()) {
ObjTrackState *pNode = (*itr).second;
if (pNode->parent_object == HandleToUint64(swapchain)) {
delete pNode;
auto delete_item = itr++;
device_data->swapchainImageMap.erase(delete_item);
} else {
++itr;
}
}
DestroyObject(device, swapchain, kVulkanObjectTypeSwapchainKHR, pAllocator, "VUID-vkDestroySwapchainKHR-swapchain-01283",
"VUID-vkDestroySwapchainKHR-swapchain-01284");
lock.unlock();
device_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}
VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount,
const VkDescriptorSet *pDescriptorSets) {
bool skip = false;
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkFreeDescriptorSets-device-parameter",
kVUIDUndefined);
skip |= ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, false,
"VUID-vkFreeDescriptorSets-descriptorPool-parameter", "VUID-vkFreeDescriptorSets-descriptorPool-parent");
for (uint32_t i = 0; i < descriptorSetCount; i++) {
if (pDescriptorSets[i] != VK_NULL_HANDLE) {
skip |= ValidateDescriptorSet(device, descriptorPool, pDescriptorSets[i]);
}
}
for (uint32_t i = 0; i < descriptorSetCount; i++) {
DestroyObject(device, pDescriptorSets[i], kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined, kVUIDUndefined);
}
lock.unlock();
if (!skip) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
result = device_data->device_dispatch_table.FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) {
bool skip = VK_FALSE;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDestroyDescriptorPool-device-parameter",
kVUIDUndefined);
skip |= ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, true,
"VUID-vkDestroyDescriptorPool-descriptorPool-parameter",
"VUID-vkDestroyDescriptorPool-descriptorPool-parent");
lock.unlock();
if (skip) {
return;
}
// A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
// Remove this pool's descriptor sets from our descriptorSet map.
lock.lock();
std::unordered_map<uint64_t, ObjTrackState *>::iterator itr = device_data->object_map[kVulkanObjectTypeDescriptorSet].begin();
while (itr != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
ObjTrackState *pNode = (*itr).second;
auto del_itr = itr++;
if (pNode->parent_object == HandleToUint64(descriptorPool)) {
DestroyObject(device, (VkDescriptorSet)((*del_itr).first), kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined,
kVUIDUndefined);
}
}
DestroyObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, pAllocator,
"VUID-vkDestroyDescriptorPool-descriptorPool-00304", "VUID-vkDestroyDescriptorPool-descriptorPool-00305");
lock.unlock();
device_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDestroyCommandPool-device-parameter",
kVUIDUndefined);
skip |= ValidateObject(device, commandPool, kVulkanObjectTypeCommandPool, true,
"VUID-vkDestroyCommandPool-commandPool-parameter", "VUID-vkDestroyCommandPool-commandPool-parent");
lock.unlock();
if (skip) {
return;
}
lock.lock();
// A CommandPool's command buffers are implicitly deleted when the pool is deleted.
// Remove this pool's cmdBuffers from our cmd buffer map.
auto itr = device_data->object_map[kVulkanObjectTypeCommandBuffer].begin();
auto del_itr = itr;
while (itr != device_data->object_map[kVulkanObjectTypeCommandBuffer].end()) {
ObjTrackState *pNode = (*itr).second;
del_itr = itr++;
if (pNode->parent_object == HandleToUint64(commandPool)) {
skip |= ValidateCommandBuffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
DestroyObject(device, reinterpret_cast<VkCommandBuffer>((*del_itr).first), kVulkanObjectTypeCommandBuffer, nullptr,
kVUIDUndefined, kVUIDUndefined);
}
}
DestroyObject(device, commandPool, kVulkanObjectTypeCommandPool, pAllocator, "VUID-vkDestroyCommandPool-commandPool-00042",
"VUID-vkDestroyCommandPool-commandPool-00043");
lock.unlock();
device_data->device_dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
}
// Note: This is the core version of this routine. The extension version is below.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |=
ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
}
if (skip) {
return;
}
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
std::lock_guard<std::mutex> lock(global_lock);
if (pQueueFamilyProperties != NULL) {
if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
}
for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
instance_data->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
}
}
}
// Note: This is the extension version of this routine. The core version is above.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |=
ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
}
if (skip) {
return;
}
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
std::lock_guard<std::mutex> lock(global_lock);
if (pQueueFamilyProperties != NULL) {
if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
}
for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
instance_data->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
VkDisplayPropertiesKHR *pProperties) {
bool skip = false;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
"VUID-vkGetPhysicalDeviceDisplayPropertiesKHR-physicalDevice-parameter", kVUIDUndefined);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result =
instance_data->instance_dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);
lock.lock();
if (result == VK_SUCCESS) {
if (pProperties) {
for (uint32_t i = 0; i < *pPropertyCount; ++i) {
CreateObject(physicalDevice, pProperties[i].display, kVulkanObjectTypeDisplayKHR, nullptr);
}
}
}
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) {
bool skip = false;
std::unique_lock<std::mutex> lock(global_lock);
skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
"VUID-vkGetDisplayModePropertiesKHR-physicalDevice-parameter", kVUIDUndefined);
skip |= ValidateObject(physicalDevice, display, kVulkanObjectTypeDisplayKHR, false,
"VUID-vkGetDisplayModePropertiesKHR-display-parameter", kVUIDUndefined);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result =
instance_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);
lock.lock();
if (result == VK_SUCCESS) {
if (pProperties) {
for (uint32_t i = 0; i < *pPropertyCount; ++i) {
CreateObject(physicalDevice, pProperties[i].displayMode, kVulkanObjectTypeDisplayModeKHR, nullptr);
}
}
}
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
bool skip = VK_FALSE;
std::unique_lock<std::mutex> lock(global_lock);
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (pNameInfo->pObjectName) {
dev_data->report_data->debugObjectNameMap->insert(
std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
} else {
dev_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
}
skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDebugMarkerSetObjectNameEXT-device-parameter",
kVUIDUndefined);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = dev_data->device_dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
return result;
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
assert(instance);
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
if (instance_data->instance_dispatch_table.GetPhysicalDeviceProcAddr == NULL) {
return NULL;
}
return instance_data->instance_dispatch_table.GetPhysicalDeviceProcAddr(instance, funcName);
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!ApiParentExtensionEnabled(funcName, device_data->device_extension_set)) {
return nullptr;
}
const auto item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
if (!device_data->device_dispatch_table.GetDeviceProcAddr) return NULL;
return device_data->device_dispatch_table.GetDeviceProcAddr(device, funcName);
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
const auto item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
if (!instance_data->instance_dispatch_table.GetInstanceProcAddr) return nullptr;
return instance_data->instance_dispatch_table.GetInstanceProcAddr(instance, funcName);
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
VkDisplayProperties2KHR *pProperties) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |=
ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
}
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result =
instance_data->instance_dispatch_table.GetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties);
if (pProperties && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
std::lock_guard<std::mutex> lock(global_lock);
for (uint32_t index = 0; index < *pPropertyCount; ++index) {
CreateObject(physicalDevice, pProperties[index].displayProperties.display, kVulkanObjectTypeDisplayKHR, nullptr);
}
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
"VUID-vkGetDisplayPlaneSupportedDisplaysKHR-physicalDevice-parameter", kVUIDUndefined);
}
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result = instance_data->instance_dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex,
pDisplayCount, pDisplays);
if (pDisplays && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
std::lock_guard<std::mutex> lock(global_lock);
for (uint32_t index = 0; index < *pDisplayCount; ++index) {
CreateObject(physicalDevice, pDisplays[index], kVulkanObjectTypeDisplayKHR, nullptr);
}
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
uint32_t *pPropertyCount, VkDisplayModeProperties2KHR *pProperties) {
bool skip = false;
{
std::lock_guard<std::mutex> lock(global_lock);
skip |=
ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
skip |= ValidateObject(physicalDevice, display, kVulkanObjectTypeDisplayKHR, false, kVUIDUndefined, kVUIDUndefined);
}
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result =
instance_data->instance_dispatch_table.GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties);
if (pProperties && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
std::lock_guard<std::mutex> lock(global_lock);
for (uint32_t index = 0; index < *pPropertyCount; ++index) {
CreateObject(physicalDevice, pProperties[index].displayModeProperties.displayMode, kVulkanObjectTypeDisplayModeKHR,
nullptr);
}
}
return result;
}
} // namespace object_tracker
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
return object_tracker::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
VkLayerProperties *pProperties) {
return object_tracker::EnumerateInstanceLayerProperties(pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
// The layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return object_tracker::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
return object_tracker::GetDeviceProcAddr(dev, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
return object_tracker::GetInstanceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
// The layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return object_tracker::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
const char *funcName) {
return object_tracker::GetPhysicalDeviceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
assert(pVersionStruct != NULL);
assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
// Fill in the function pointers if our version is at least capable of having the structure contain them.
if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
}
if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
object_tracker::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
} else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
}
return VK_SUCCESS;
}
| 1 | 9,249 | This was already in vkGetPhysicalDeviceDisplayProperties2KHR... | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -498,3 +498,7 @@ type tasksStopper interface {
type serviceLinkedRoleCreator interface {
CreateECSServiceLinkedRole() error
}
+
+type dockerEngineValidator interface {
+ IsDockerEngineRunning() (string, error)
+} | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"encoding"
"io"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/aws/codepipeline"
"github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack"
"github.com/aws/copilot-cli/internal/pkg/describe"
"github.com/aws/copilot-cli/internal/pkg/exec"
"github.com/aws/copilot-cli/internal/pkg/initialize"
"github.com/aws/copilot-cli/internal/pkg/logging"
"github.com/aws/copilot-cli/internal/pkg/repository"
"github.com/aws/copilot-cli/internal/pkg/task"
"github.com/aws/copilot-cli/internal/pkg/term/command"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/aws/copilot-cli/internal/pkg/workspace"
)
// actionCommand is the interface that every command that creates a resource implements.
type actionCommand interface {
// Validate returns an error if a flag's value is invalid.
Validate() error
// Ask prompts for flag values that are required but not passed in.
Ask() error
// Execute runs the command after collecting all required options.
Execute() error
// RecommendedActions returns a list of follow-up suggestions users can run once the command executes successfully.
RecommendedActions() []string
}
// SSM store interfaces.
type serviceStore interface {
CreateService(svc *config.Workload) error
GetService(appName, svcName string) (*config.Workload, error)
ListServices(appName string) ([]*config.Workload, error)
DeleteService(appName, svcName string) error
}
type jobStore interface {
CreateJob(job *config.Workload) error
GetJob(appName, jobName string) (*config.Workload, error)
ListJobs(appName string) ([]*config.Workload, error)
DeleteJob(appName, jobName string) error
}
type wlStore interface {
ListWorkloads(appName string) ([]*config.Workload, error)
GetWorkload(appName, name string) (*config.Workload, error)
}
type workloadListWriter interface {
Write(appName string) error
}
type applicationStore interface {
applicationCreator
applicationGetter
applicationLister
applicationDeleter
}
type applicationCreator interface {
CreateApplication(app *config.Application) error
}
type applicationGetter interface {
GetApplication(appName string) (*config.Application, error)
}
type applicationLister interface {
ListApplications() ([]*config.Application, error)
}
type applicationDeleter interface {
DeleteApplication(name string) error
}
type environmentStore interface {
environmentCreator
environmentGetter
environmentLister
environmentDeleter
}
type environmentCreator interface {
CreateEnvironment(env *config.Environment) error
}
type environmentGetter interface {
GetEnvironment(appName string, environmentName string) (*config.Environment, error)
}
type environmentLister interface {
ListEnvironments(appName string) ([]*config.Environment, error)
}
type environmentDeleter interface {
DeleteEnvironment(appName, environmentName string) error
}
type store interface {
applicationStore
environmentStore
serviceStore
jobStore
wlStore
}
type deployedEnvironmentLister interface {
ListEnvironmentsDeployedTo(appName, svcName string) ([]string, error)
ListDeployedServices(appName, envName string) ([]string, error)
IsServiceDeployed(appName, envName string, svcName string) (bool, error)
}
// Secretsmanager interface.
type secretsManager interface {
secretCreator
secretDeleter
}
type secretCreator interface {
CreateSecret(secretName, secretString string) (string, error)
}
type secretDeleter interface {
DeleteSecret(secretName string) error
}
type imageBuilderPusher interface {
BuildAndPush(docker repository.ContainerLoginBuildPusher, args *exec.BuildArguments) error
}
type repositoryURIGetter interface {
URI() string
}
type repositoryService interface {
repositoryURIGetter
imageBuilderPusher
}
type logEventsWriter interface {
WriteLogEvents(opts logging.WriteLogEventsOpts) error
}
type templater interface {
Template() (string, error)
}
type stackSerializer interface {
templater
SerializedParameters() (string, error)
}
type runner interface {
Run(name string, args []string, options ...command.Option) error
}
type eventsWriter interface {
WriteEventsUntilStopped() error
}
type defaultSessionProvider interface {
Default() (*session.Session, error)
}
type regionalSessionProvider interface {
DefaultWithRegion(region string) (*session.Session, error)
}
type sessionFromRoleProvider interface {
FromRole(roleARN string, region string) (*session.Session, error)
}
type sessionFromStaticProvider interface {
FromStaticCreds(accessKeyID, secretAccessKey, sessionToken string) (*session.Session, error)
}
type sessionFromProfileProvider interface {
FromProfile(name string) (*session.Session, error)
}
type sessionProvider interface {
defaultSessionProvider
regionalSessionProvider
sessionFromRoleProvider
sessionFromProfileProvider
sessionFromStaticProvider
}
type describer interface {
Describe() (describe.HumanJSONStringer, error)
}
type wsFileDeleter interface {
DeleteWorkspaceFile() error
}
type svcManifestReader interface {
ReadServiceManifest(svcName string) ([]byte, error)
}
type jobManifestReader interface {
ReadJobManifest(jobName string) ([]byte, error)
}
type copilotDirGetter interface {
CopilotDirPath() (string, error)
}
type wsPipelineManifestReader interface {
ReadPipelineManifest() ([]byte, error)
}
type wsPipelineWriter interface {
WritePipelineBuildspec(marshaler encoding.BinaryMarshaler) (string, error)
WritePipelineManifest(marshaler encoding.BinaryMarshaler) (string, error)
}
type wsServiceLister interface {
ServiceNames() ([]string, error)
}
type wsSvcReader interface {
wsServiceLister
svcManifestReader
}
type wsSvcDirReader interface {
wsSvcReader
copilotDirGetter
}
type wsJobLister interface {
JobNames() ([]string, error)
}
type wsJobReader interface {
jobManifestReader
wsJobLister
}
type wsWlReader interface {
WorkloadNames() ([]string, error)
}
type wsJobDirReader interface {
wsJobReader
copilotDirGetter
}
type wsWlDirReader interface {
wsJobReader
wsSvcReader
copilotDirGetter
wsWlReader
ListDockerfiles() ([]string, error)
Summary() (*workspace.Summary, error)
}
type wsPipelineReader interface {
wsPipelineManifestReader
WorkloadNames() ([]string, error)
}
type wsAppManager interface {
Create(appName string) error
Summary() (*workspace.Summary, error)
}
type wsAddonManager interface {
WriteAddon(f encoding.BinaryMarshaler, svc, name string) (string, error)
wsWlReader
}
type artifactUploader interface {
PutArtifact(bucket, fileName string, data io.Reader) (string, error)
}
type bucketEmptier interface {
EmptyBucket(bucket string) error
}
// Interfaces for deploying resources through CloudFormation. Facilitates mocking.
type environmentDeployer interface {
DeployAndRenderEnvironment(out termprogress.FileWriter, env *deploy.CreateEnvironmentInput) error
DeleteEnvironment(appName, envName, cfnExecRoleARN string) error
GetEnvironment(appName, envName string) (*config.Environment, error)
EnvironmentTemplate(appName, envName string) (string, error)
UpdateEnvironmentTemplate(appName, envName, templateBody, cfnExecRoleARN string) error
}
type wlDeleter interface {
DeleteWorkload(in deploy.DeleteWorkloadInput) error
}
type svcRemoverFromApp interface {
RemoveServiceFromApp(app *config.Application, svcName string) error
}
type jobRemoverFromApp interface {
RemoveJobFromApp(app *config.Application, jobName string) error
}
type imageRemover interface {
ClearRepository(repoName string) error // implemented by ECR Service
}
type pipelineDeployer interface {
CreatePipeline(env *deploy.CreatePipelineInput) error
UpdatePipeline(env *deploy.CreatePipelineInput) error
PipelineExists(env *deploy.CreatePipelineInput) (bool, error)
DeletePipeline(pipelineName string) error
AddPipelineResourcesToApp(app *config.Application, region string) error
appResourcesGetter
// TODO: Add StreamPipelineCreation method
}
type appDeployer interface {
DeployApp(in *deploy.CreateAppInput) error
AddServiceToApp(app *config.Application, svcName string) error
AddJobToApp(app *config.Application, jobName string) error
AddEnvToApp(app *config.Application, env *config.Environment) error
DelegateDNSPermissions(app *config.Application, accountID string) error
DeleteApp(name string) error
}
type appResourcesGetter interface {
GetAppResourcesByRegion(app *config.Application, region string) (*stack.AppRegionalResources, error)
GetRegionalAppResources(app *config.Application) ([]*stack.AppRegionalResources, error)
}
type taskDeployer interface {
DeployTask(input *deploy.CreateTaskResourcesInput, opts ...cloudformation.StackOption) error
}
type taskRunner interface {
Run() ([]*task.Task, error)
}
type defaultClusterGetter interface {
HasDefaultCluster() (bool, error)
}
type deployer interface {
environmentDeployer
appDeployer
pipelineDeployer
}
type domainValidator interface {
DomainExists(domainName string) (bool, error)
}
type dockerfileParser interface {
GetExposedPorts() ([]uint16, error)
GetHealthCheck() (*exec.HealthCheck, error)
}
type statusDescriber interface {
Describe() (*describe.ServiceStatusDesc, error)
}
type envDescriber interface {
Describe() (*describe.EnvDescription, error)
}
type versionGetter interface {
Version() (string, error)
}
type envTemplater interface {
EnvironmentTemplate(appName, envName string) (string, error)
}
type envUpgrader interface {
UpgradeEnvironment(in *deploy.CreateEnvironmentInput) error
}
type legacyEnvUpgrader interface {
UpgradeLegacyEnvironment(in *deploy.CreateEnvironmentInput, lbWebServices ...string) error
envTemplater
}
type envTemplateUpgrader interface {
envUpgrader
legacyEnvUpgrader
}
type pipelineGetter interface {
GetPipeline(pipelineName string) (*codepipeline.Pipeline, error)
ListPipelineNamesByTags(tags map[string]string) ([]string, error)
GetPipelinesByTags(tags map[string]string) ([]*codepipeline.Pipeline, error)
}
type executor interface {
Execute() error
}
type deletePipelineRunner interface {
Run() error
}
type executeAsker interface {
Ask() error
executor
}
type appSelector interface {
Application(prompt, help string, additionalOpts ...string) (string, error)
}
type appEnvSelector interface {
appSelector
Environment(prompt, help, app string, additionalOpts ...string) (string, error)
}
type configSelector interface {
appEnvSelector
Service(prompt, help, app string) (string, error)
}
type deploySelector interface {
appSelector
DeployedService(prompt, help string, app string, opts ...selector.GetDeployedServiceOpts) (*selector.DeployedService, error)
}
type pipelineSelector interface {
Environments(prompt, help, app string, finalMsgFunc func(int) prompt.Option) ([]string, error)
}
type wsSelector interface {
appEnvSelector
Service(prompt, help string) (string, error)
Job(prompt, help string) (string, error)
Workload(msg, help string) (string, error)
}
type initJobSelector interface {
dockerfileSelector
Schedule(scheduleTypePrompt, scheduleTypeHelp string, scheduleValidator, rateValidator prompt.ValidatorFunc) (string, error)
}
type dockerfileSelector interface {
Dockerfile(selPrompt, notFoundPrompt, selHelp, notFoundHelp string, pv prompt.ValidatorFunc) (string, error)
}
type ec2Selector interface {
VPC(prompt, help string) (string, error)
PublicSubnets(prompt, help, vpcID string) ([]string, error)
PrivateSubnets(prompt, help, vpcID string) ([]string, error)
}
type credsSelector interface {
Creds(prompt, help string) (*session.Session, error)
}
type ec2Client interface {
HasDNSSupport(vpcID string) (bool, error)
}
type jobInitializer interface {
Job(props *initialize.JobProps) (string, error)
}
type svcInitializer interface {
Service(props *initialize.ServiceProps) (string, error)
}
type roleDeleter interface {
DeleteRole(string) error
}
type activeWorkloadTasksLister interface {
ListActiveWorkloadTasks(app, env, workload string) (clusterARN string, taskARNs []string, err error)
}
type tasksStopper interface {
StopTasks(tasks []string, opts ...ecs.StopTasksOpts) error
}
type serviceLinkedRoleCreator interface {
CreateECSServiceLinkedRole() error
}
| 1 | 16,123 | nit: I'd expect a public method called IsDockerEngineRunning to return a boolean yes/no, not the error message. Can we change either the return value or the name? Something like `CallDockerEngine` | aws-copilot-cli | go |
@@ -322,12 +322,12 @@ def define_model(model_name, dbengine, model_seed):
cai_resource_name = Column(String(4096))
cai_resource_type = Column(String(512))
full_name = Column(String(2048), nullable=False)
- type_name = Column(get_string_by_dialect(dbengine.dialect.name, 512),
+ type_name = Column(get_string_by_dialect(dbengine.dialect.name, 700),
primary_key=True)
parent_type_name = Column(
- get_string_by_dialect(dbengine.dialect.name, 512),
+ get_string_by_dialect(dbengine.dialect.name, 700),
ForeignKey('{}.type_name'.format(resources_tablename)))
- name = Column(String(256), nullable=False)
+ name = Column(String(1024), nullable=False)
type = Column(String(128), nullable=False)
policy_update_counter = Column(Integer, default=0)
display_name = Column(String(256), default='') | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database abstraction objects for Forseti Server."""
# pylint: disable=too-many-lines
# pylint: disable=too-many-branches
from builtins import next
from builtins import object
import binascii
import collections
import hmac
import json
import os
import struct
from threading import Lock
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import Integer
from sqlalchemy import Boolean
from sqlalchemy import String
from sqlalchemy import Sequence
from sqlalchemy import ForeignKey
from sqlalchemy import Text
from sqlalchemy import create_engine as sqlalchemy_create_engine
from sqlalchemy import Table
from sqlalchemy import DateTime
from sqlalchemy import or_
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import reconstructor
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import select
from sqlalchemy.sql import union
from sqlalchemy.ext.declarative import declarative_base
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.services.utils import mutual_exclusive
from google.cloud.forseti.services.utils import to_full_resource_name
from google.cloud.forseti.services import db
from google.cloud.forseti.services.utils import get_sql_dialect
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
POOL_RECYCLE_SECONDS = 300
PER_YIELD = 4096
def page_query(query, block_size=PER_YIELD):
"""Page query by block.
Args:
query (Query): sqlalchemy query.
block_size (int): Block size per page.
Yields:
object: The query result object.
"""
block_number = 0
results = query.slice(block_number * block_size,
(block_number + 1) * block_size).all()
while results:
for obj in results:
yield obj
block_number += 1
results = query.slice(block_number * block_size,
(block_number + 1) * block_size).all()
def generate_model_handle():
"""Generate random model handle.
Returns:
str: random bytes for handle
"""
return binascii.hexlify(os.urandom(16)).decode('utf-8')
def generate_model_seed():
"""Generate random model seed.
Returns:
str: random bytes
"""
return binascii.hexlify(os.urandom(16)).decode('utf-8')
MODEL_BASE = declarative_base()
class Model(MODEL_BASE):
"""Explain model object in database."""
__tablename__ = 'model'
name = Column(String(32), primary_key=True)
handle = Column(String(32))
state = Column(String(32))
description = Column(Text())
watchdog_timer_datetime = Column(DateTime())
created_at_datetime = Column(DateTime())
etag_seed = Column(String(32), nullable=False)
message = Column(Text(16777215))
warnings = Column(Text(16777215))
def __init__(self, *args, **kwargs):
"""Initialize
Args:
*args (list): Arguments.
**kwargs (dict): Arguments.
"""
super(Model, self).__init__(*args, **kwargs)
# Non-SQL attributes
self.warning_store = list()
@reconstructor
def init_on_load(self):
"""Initialization of model when reconstructed from query."""
self.warning_store = list()
def kick_watchdog(self):
"""Used during import to notify the import is still progressing."""
self.watchdog_timer_datetime = date_time.get_utc_now_datetime()
def add_warning(self, warning):
"""Add a warning to the model.
Args:
warning (str): Warning message
"""
if warning:
self.warning_store.append(warning)
def get_warnings(self):
"""Returns any stored warnings.
Returns:
str: warning message
"""
if self.warning_store:
return '\n'.join(self.warning_store)
return ''
def set_inprogress(self):
"""Set state to 'in progress'."""
self.state = 'INPROGRESS'
def add_description(self, description):
"""Add new description to the model
Args:
description (str): the description to be added in json format
"""
new_desc = json.loads(description)
model_desc = json.loads(self.description)
for new_item in new_desc:
model_desc[new_item] = new_desc[new_item]
self.description = json.dumps(model_desc, sort_keys=True)
def set_done(self, message=''):
"""Indicate a finished import.
Args:
message (str): Success message or ''
"""
warnings = self.get_warnings()
if warnings:
LOGGER.debug('warnings = %s', warnings)
self.warnings = warnings
self.state = 'PARTIAL_SUCCESS'
else:
self.state = 'SUCCESS'
self.message = message
def set_error(self, message):
"""Indicate a broken import.
Args:
message (str): error message
"""
self.state = 'BROKEN'
self.warnings = self.get_warnings()
self.message = message
LOGGER.error('warning = %s, message = %s',
self.warnings, self.message)
def __repr__(self):
"""String representation.
Returns:
str: Model represented as
(name='{}', handle='{}' state='{}')
"""
return '<Model(name={}, handle={} state={})>'.format(
self.name, self.handle, self.state)
# pylint: disable=too-many-locals,no-member
def define_model(model_name, dbengine, model_seed):
"""Defines table classes which point to the corresponding model.
This means, for each model being accessed this function needs to
be called in order to generate a full set of table definitions.
Models are name spaced via a random model seed such that multiple
    models can exist within the same database, in order to implement
    the name spacing in an abstract way.
Args:
model_name (str): model handle
dbengine (object): db engine
model_seed (str): seed to get etag
Returns:
tuple: (sessionmaker, ModelAccess)
"""
base = declarative_base()
denormed_group_in_group = '{}_group_in_group'.format(model_name)
bindings_tablename = '{}_bindings'.format(model_name)
roles_tablename = '{}_roles'.format(model_name)
permissions_tablename = '{}_permissions'.format(model_name)
members_tablename = '{}_members'.format(model_name)
resources_tablename = '{}_resources'.format(model_name)
role_permissions = Table('{}_role_permissions'.format(model_name),
base.metadata,
Column(
'roles_name', ForeignKey(
'{}.name'.format(roles_tablename)),
primary_key=True),
Column(
'permissions_name', ForeignKey(
'{}.name'.format(permissions_tablename)),
primary_key=True), )
binding_members = Table('{}_binding_members'.format(model_name),
base.metadata,
Column(
'bindings_id', ForeignKey(
'{}.id'.format(bindings_tablename)),
primary_key=True),
Column(
'members_name', ForeignKey(
'{}.name'.format(members_tablename)),
primary_key=True), )
group_members = Table(
'{}_group_members'.format(model_name),
base.metadata,
Column('group_name',
ForeignKey('{}.name'.format(members_tablename)),
primary_key=True),
Column('members_name',
ForeignKey('{}.name'.format(members_tablename)),
primary_key=True),
)
groups_settings = Table(
'{}_groups_settings'.format(model_name),
base.metadata,
Column('group_name',
ForeignKey('{}.name'.format(members_tablename)),
primary_key=True),
Column('settings',
Text(16777215)),
)
def get_string_by_dialect(db_dialect, column_size):
"""Get Sqlalchemy String by dialect.
Sqlite doesn't support collation type, need to define different
column types for different database engine.
This is used to make MySQL column case sensitive by adding
an encoding type.
Args:
db_dialect (String): The db dialect.
column_size (Integer): The size of the column.
Returns:
String: Sqlalchemy String.
"""
if db_dialect.lower() == 'sqlite':
return String(column_size)
return String(column_size, collation='utf8mb4_bin')
class Resource(base):
"""Row entry for a GCP resource."""
__tablename__ = resources_tablename
cai_resource_name = Column(String(4096))
cai_resource_type = Column(String(512))
full_name = Column(String(2048), nullable=False)
type_name = Column(get_string_by_dialect(dbengine.dialect.name, 512),
primary_key=True)
parent_type_name = Column(
get_string_by_dialect(dbengine.dialect.name, 512),
ForeignKey('{}.type_name'.format(resources_tablename)))
name = Column(String(256), nullable=False)
type = Column(String(128), nullable=False)
policy_update_counter = Column(Integer, default=0)
display_name = Column(String(256), default='')
email = Column(String(256), default='')
data = Column(Text(16777215))
parent = relationship('Resource', remote_side=[type_name])
bindings = relationship('Binding', back_populates='resource')
def increment_update_counter(self):
"""Increments counter for this object's db updates.
"""
self.policy_update_counter += 1
def get_etag(self):
"""Return the etag for this resource.
Returns:
str: etag to avoid race condition when set policy
"""
serialized_ctr = struct.pack('>I', self.policy_update_counter)
msg = binascii.hexlify(serialized_ctr)
msg += self.full_name.encode()
seed = (model_seed if isinstance(model_seed, bytes)
else model_seed.encode())
return hmac.new(seed, msg).hexdigest()
def __repr__(self):
"""String representation.
Returns:
str: Resource represented as
(full_name='{}', name='{}' type='{}')
"""
return '<Resource(full_name={}, name={} type={})>'.format(
self.full_name, self.name, self.type)
Resource.children = relationship(
'Resource', order_by=Resource.full_name, back_populates='parent')
class Binding(base):
"""Row for a binding between resource, roles and members."""
__tablename__ = bindings_tablename
id = Column(Integer, Sequence('{}_id_seq'.format(bindings_tablename)),
primary_key=True)
resource_type_name = Column(
get_string_by_dialect(dbengine.dialect.name, 512),
ForeignKey('{}.type_name'.format(resources_tablename)))
role_name = Column(get_string_by_dialect(dbengine.dialect.name, 128),
ForeignKey('{}.name'.format(roles_tablename)))
resource = relationship('Resource', remote_side=[resource_type_name])
role = relationship('Role', remote_side=[role_name])
members = relationship('Member',
secondary=binding_members,
back_populates='bindings')
def __repr__(self):
"""String Representation
Returns:
str: Binding represented as
(id='{}', role='{}', resource='{}' members='{}')
"""
fmt_s = '<Binding(id={}, role={}, resource={} members={})>'
return fmt_s.format(
self.id,
self.role_name,
self.resource_type_name,
self.members)
class Member(base):
"""Row entry for a policy member."""
__tablename__ = members_tablename
name = Column(String(256), primary_key=True)
type = Column(String(64))
member_name = Column(String(256))
parents = relationship(
'Member',
secondary=group_members,
primaryjoin=name == group_members.c.members_name,
secondaryjoin=name == group_members.c.group_name)
children = relationship(
'Member',
secondary=group_members,
primaryjoin=name == group_members.c.group_name,
secondaryjoin=name == group_members.c.members_name)
bindings = relationship('Binding',
secondary=binding_members,
back_populates='members')
def __repr__(self):
"""String representation.
Returns:
str: Member represented as (name='{}', type='{}')
"""
return '<Member(name={}, type={})>'.format(
self.name, self.type)
class GroupInGroup(base):
"""Row for a group-in-group membership."""
__tablename__ = denormed_group_in_group
parent = Column(String(256), primary_key=True)
member = Column(String(256), primary_key=True)
def __repr__(self):
"""String representation.
Returns:
str: GroupInGroup represented as (parent='{}', member='{}')
"""
return '<GroupInGroup(parent={}, member={})>'.format(
self.parent,
self.member)
class Role(base):
"""Row entry for an IAM role."""
__tablename__ = roles_tablename
name = Column(get_string_by_dialect(dbengine.dialect.name, 128),
primary_key=True)
title = Column(String(128), default='')
stage = Column(String(128), default='')
description = Column(String(1024), default='')
custom = Column(Boolean, default=False)
permissions = relationship('Permission',
secondary=role_permissions,
back_populates='roles')
def __repr__(self):
"""String Representation
Returns:
str: Role represented by name
"""
return '<Role(name=%s)>' % self.name
class Permission(base):
"""Row entry for an IAM permission."""
__tablename__ = permissions_tablename
name = Column(String(128), primary_key=True)
roles = relationship('Role',
secondary=role_permissions,
back_populates='permissions')
def __repr__(self):
"""String Representation
Returns:
str: Permission represented by name
"""
return '<Permission(name=%s)>' % self.name
# pylint: disable=too-many-public-methods
class ModelAccess(object):
"""Data model facade, implement main API against database."""
TBL_GROUP_IN_GROUP = GroupInGroup
TBL_GROUPS_SETTINGS = groups_settings
TBL_BINDING = Binding
TBL_MEMBER = Member
TBL_PERMISSION = Permission
TBL_ROLE = Role
TBL_RESOURCE = Resource
TBL_MEMBERSHIP = group_members
# Set of member binding types that expand like groups.
GROUP_TYPES = {'group',
'projecteditor',
'projectowner',
'projectviewer'}
# Members that represent all users
ALL_USER_MEMBERS = ['allusers', 'allauthenticatedusers']
@classmethod
def delete_all(cls, engine):
"""Delete all data from the model.
Args:
engine (object): database engine
"""
LOGGER.info('Deleting all data from the model.')
role_permissions.drop(engine)
binding_members.drop(engine)
group_members.drop(engine)
groups_settings.drop(engine)
Binding.__table__.drop(engine)
Permission.__table__.drop(engine)
GroupInGroup.__table__.drop(engine)
Role.__table__.drop(engine)
Member.__table__.drop(engine)
Resource.__table__.drop(engine)
@classmethod
def denorm_group_in_group(cls, session):
"""Denormalize group-in-group relation.
This method will fill the GroupInGroup table with
(parent, member) if parent is an ancestor of member,
whenever adding or removing a new group or group-group
relationship, this method should be called to re-denormalize
Args:
session (object): Database session to use.
Returns:
int: Number of iterations.
Raises:
                Exception: denormalize fail
"""
tbl1 = aliased(GroupInGroup.__table__, name='alias1')
tbl2 = aliased(GroupInGroup.__table__, name='alias2')
tbl3 = aliased(GroupInGroup.__table__, name='alias3')
if get_sql_dialect(session) != 'sqlite':
# Lock tables for denormalization
# including aliases 1-3
locked_tables = [
'`{}`'.format(GroupInGroup.__tablename__),
'`{}` as {}'.format(
GroupInGroup.__tablename__,
tbl1.name),
'`{}` as {}'.format(
GroupInGroup.__tablename__,
tbl2.name),
'`{}` as {}'.format(
GroupInGroup.__tablename__,
tbl3.name),
'`{}`'.format(group_members.name)]
lock_stmts = ['{} WRITE'.format(tbl) for tbl in locked_tables]
query = 'LOCK TABLES {}'.format(', '.join(lock_stmts))
session.execute(query)
try:
# Remove all existing rows in the denormalization
session.execute(GroupInGroup.__table__.delete())
# Select member relation into GroupInGroup
qry = (GroupInGroup.__table__.insert().from_select(
['parent', 'member'], group_members.select().where(
group_members.c.group_name.startswith('group/')
).where(
group_members.c.members_name.startswith('group/')
)
))
session.execute(qry)
iterations = 0
rows_affected = True
while rows_affected:
# Join membership on its own to find transitive
expansion = tbl1.join(tbl2, tbl1.c.member == tbl2.c.parent)
# Left outjoin to find the entries that
# are already in the table to prevent
# inserting already existing entries
expansion = expansion.outerjoin(
tbl3,
and_(tbl1.c.parent == tbl3.c.parent,
tbl2.c.member == tbl3.c.member))
# Select only such elements that are not
# already in the table, indicated as NULL
# values through the outer-left-join
stmt = (
select([tbl1.c.parent,
tbl2.c.member])
.select_from(expansion)
# pylint: disable=singleton-comparison
.where(tbl3.c.parent == None)
.distinct()
)
# Execute the query and insert into the table
qry = (GroupInGroup.__table__
.insert()
.from_select(['parent', 'member'], stmt))
rows_affected = bool(session.execute(qry).rowcount)
iterations += 1
except Exception as e:
LOGGER.exception(e)
session.rollback()
raise
finally:
if get_sql_dialect(session) != 'sqlite':
session.execute('UNLOCK TABLES')
session.commit()
return iterations
@classmethod
def expand_special_members(cls, session):
"""Create dynamic groups for project(Editor|Owner|Viewer).
Should be called after IAM bindings are added to the model.
Args:
session (object): Database session to use.
"""
member_type_map = {
'projecteditor': 'roles/editor',
'projectowner': 'roles/owner',
'projectviewer': 'roles/viewer'}
for parent_member in cls.list_group_members(
session, '', member_types=list(member_type_map.keys())):
member_type, project_id = parent_member.split('/')
role = member_type_map[member_type]
try:
iam_policy = cls.get_iam_policy(
session,
'project/{}'.format(project_id),
roles=[role])
LOGGER.info('iam_policy: %s', iam_policy)
except NoResultFound:
LOGGER.warning('Found a non-existent project, or project '
'outside of the organization, in an IAM '
'binding: %s', parent_member)
continue
members = iam_policy.get('bindings', {}).get(role, [])
expanded_members = cls.expand_members(session, members)
for member in expanded_members:
stmt = cls.TBL_MEMBERSHIP.insert(
{'group_name': parent_member,
'members_name': member.name})
session.execute(stmt)
if member.type == 'group' and member.name in members:
session.add(cls.TBL_GROUP_IN_GROUP(
parent=parent_member,
member=member.name))
session.commit()
@classmethod
def explain_granted(cls, session, member_name, resource_type_name,
role, permission):
"""Provide info about how the member has access to the resource.
            For example, if member m1 can access resource r1 with permission p,
            it might be granted by binding (r2, rol, g1), where
            r1 is a child resource in a project or folder r2,
            role rol contains permission p, and
            m1 is a member in group g1.
            This method lists the bindings that grant the access, the member
            relations, and the resource hierarchy.
Args:
session (object): Database session.
member_name (str): name of the member
resource_type_name (str): type_name of the resource
role (str): role to query
permission (str): permission to query
Returns:
tuples: (bindings, member_graph, resource_type_names) bindings,
                the bindings that grant the access; member_graph, the graph
                showing how the member is included in the binding;
                resource_type_names, the resource tree
Raises:
Exception: not granted
"""
members, member_graph = cls.reverse_expand_members(
session, [member_name], request_graph=True)
member_names = [m.name for m in members]
resource_type_names = [r.type_name for r in
cls.find_resource_path(session,
resource_type_name)]
if role:
roles = set([role])
qry = session.query(Binding, Member).join(
binding_members).join(Member)
else:
roles = [r.name for r in
cls.get_roles_by_permission_names(
session,
[permission])]
qry = session.query(Binding, Member)
qry = qry.join(binding_members).join(Member)
qry = qry.join(Role).join(role_permissions).join(Permission)
qry = qry.filter(Binding.role_name.in_(roles))
qry = qry.filter(Member.name.in_(member_names))
qry = qry.filter(
Binding.resource_type_name.in_(resource_type_names))
result = qry.all()
if not result:
error_message = 'Grant not found: ({},{},{})'.format(
member_name,
resource_type_name,
role if role is not None else permission)
LOGGER.error(error_message)
raise Exception(error_message)
else:
bindings = [(b.resource_type_name, b.role_name, m.name)
for b, m in result]
return bindings, member_graph, resource_type_names
@classmethod
def scanner_iter(cls, session, resource_type,
parent_type_name=None):
"""Iterate over all resources with the specified type.
Args:
session (object): Database session.
resource_type (str): type of the resource to scan
parent_type_name (str): type_name of the parent resource
Returns:
generator: Generator of resources returned from the query.
"""
query = (
session.query(Resource)
.filter(Resource.type == resource_type)
.options(joinedload(Resource.parent))
.enable_eagerloads(True))
if parent_type_name:
query = query.filter(
Resource.parent_type_name == parent_type_name)
return page_query(query)
@classmethod
def scanner_fetch_groups_settings(cls, session, only_iam_groups):
"""Fetch Groups Settings.
Args:
session (object): Database session.
only_iam_groups (bool): boolean indicating whether we want to
only fetch groups settings for which there is at least 1 iam
policy
Yields:
Resource: resource that match the query
"""
if only_iam_groups:
query = (session.query(groups_settings)
.join(Member).join(binding_members)
.distinct().enable_eagerloads(True))
else:
query = (session.query(groups_settings).enable_eagerloads(True))
for resource in query.yield_per(PER_YIELD):
yield resource
@classmethod
def explain_denied(cls, session, member_name, resource_type_names,
permission_names, role_names):
"""Explain why an access is denied
Provide information how to grant access to a member if such
access is denied with current IAM policies.
For example, member m1 cannot access resource r1 with permission
p, this method shows the bindings with rol that covered the
desired permission on the resource r1 and its ancestors.
If adding this member to any of these bindings, such access
can be granted. An overgranting level is also provided
Args:
session (object): Database session.
member_name (str): name of the member
resource_type_names (list): list of type_names of resources
permission_names (list): list of permissions
role_names (list): list of roles
Returns:
list: list of tuples,
(overgranting,[(role_name,member_name,resource_name)])
Raises:
Exception: No roles covering requested permission set,
Not possible
"""
if not role_names:
role_names = [r.name for r in
cls.get_roles_by_permission_names(
session,
permission_names)]
if not role_names:
error_message = 'No roles covering requested permission set'
LOGGER.error(error_message)
raise Exception(error_message)
resource_hierarchy = (
cls.resource_ancestors(session,
resource_type_names))
def find_binding_candidates(resource_hierarchy):
"""Find the root node in the ancestors.
From there, walk down the resource tree and add
every node until a node has more than one child.
This is the set of nodes which grants access to
at least all of the resources requested.
There is always a chain with a single node root.
Args:
resource_hierarchy (dict): graph of the resource hierarchy
Returns:
list: candidates to add to bindings that potentially grant
access
"""
root = None
for parent in resource_hierarchy.keys():
is_root = True
for children in resource_hierarchy.values():
if parent in children:
is_root = False
break
if is_root:
root = parent
chain = [root]
cur = root
while len(resource_hierarchy[cur]) == 1:
cur = next(iter(resource_hierarchy[cur]))
chain.append(cur)
return chain
bind_res_candidates = find_binding_candidates(
resource_hierarchy)
bindings = (
session.query(Binding, Member)
.join(binding_members)
.join(Member)
.join(Role)
.filter(Binding.resource_type_name.in_(
bind_res_candidates))
.filter(Role.name.in_(role_names))
.filter(or_(Member.type == 'group',
Member.name == member_name))
.filter(and_((binding_members.c.bindings_id ==
Binding.id),
(binding_members.c.members_name ==
Member.name)))
.filter(Role.name == Binding.role_name)
.all())
strategies = []
for resource in bind_res_candidates:
for role_name in role_names:
overgranting = (len(bind_res_candidates) -
bind_res_candidates.index(resource) -
1)
strategies.append(
(overgranting, [
(role, member_name, resource)
for role in [role_name]]))
if bindings:
for binding, member in bindings:
overgranting = (len(bind_res_candidates) - 1 -
bind_res_candidates.index(
binding.resource_type_name))
strategies.append(
(overgranting, [
(binding.role_name,
member.name,
binding.resource_type_name)]))
return strategies
@classmethod
def query_access_by_member(cls, session, member_name, permission_names,
expand_resources=False,
reverse_expand_members=True):
"""Return the set of resources the member has access to.
By default, this method expand group_member relation,
so the result includes all resources can be accessed by the
groups that the member is in.
By default, this method does not expand resource hierarchy,
so the result does not include a resource if such resource does
not have a direct binding to allow access.
Args:
session (object): Database session.
member_name (str): name of the member
permission_names (list): list of names of permissions to query
expand_resources (bool): whether to expand resources
reverse_expand_members (bool): whether to expand members
Returns:
list: list of access tuples, ("role_name", "resource_type_name")
"""
if reverse_expand_members:
member_names = [m.name for m in
cls.reverse_expand_members(session,
[member_name],
False)]
else:
member_names = [member_name]
roles = cls.get_roles_by_permission_names(
session, permission_names)
qry = (
session.query(Binding)
.join(binding_members)
.join(Member)
.filter(Binding.role_name.in_([r.name for r in roles]))
.filter(Member.name.in_(member_names))
)
bindings = qry.yield_per(1024)
if not expand_resources:
return [(binding.role_name,
[binding.resource_type_name]) for binding in bindings]
r_type_names = [binding.resource_type_name for binding in bindings]
expansion = cls.expand_resources_by_type_names(
session,
r_type_names)
res_exp = {k.type_name: [v.type_name for v in values]
for k, values in expansion.items()}
return [(binding.role_name,
res_exp[binding.resource_type_name])
for binding in bindings]
@classmethod
def query_access_by_permission(cls,
session,
role_name=None,
permission_name=None,
expand_groups=False,
expand_resources=False):
"""Query access via the specified permission
Return all the (Principal, Resource) combinations allowing
satisfying access via the specified permission.
By default, the group relation and resource hierarchy will not be
expanded, so the results will only contains direct bindings
filtered by permission. But the relations can be expanded
Args:
session (object): Database session.
role_name (str): Role name to query for
permission_name (str): Permission name to query for.
expand_groups (bool): Whether or not to expand groups.
expand_resources (bool): Whether or not to expand resources.
Yields:
            object: A generator of access tuples.
Raises:
ValueError: If neither role nor permission is set.
"""
if role_name:
role_names = [role_name]
elif permission_name:
role_names = [p.name for p in
cls.get_roles_by_permission_names(
session,
[permission_name])]
else:
error_message = 'Either role or permission must be set'
LOGGER.error(error_message)
raise ValueError(error_message)
if expand_resources:
expanded_resources = aliased(Resource)
qry = (
session.query(expanded_resources, Binding, Member)
.filter(binding_members.c.bindings_id == Binding.id)
.filter(binding_members.c.members_name == Member.name)
.filter(expanded_resources.full_name.startswith(
Resource.full_name))
.filter((Resource.type_name ==
Binding.resource_type_name))
.filter(Binding.role_name.in_(role_names))
.order_by(expanded_resources.name.asc(),
Binding.role_name.asc())
)
else:
qry = (
session.query(Resource, Binding, Member)
.filter(binding_members.c.bindings_id == Binding.id)
.filter(binding_members.c.members_name == Member.name)
.filter((Resource.type_name ==
Binding.resource_type_name))
.filter(Binding.role_name.in_(role_names))
.order_by(Resource.name.asc(), Binding.role_name.asc())
)
if expand_groups:
to_expand = set([m.name for _, _, m in
qry.yield_per(PER_YIELD)])
expansion = cls.expand_members_map(session, to_expand,
show_group_members=False,
member_contain_self=True)
qry = qry.distinct()
cur_resource = None
cur_role = None
cur_members = set()
for resource, binding, member in qry.yield_per(PER_YIELD):
if cur_resource != resource.type_name:
if cur_resource is not None:
yield cur_role, cur_resource, cur_members
cur_resource = resource.type_name
cur_role = binding.role_name
cur_members = set()
if expand_groups:
for member_name in expansion[member.name]:
cur_members.add(member_name)
else:
cur_members.add(member.name)
if cur_resource is not None:
yield cur_role, cur_resource, cur_members
@classmethod
def query_access_by_resource(cls, session, resource_type_name,
permission_names, expand_groups=False):
"""Query access by resource
Return members who have access to the given resource.
The resource hierarchy will always be expanded, so even if the
current resource does not have that binding, if its ancestors
have the binding, the access will be shown
By default, the group relationship will not be expanded
Args:
session (object): db session
resource_type_name (str): type_name of the resource to query
permission_names (list): list of strs, names of the permissions
to query
expand_groups (bool): whether to expand groups
Returns:
dict: role_member_mapping, <"role_name", "member_names">
"""
roles = cls.get_roles_by_permission_names(
session, permission_names)
resources = cls.find_resource_path(session, resource_type_name)
res = (session.query(Binding, Member)
.filter(
Binding.role_name.in_([r.name for r in roles]),
Binding.resource_type_name.in_(
[r.type_name for r in resources]))
.join(binding_members).join(Member))
role_member_mapping = collections.defaultdict(set)
for binding, member in res:
role_member_mapping[binding.role_name].add(member.name)
if expand_groups:
for role in role_member_mapping:
role_member_mapping[role] = (
[m.name for m in cls.expand_members(
session,
role_member_mapping[role])])
return role_member_mapping
@classmethod
def query_permissions_by_roles(cls, session, role_names, role_prefixes,
_=1024):
"""Resolve permissions for the role.
Args:
session (object): db session
role_names (list): list of strs, names of the roles
role_prefixes (list): list of strs, prefixes of the roles
_ (int): place occupation
Returns:
list: list of (Role, Permission)
Raises:
Exception: No roles or role prefixes specified
"""
if not role_names and not role_prefixes:
error_message = 'No roles or role prefixes specified'
LOGGER.error(error_message)
raise Exception(error_message)
qry = session.query(Role, Permission).join(
role_permissions).join(Permission)
if role_names:
qry = qry.filter(Role.name.in_(role_names))
if role_prefixes:
qry = qry.filter(
or_(*[Role.name.startswith(prefix)
for prefix in role_prefixes]))
return qry.all()
@classmethod
def set_iam_policy(cls,
session,
resource_type_name,
policy,
update_members=False):
"""Set IAM policy
Sets an IAM policy for the resource, check the etag when setting
new policy and reassign new etag.
Check etag to avoid race condition
Args:
session (object): db session
resource_type_name (str): type_name of the resource
policy (dict): the policy to set on the resource
update_members (bool): If true, then add new members to Member
table. This must be set when the call to set_iam_policy
happens outside of the model InventoryImporter class. Tests
or users that manually add an IAM policy need to mark this
as true to ensure the model remains consistent.
Raises:
Exception: Etag doesn't match
"""
LOGGER.info('Setting IAM policy, resource_type_name = %s, policy'
' = %s, session = %s',
resource_type_name, policy, session)
old_policy = cls.get_iam_policy(session, resource_type_name)
if policy['etag'] != old_policy['etag']:
error_message = 'Etags distinct, stored={}, provided={}'.format(
old_policy['etag'], policy['etag'])
LOGGER.error(error_message)
raise Exception(error_message)
old_policy = old_policy['bindings']
policy = policy['bindings']
def filter_etag(policy):
"""Filter etag key/value out of policy map.
Args:
policy (dict): the policy to filter
Returns:
dict: policy without etag, <"bindings":[<role, members>]>
Raises:
"""
return {k: v for k, v in policy.items() if k != 'etag'}
def calculate_diff(policy, old_policy):
"""Calculate the grant/revoke difference between policies.
The diff = policy['bindings'] - old_policy['bindings']
Args:
policy (dict): the new policy in dict format
old_policy (dict): the old policy in dict format
Returns:
dict: <role, members> diff of bindings
"""
diff = collections.defaultdict(list)
for role, members in filter_etag(policy).items():
if role in old_policy:
for member in members:
if member not in old_policy[role]:
diff[role].append(member)
else:
diff[role] = members
return diff
grants = calculate_diff(policy, old_policy)
revocations = calculate_diff(old_policy, policy)
for role, members in revocations.items():
bindings = (
session.query(Binding)
.filter((Binding.resource_type_name ==
resource_type_name))
.filter(Binding.role_name == role)
.join(binding_members).join(Member)
.filter(Member.name.in_(members)).all())
for binding in bindings:
session.delete(binding)
for role, members in grants.items():
inserted = False
existing_bindings = (
session.query(Binding)
.filter((Binding.resource_type_name ==
resource_type_name))
.filter(Binding.role_name == role)
.all())
if update_members:
for member in members:
if not cls.get_member(session, member):
try:
# This is the default case, e.g. 'group/foobar'
m_type, name = member.split('/', 1)
except ValueError:
# Special groups like 'allUsers'
m_type, name = member, member
session.add(cls.TBL_MEMBER(
name=member,
type=m_type,
member_name=name))
for binding in existing_bindings:
if binding.role_name == role:
inserted = True
for member in members:
binding.members.append(
session.query(Member).filter(
Member.name == member).one())
if not inserted:
binding = Binding(
resource_type_name=resource_type_name,
role=session.query(Role).filter(
Role.name == role).one())
binding.members = session.query(Member).filter(
Member.name.in_(members)).all()
session.add(binding)
resource = session.query(Resource).filter(
Resource.type_name == resource_type_name).one()
resource.increment_update_counter()
session.commit()
@classmethod
def get_iam_policy(cls, session, resource_type_name, roles=None):
"""Return the IAM policy for a resource.
Args:
session (object): db session
resource_type_name (str): type_name of the resource to query
roles (list): An optional list of roles to limit the results to
Returns:
dict: the IAM policy
"""
resource = session.query(Resource).filter(
Resource.type_name == resource_type_name).one()
policy = {'etag': resource.get_etag(),
'bindings': {},
'resource': resource.type_name}
bindings = session.query(Binding).filter(
Binding.resource_type_name == resource_type_name)
if roles:
bindings = bindings.filter(Binding.role_name.in_(roles))
for binding in bindings.all():
role = binding.role_name
members = [m.name for m in binding.members]
policy['bindings'][role] = members
return policy
@classmethod
def check_iam_policy(cls, session, resource_type_name, permission_name,
member_name):
"""Check access according to the resource IAM policy.
Args:
session (object): db session
resource_type_name (str): type_name of the resource to check
permission_name (str): name of the permission to check
member_name (str): name of the member to check
Returns:
bool: whether such access is allowed
Raises:
Exception: member or resource not found
"""
member_names = [m.name for m in
cls.reverse_expand_members(
session,
[member_name])]
resource_type_names = [r.type_name for r in cls.find_resource_path(
session,
resource_type_name)]
if not member_names:
error_message = 'Member not found: {}'.format(member_name)
LOGGER.error(error_message)
raise Exception(error_message)
if not resource_type_names:
error_message = 'Resource not found: {}'.format(
resource_type_name)
LOGGER.error(error_message)
raise Exception(error_message)
return (session.query(Permission)
.filter(Permission.name == permission_name)
.join(role_permissions).join(Role).join(Binding)
.filter(Binding.resource_type_name.in_(resource_type_names))
.join(binding_members).join(Member)
.filter(Member.name.in_(member_names)).first() is not None)
@classmethod
def list_roles_by_prefix(cls, session, role_prefix):
"""Provides a list of roles matched via name prefix.
Args:
session (object): db session
role_prefix (str): prefix of the role_name
Returns:
list: list of role_names that match the query
"""
return [r.name for r in session.query(Role).filter(
Role.name.startswith(role_prefix)).all()]
@classmethod
def add_role_by_name(cls, session, role_name, permission_names):
"""Creates a new role.
Args:
session (object): db session
role_name (str): name of the role to add
permission_names (list): list of permissions in the role
"""
LOGGER.info('Creating a new role, role_name = %s, permission_names'
' = %s, session = %s',
role_name, permission_names, session)
permission_names = set(permission_names)
existing_permissions = session.query(Permission).filter(
Permission.name.in_(permission_names)).all()
for existing_permission in existing_permissions:
try:
permission_names.remove(existing_permission.name)
except KeyError:
LOGGER.warning('existing_permissions.name = %s, KeyError',
existing_permission.name)
new_permissions = [Permission(name=n) for n in permission_names]
for perm in new_permissions:
session.add(perm)
cls.add_role(session, role_name,
existing_permissions + new_permissions)
session.commit()
@classmethod
def add_group_member(cls,
session,
member_type_name,
parent_type_names,
denorm=False):
"""Add member, optionally with parent relationship.
Args:
session (object): db session
member_type_name (str): type_name of the member to add
parent_type_names (list): type_names of the parents
            denorm (bool): whether to denormalize the group-in-group table after
addition
"""
LOGGER.info('Adding a member, member_type_name = %s,'
' parent_type_names = %s, denorm = %s, session = %s',
member_type_name, parent_type_names, denorm, session)
cls.add_member(session,
member_type_name,
parent_type_names,
denorm)
session.commit()
@classmethod
def list_group_members(cls,
session,
member_name_prefix,
member_types=None):
"""Returns members filtered by prefix.
Args:
session (object): db session
member_name_prefix (str): the prefix of the member_name
member_types (list): an optional list of member types to filter
the results by.
Returns:
list: list of Members that match the query
"""
qry = session.query(Member).filter(
Member.member_name.startswith(member_name_prefix))
if member_types:
qry = qry.filter(Member.type.in_(member_types))
return [m.name for m in qry.all()]
@classmethod
def iter_groups(cls, session):
"""Returns iterator of all groups in model.
Args:
session (object): db session
Yields:
Member: group in the model
"""
qry = session.query(Member).filter(Member.type == 'group')
for group in qry.yield_per(1024):
yield group
@classmethod
def iter_resources_by_prefix(cls,
session,
full_resource_name_prefix=None,
type_name_prefix=None,
type_prefix=None,
name_prefix=None):
"""Returns iterator to resources filtered by prefix.
Args:
session (object): db session
full_resource_name_prefix (str): the prefix of the
full_resource_name
type_name_prefix (str): the prefix of the type_name
type_prefix (str): the prefix of the type
            name_prefix (str): the prefix of the name
Yields:
Resource: that match the query
Raises:
Exception: No prefix given
"""
if not any([arg is not None for arg in [full_resource_name_prefix,
type_name_prefix,
type_prefix,
name_prefix]]):
error_message = 'At least one prefix must be set'
LOGGER.error(error_message)
raise Exception(error_message)
qry = session.query(Resource)
if full_resource_name_prefix:
qry = qry.filter(Resource.full_name.startswith(
full_resource_name_prefix))
if type_name_prefix:
qry = qry.filter(Resource.type_name.startswith(
type_name_prefix))
if type_prefix:
qry = qry.filter(Resource.type.startswith(
type_prefix))
if name_prefix:
qry = qry.filter(Resource.name.startswith(
name_prefix))
for resource in qry.yield_per(1024):
yield resource
@classmethod
def list_resources_by_prefix(cls,
session,
full_resource_name_prefix=None,
type_name_prefix=None,
type_prefix=None,
name_prefix=None):
"""Returns resources filtered by prefix.
Args:
session (object): db session
full_resource_name_prefix (str): the prefix of the
full_resource_name
type_name_prefix (str): the prefix of the type_name
type_prefix (str): the prefix of the type
            name_prefix (str): the prefix of the name
Returns:
list: list of Resources match the query
Raises:
"""
return list(
cls.iter_resources_by_prefix(session,
full_resource_name_prefix,
type_name_prefix,
type_prefix,
name_prefix))
@classmethod
def add_resource_by_name(cls,
session,
resource_type_name,
parent_type_name,
no_require_parent):
"""Adds resource specified via full name.
Args:
session (object): db session
resource_type_name (str): name of the resource
parent_type_name (str): name of the parent resource
no_require_parent (bool): if this resource has a parent
Returns:
Resource: Created new resource
"""
LOGGER.info('Adding resource via full name, resource_type_name'
' = %s, parent_type_name = %s, no_require_parent = %s,'
' session = %s', resource_type_name,
parent_type_name, no_require_parent, session)
if not no_require_parent:
parent = session.query(Resource).filter(
Resource.type_name == parent_type_name).one()
else:
parent = None
return cls.add_resource(session, resource_type_name, parent)
@classmethod
def add_resource(cls, session, resource_type_name, parent=None):
"""Adds resource by name.
Args:
session (object): db session
resource_type_name (str): name of the resource
parent (Resource): parent of the resource
Returns:
Resource: Created new resource
"""
LOGGER.info('Adding resource by name, resource_type_name = %s,'
' session = %s', resource_type_name, session)
res_type, res_name = resource_type_name.split('/')
parent_full_resource_name = (
'' if parent is None else parent.full_name)
full_resource_name = to_full_resource_name(
parent_full_resource_name,
resource_type_name)
resource = Resource(full_name=full_resource_name,
type_name=resource_type_name,
name=res_name,
type=res_type,
parent=parent)
session.add(resource)
return resource
@classmethod
def add_role(cls, session, name, permissions=None):
"""Add role by name.
Args:
session (object): db session
name (str): name of the role to add
permissions (list): permissions to add in the role
Returns:
Role: The created role
"""
LOGGER.info('Adding role, name = %s, permissions = %s,'
' session = %s', name, permissions, session)
permissions = [] if permissions is None else permissions
role = Role(name=name, permissions=permissions)
session.add(role)
return role
@classmethod
def add_permission(cls, session, name, roles=None):
"""Add permission by name.
Args:
session (object): db session
name (str): name of the permission
            roles (list): list of roles to add the permission to
Returns:
Permission: The created permission
"""
LOGGER.info('Adding permission, name = %s, roles = %s'
' session = %s', name, roles, session)
roles = [] if roles is None else roles
permission = Permission(name=name, roles=roles)
session.add(permission)
return permission
@classmethod
def add_binding(cls, session, resource, role, members):
"""Add a binding to the model.
Args:
session (object): db session
resource (str): Resource to be added in the binding
role (str): Role to be added in the binding
members (list): members to be added in the binding
Returns:
Binding: the created binding
"""
LOGGER.info('Adding a binding to the model, resource = %s,'
' role = %s, members = %s, session = %s',
resource, role, members, session)
binding = Binding(resource=resource, role=role, members=members)
session.add(binding)
return binding
@classmethod
def add_member(cls,
session,
type_name,
parent_type_names=None,
denorm=False):
"""Add a member to the model.
Args:
session (object): db session
type_name (str): type_name of the resource to add
parent_type_names (list): list of parent names to add
denorm (bool): whether to denormalize the GroupInGroup relation
Returns:
Member: the created member
Raises:
Exception: parent not found
"""
LOGGER.info('Adding a member to the model, type_name = %s,'
' parent_type_names = %s, denorm = %s, session = %s',
type_name, parent_type_names, denorm, session)
if not parent_type_names:
parent_type_names = []
res_type, name = type_name.split('/', 1)
parents = session.query(Member).filter(
Member.name.in_(parent_type_names)).all()
if len(parents) != len(parent_type_names):
msg = 'Parents: {}, expected: {}'.format(
parents, parent_type_names)
error_message = 'Parent not found, {}'.format(msg)
LOGGER.error(error_message)
raise Exception(error_message)
member = Member(name=type_name,
member_name=name,
type=res_type,
parents=parents)
session.add(member)
session.commit()
if denorm and res_type == 'group' and parents:
cls.denorm_group_in_group(session)
return member
@classmethod
def expand_resources_by_type_names(cls, session, res_type_names):
"""Expand resources by type/name format.
Args:
session (object): db session
res_type_names (list): list of resources in type_names
Returns:
dict: mapping in the form:
{res_type_name: Expansion(res_type_name), ... }
"""
res_key = aliased(Resource, name='res_key')
res_values = aliased(Resource, name='res_values')
expressions = []
for res_type_name in res_type_names:
expressions.append(and_(
res_key.type_name == res_type_name))
res = (
session.query(res_key, res_values)
.filter(res_key.type_name.in_(res_type_names))
.filter(res_values.full_name.startswith(
res_key.full_name))
.yield_per(1024)
)
mapping = collections.defaultdict(set)
for k, value in res:
mapping[k].add(value)
return mapping
@classmethod
def reverse_expand_members(cls, session, member_names,
request_graph=False):
"""Expand members to their groups.
List all groups that contains these members. Also return
the graph if requested.
Args:
session (object): db session
member_names (list): list of members to expand
            request_graph (bool): whether the parent-child graph is provided
Returns:
object: set if graph not requested, set and graph if requested
"""
member_names.extend(cls.ALL_USER_MEMBERS)
members = session.query(Member).filter(
Member.name.in_(member_names)).all()
membership_graph = collections.defaultdict(set)
member_set = set()
new_member_set = set()
def add_to_sets(members, child):
"""Adds the members & children to the sets.
Args:
members (list): list of Members to be added
child (Member): child to be added
"""
for member in members:
if request_graph and child:
membership_graph[child.name].add(member.name)
if request_graph and not child:
if member.name not in membership_graph:
membership_graph[member.name] = set()
if member not in member_set:
new_member_set.add(member)
member_set.add(member)
add_to_sets(members, None)
while new_member_set:
members_to_walk = new_member_set
new_member_set = set()
for member in members_to_walk:
add_to_sets(member.parents, member)
if request_graph:
return member_set, membership_graph
return member_set
@classmethod
def expand_members_map(cls,
session,
member_names,
show_group_members=True,
member_contain_self=True):
"""Expand group membership keyed by member.
Args:
session (object): db session
member_names (set): Member names to expand
show_group_members (bool): Whether to include subgroups
member_contain_self (bool): Whether to include a parent
as its own member
Returns:
dict: <Member, set(Children)>
"""
def separate_groups(member_names):
"""Separate groups and other members in two lists.
This is a helper function. groups are needed to query on
group_in_group table
Args:
member_names (list): list of members to be separated
Returns:
tuples: two lists of strs containing groups and others
"""
groups = []
others = []
for name in member_names:
member_type = name.split('/')[0]
if member_type in cls.GROUP_TYPES:
groups.append(name)
else:
others.append(name)
return groups, others
selectables = []
group_names, other_names = separate_groups(member_names)
t_ging = GroupInGroup.__table__
t_members = group_members
# This resolves groups to its transitive non-group members.
transitive_membership = (
select([t_ging.c.parent, t_members.c.members_name])
.select_from(t_ging.join(t_members,
(t_ging.c.member ==
t_members.c.group_name)))
).where(t_ging.c.parent.in_(group_names))
if not show_group_members:
transitive_membership = transitive_membership.where(
not_(t_members.c.members_name.startswith('group/')))
selectables.append(
transitive_membership.alias('transitive_membership'))
direct_membership = (
select([t_members.c.group_name,
t_members.c.members_name])
.where(t_members.c.group_name.in_(group_names))
)
if not show_group_members:
direct_membership = direct_membership.where(
not_(t_members.c.members_name.startswith('group/')))
selectables.append(
direct_membership.alias('direct_membership'))
if show_group_members:
# Show groups as members of other groups
group_in_groups = (
select([t_ging.c.parent,
t_ging.c.member]).where(
t_ging.c.parent.in_(group_names))
)
selectables.append(
group_in_groups.alias('group_in_groups'))
# Union all the queries
qry = union(*selectables)
# Build the result dict
result = collections.defaultdict(set)
for parent, child in session.execute(qry):
result[parent].add(child)
for parent in other_names:
result[parent] = set()
# Add each parent as its own member
if member_contain_self:
for name in member_names:
result[name].add(name)
return result
@classmethod
def expand_members(cls, session, member_names):
"""Expand group membership towards the members.
Args:
session (object): db session
member_names (list): list of strs of member names
Returns:
set: expanded group members
"""
members = session.query(Member).filter(
Member.name.in_(member_names)).all()
def is_group(member):
"""Returns true iff the member is a group.
Args:
member (Member): member to check
Returns:
bool: whether the member is a group
"""
return member.type in cls.GROUP_TYPES
group_set = set()
non_group_set = set()
new_group_set = set()
def add_to_sets(members):
"""Adds new members to the sets.
Args:
members (list): members to be added
"""
for member in members:
if is_group(member):
if member not in group_set:
new_group_set.add(member)
group_set.add(member)
else:
non_group_set.add(member)
add_to_sets(members)
while new_group_set:
groups_to_walk = new_group_set
new_group_set = set()
for group in groups_to_walk:
add_to_sets(group.children)
return group_set.union(non_group_set)
@classmethod
def resource_ancestors(cls, session, resource_type_names):
"""Resolve the transitive ancestors by type/name format.
Given a group of resource and find out all their parents.
Then this method group the pairs with parent. Used to determine
resource candidates to grant access in explain denied.
Args:
session (object): db session
resource_type_names (list): list of strs, resources to query
Returns:
dict: <parent, childs> graph of the resource hierarchy
"""
resource_names = resource_type_names
resource_graph = collections.defaultdict(set)
res_childs = aliased(Resource, name='res_childs')
res_anc = aliased(Resource, name='resource_parent')
resources_set = set(resource_names)
resources_new = set(resource_names)
for resource in resources_new:
resource_graph[resource] = set()
while resources_new:
resources_new = set()
for parent, child in (
session.query(res_anc, res_childs)
.filter(res_childs.type_name.in_(resources_set))
.filter(res_childs.parent_type_name ==
res_anc.type_name)
.all()):
if parent.type_name not in resources_set:
resources_new.add(parent.type_name)
resources_set.add(parent.type_name)
resources_set.add(child.type_name)
resource_graph[parent.type_name].add(child.type_name)
return resource_graph
@classmethod
def find_resource_path(cls, session, resource_type_name):
"""Find resource ancestors by type/name format.
Find all ancestors of a resource and return them in order
Args:
session (object): db session
resource_type_name (str): resource to query
Returns:
list: list of Resources, transitive ancestors for the given
resource
"""
qry = (
session.query(Resource).filter(
Resource.type_name == resource_type_name)
)
resources = qry.all()
return cls._find_resource_path(session, resources)
@classmethod
def _find_resource_path(cls, _, resources):
"""Find the list of transitive ancestors for the given resource.
Args:
_ (object): position holder
resources (list): list of the resources to query
Returns:
list: list of Resources, transitive ancestors for the given
resource
"""
if not resources:
return []
path = []
resource = resources[0]
path.append(resource)
while resource.parent:
resource = resource.parent
path.append(resource)
return path
@classmethod
def get_roles_by_permission_names(cls, session, permission_names):
"""Return the list of roles covering the specified permissions.
Args:
session (object): db session
permission_names (list): permissions to be covered by.
Returns:
set: roles set that cover the permissions
"""
permission_set = set(permission_names)
qry = session.query(Permission)
if permission_set:
qry = qry.filter(Permission.name.in_(permission_set))
permissions = qry.all()
roles = set()
for permission in permissions:
for role in permission.roles:
roles.add(role)
result_set = set()
for role in roles:
role_permissions = set(
[p.name for p in role.permissions])
if permission_set.issubset(role_permissions):
result_set.add(role)
return result_set
@classmethod
def get_member(cls, session, name):
"""Get member by name.
Args:
session (object): db session
name (str): the name the member to query
Returns:
list: Members from the query
"""
return session.query(Member).filter(Member.name == name).all()
base.metadata.create_all(dbengine)
return sessionmaker(bind=dbengine), ModelAccess
def undefine_model(session_maker, data_access):
"""Deletes an entire model and the corresponding data in the database.
Args:
session_maker (func): session_maker function
data_access (ModelAccess): data access layer
"""
session = session_maker()
data_access.delete_all(session)
LOCK = Lock()
class ModelManager(object):
"""The Central class to create,list,get and delete models.
ModelManager is mostly used to do the lookup from model name to the
session cache which is given in each client's request.
"""
def __init__(self, dbengine):
"""Initialization
Args:
dbengine (object): Database engine
"""
self.engine = dbengine
self.modelmaker = self._create_model_session()
self.sessionmakers = {}
def _create_model_session(self):
"""Create a session to read from the models table.
Returns:
object: db session created
"""
MODEL_BASE.metadata.create_all(self.engine)
return db.ScopedSessionMaker(
sessionmaker(
bind=self.engine),
auto_commit=True)
@mutual_exclusive(LOCK)
def create(self, name):
"""Create a new model entry in the database.
Args:
name (str): model name
Returns:
str: the handle of the model
"""
LOGGER.info('Creating a new model entry in the database,'
' name = %s', name)
handle = generate_model_handle()
with self.modelmaker() as session:
utc_now = date_time.get_utc_now_datetime()
model = Model(
handle=handle,
name=name,
state='CREATED',
created_at_datetime=utc_now,
watchdog_timer_datetime=utc_now,
etag_seed=generate_model_seed(),
description='{}'
)
session.add(model)
self.sessionmakers[model.handle] = define_model(
model.handle, self.engine, model.etag_seed)
return handle
def get(self, model):
"""Get model data by handle.
Args:
model (str): model handle
Returns:
tuple: session and ModelAccess object
"""
session_maker, data_access = self._get(model)
return db.ScopedSession(session_maker()), data_access
def get_readonly_session(self):
"""Get read-only session.
Returns:
Session: The read-only session."""
return db.create_scoped_readonly_session(self.engine)
def _get(self, handle):
"""Get model data by name internal.
Args:
handle (str): the model handle
Returns:
Model: the model in the session maker
Raises:
KeyError: model handle not available
"""
if isinstance(handle, bytes):
handle = handle.decode('utf-8')
if handle not in [m.handle for m in self.models()]:
error_message = 'handle={}, available={}'.format(
handle,
[m.handle for m in self.models()]
)
LOGGER.error(error_message)
raise KeyError(error_message)
try:
return self.sessionmakers[handle]
except KeyError:
LOGGER.debug('Sessionmakers doesn\'t contain handle = %s,'
' creating a new handle.', handle)
with self.modelmaker() as session:
model = (
session.query(Model).filter(Model.handle == handle).one()
)
self.sessionmakers[model.handle] = define_model(
model.handle, self.engine, model.etag_seed)
return self.sessionmakers[model.handle]
@mutual_exclusive(LOCK)
def delete(self, model_name):
"""Delete a model entry in the database by name.
Args:
model_name (str): the name of the model to be deleted
"""
LOGGER.info('Deleting model by name, model_name = %s', model_name)
_, data_access = self._get(model_name)
if model_name in self.sessionmakers:
del self.sessionmakers[model_name]
with self.modelmaker() as session:
session.query(Model).filter(Model.handle == model_name).delete()
data_access.delete_all(self.engine)
def _models(self, expunge=False):
"""Return the list of models from the database.
Args:
expunge (bool): Whether or not to detach the object from
the session for use in another session.
Returns:
list: list of Models in the db
"""
with self.modelmaker() as session:
items = session.query(Model).all()
if expunge:
session.expunge_all()
return items
def models(self):
"""Expunging wrapper for _models.
Returns:
list: list of Models in the db
"""
return self._models(expunge=True)
def model(self, model_name, expunge=True, session=None):
"""Get model from database by name.
Args:
model_name (str): Model name or handle
expunge (bool): Whether or not to detach the object from
the session for use in another session.
session (object): Database session.
Returns:
Model: the dbo of the queried model
"""
def instantiate_model(session, model_name, expunge):
"""Creates a model object by querying the database.
Args:
session (object): Database session.
model_name (str): Model name to instantiate.
expunge (bool): Whether or not to detach the object from
the session for use in another session.
Returns:
Model: the dbo of the queried model
"""
item = session.query(Model).filter(
Model.handle == model_name).one()
if expunge:
session.expunge(item)
return item
if not session:
with self.modelmaker() as scoped_session:
return instantiate_model(scoped_session, model_name, expunge)
else:
return instantiate_model(session, model_name, expunge)
def get_model(self, model, expunge=True, session=None):
"""Get model from database by name or handle.
Args:
model (str): Model name or handle
expunge (bool): Whether or not to detach the object from
the session for use in another session.
session (object): Database session.
Returns:
Model: the dbo of the queried model
"""
def query_model(session, model, expunge):
"""Get a model object by querying the database.
Args:
session (object): Database session.
model (str): Model name or handle.
expunge (bool): Whether or not to detach the object from
the session for use in another session.
Returns:
Model: the dbo of the queried model
"""
item = session.query(Model).filter(or_(
Model.handle == model,
Model.name == model)).first()
if expunge and item:
session.expunge(item)
return item
if not session:
with self.modelmaker() as scoped_session:
return query_model(scoped_session, model, expunge)
else:
return query_model(session, model, expunge)
def add_description(self, model_name, new_description, session=None):
"""Add description to a model.
Args:
model_name (str): Model name
new_description (str): The description in json format.
session (object): Database session.
"""
if not session:
with self.modelmaker() as scoped_session:
model = scoped_session.query(Model).filter(
Model.handle == model_name).one()
else:
model = session.query(Model).filter(
Model.handle == model_name).one()
model.add_description(new_description)
def get_description(self, model_name, session=None):
"""Get the description to a model.
Args:
model_name (str): Model name
session (object): Database session.
Returns:
json: Dictionary of the model description.
"""
if not session:
with self.modelmaker() as scoped_session:
model = scoped_session.query(Model).filter(
Model.handle == model_name).one()
return json.loads(model.description)
else:
model = session.query(Model).filter(
Model.handle == model_name).one()
return json.loads(model.description)
def create_engine(*args, **kwargs):
"""Create engine wrapper to patch database options.
Args:
*args (list): Arguments.
**kwargs (dict): Arguments.
Returns:
object: Engine.
"""
sqlite_enforce_fks = 'sqlite_enforce_fks'
forward_kwargs = {k: v for k, v in kwargs.items()}
is_sqlite = False
for arg in args:
if 'sqlite' in arg:
is_sqlite = True
if sqlite_enforce_fks in forward_kwargs:
del forward_kwargs[sqlite_enforce_fks]
if is_sqlite:
engine = sqlalchemy_create_engine(*args, **forward_kwargs)
else:
# Default connection timeout for mysql is 10 seconds which is
# not enough for a bigger dataset, increasing this to 1 hour instead.
engine = sqlalchemy_create_engine(
*args,
pool_size=50,
connect_args={'connect_timeout': 3600},
**forward_kwargs)
dialect = engine.dialect.name
if dialect == 'sqlite':
@event.listens_for(engine, 'connect')
def do_connect(dbapi_connection, _):
"""Hooking database connect.
Args:
dbapi_connection (object): Database connection.
_ (object): Unknown.
"""
# Fix for nested transaction problems
dbapi_connection.isolation_level = None
            # Ensure efficient journaling and synchronization modes are set.
dbapi_connection.execute('pragma journal_mode=wal;')
dbapi_connection.execute('pragma synchronous=off;')
# 512 MB of RAM max for mmap operations, ensure shared memory for
# indexes across connections. (Size set in bytes)
dbapi_connection.execute('pragma mmap_size=536870912;')
if kwargs.get(sqlite_enforce_fks, False):
# Enable foreign key constraints
dbapi_connection.execute('pragma foreign_keys=ON')
@event.listens_for(engine, 'begin')
def do_begin(conn):
"""Hooking database transaction begin.
Args:
conn (object): Database connection.
"""
# Fix for nested transaction problems
conn.execute('BEGIN')
# pylint: disable=protected-access
engine.__explain_hooks = [do_connect, do_begin]
# pylint: enable=protected-access
return engine
def session_creator(model_name, filename=None, seed=None, echo=False):
"""Create a session maker for the model and db file.
Args:
model_name (str): the model name
filename (str): the db file to load the sqlite database
seed (str): the unique model handle
echo (bool): whether to echo all the statements
Returns:
tuple: session_maker and the ModelAccess object
"""
LOGGER.info('Creating session maker, model_name = %s, filename = %s',
model_name, filename)
if filename:
engine = create_engine('sqlite:///{}'.format(filename),
pool_recycle=POOL_RECYCLE_SECONDS)
else:
engine = create_engine('sqlite:///:memory:',
pool_recycle=POOL_RECYCLE_SECONDS, echo=echo)
if seed is None:
seed = generate_model_seed()
session_maker, data_access = define_model(model_name, engine, seed)
return session_maker, data_access
 | 1 | 35,019 | If I remember correctly, type_name is '{RESOURCE_TYPE}/{RESOURCE_NAME}'; if type_name is 700 max, then the name cannot exceed that size. | forseti-security-forseti-security | py |
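A small Python sketch of the constraint described in the review comment above. The 700-character cap and the helper below are assumptions taken from the comment, not from the file; only the '{RESOURCE_TYPE}/{RESOURCE_NAME}' format matches how `Resource.type_name` is built and split in the model code.

```python
# Illustrative only: because type_name is '<type>/<name>', any cap on the
# type_name column also bounds the name component.
TYPE_NAME_MAX_LENGTH = 700  # assumed column size, taken from the review comment


def make_type_name(res_type, res_name):
    """Build a type_name and enforce the assumed column limit."""
    type_name = '{}/{}'.format(res_type, res_name)
    if len(type_name) > TYPE_NAME_MAX_LENGTH:
        raise ValueError('type_name too long: {} > {}'.format(
            len(type_name), TYPE_NAME_MAX_LENGTH))
    return type_name
```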
@@ -20,6 +20,13 @@ def GetSizePrefix(buf, offset):
"""Extract the size prefix from a buffer."""
return encode.Get(packer.int32, buf, offset)
+def GetBufferIdentifier(buf, offset, size_prefixed = False):
+ """Extract the file_identifier from a buffer"""
+ offset+=(number_types.UOffsetTFlags.bytewidth if size_prefixed else 0) + number_types.UOffsetTFlags.bytewidth # offset sizeof(root table offset) + sizeof(size prefix) if present
+ return buf[offset:offset+encode.FILE_IDENTIFIER_LENGTH]
+def BufferHasIdentifier(buf, offset, file_identifier, size_prefixed = False):
+ return GetBufferIdentifier(buf, offset, size_prefixed)==file_identifier
+
def RemoveSizePrefix(buf, offset):
"""
Create a slice of a size-prefixed buffer that has | 1 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import encode
from . import number_types
from . import packer
def GetSizePrefix(buf, offset):
"""Extract the size prefix from a buffer."""
return encode.Get(packer.int32, buf, offset)
def RemoveSizePrefix(buf, offset):
"""
Create a slice of a size-prefixed buffer that has
its position advanced just past the size prefix.
"""
return buf, offset + number_types.Int32Flags.bytewidth
| 1 | 14,851 | When calling functions with keyword arguments: please provide the arguments as keyword arguments, not positional arguments. | google-flatbuffers | java |
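A minimal Python sketch of what this review comment asks for in the patch above. The helpers mirror `GetBufferIdentifier`/`BufferHasIdentifier` from the diff, with simplified stand-in constants (both constants are assumptions); the point is only that the optional `size_prefixed` flag is forwarded as a keyword argument rather than positionally.

```python
FILE_IDENTIFIER_LENGTH = 4  # assumed, stands in for encode.FILE_IDENTIFIER_LENGTH
UOFFSET_WIDTH = 4           # assumed, stands in for number_types.UOffsetTFlags.bytewidth


def GetBufferIdentifier(buf, offset, size_prefixed=False):
    """Extract the file_identifier from a buffer."""
    # Skip the root table offset, plus the size prefix when present.
    offset += (UOFFSET_WIDTH if size_prefixed else 0) + UOFFSET_WIDTH
    return buf[offset:offset + FILE_IDENTIFIER_LENGTH]


def BufferHasIdentifier(buf, offset, file_identifier, size_prefixed=False):
    # Forward the optional flag by keyword, not as a positional argument.
    return GetBufferIdentifier(
        buf, offset, size_prefixed=size_prefixed) == file_identifier
```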
@@ -41,7 +41,7 @@ class BasicResBlock(nn.Module):
out_channels,
kernel_size=1,
bias=False,
- activation=None,
+ act_cfg=None,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
| 1 | import torch.nn as nn
from mmcv.cnn.weight_init import normal_init, xavier_init
from ..backbones.resnet import Bottleneck
from ..registry import HEADS
from ..utils import ConvModule
from .bbox_head import BBoxHead
class BasicResBlock(nn.Module):
"""Basic residual block.
This block is a little different from the block in the ResNet backbone.
The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
Args:
in_channels (int): Channels of the input feature map.
out_channels (int): Channels of the output feature map.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
"""
def __init__(self,
in_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(BasicResBlock, self).__init__()
# main path
self.conv1 = ConvModule(
in_channels,
in_channels,
kernel_size=3,
padding=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
self.conv2 = ConvModule(
in_channels,
out_channels,
kernel_size=1,
bias=False,
activation=None,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
# identity path
self.conv_identity = ConvModule(
in_channels,
out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=None)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
identity = self.conv_identity(identity)
out = x + identity
out = self.relu(out)
return out
@HEADS.register_module
class DoubleConvFCBBoxHead(BBoxHead):
r"""Bbox head used in Double-Head R-CNN
/-> cls
/-> shared convs ->
\-> reg
roi features
/-> cls
\-> shared fc ->
\-> reg
""" # noqa: W605
def __init__(self,
num_convs=0,
num_fcs=0,
conv_out_channels=1024,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=dict(type='BN'),
**kwargs):
kwargs.setdefault('with_avg_pool', True)
super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
assert self.with_avg_pool
assert num_convs > 0
assert num_fcs > 0
self.num_convs = num_convs
self.num_fcs = num_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# increase the channel of input features
self.res_block = BasicResBlock(self.in_channels,
self.conv_out_channels)
# add conv heads
self.conv_branch = self._add_conv_branch()
# add fc heads
self.fc_branch = self._add_fc_branch()
out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes)
self.relu = nn.ReLU(inplace=True)
def _add_conv_branch(self):
"""Add the fc branch which consists of a sequential of conv layers"""
branch_convs = nn.ModuleList()
for i in range(self.num_convs):
branch_convs.append(
Bottleneck(
inplanes=self.conv_out_channels,
planes=self.conv_out_channels // 4,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
return branch_convs
def _add_fc_branch(self):
"""Add the fc branch which consists of a sequential of fc layers"""
branch_fcs = nn.ModuleList()
for i in range(self.num_fcs):
fc_in_channels = (
self.in_channels *
self.roi_feat_area if i == 0 else self.fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
return branch_fcs
def init_weights(self):
normal_init(self.fc_cls, std=0.01)
normal_init(self.fc_reg, std=0.001)
for m in self.fc_branch.modules():
if isinstance(m, nn.Linear):
xavier_init(m, distribution='uniform')
def forward(self, x_cls, x_reg):
# conv head
x_conv = self.res_block(x_reg)
for conv in self.conv_branch:
x_conv = conv(x_conv)
if self.with_avg_pool:
x_conv = self.avg_pool(x_conv)
x_conv = x_conv.view(x_conv.size(0), -1)
bbox_pred = self.fc_reg(x_conv)
# fc head
x_fc = x_cls.view(x_cls.size(0), -1)
for fc in self.fc_branch:
x_fc = self.relu(fc(x_fc))
cls_score = self.fc_cls(x_fc)
return cls_score, bbox_pred
| 1 | 18,780 | We may follow the argument order: conv_cfg, norm_cfg, act_cfg. | open-mmlab-mmdetection | py |
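A short Python sketch of the ordering this review comment suggests for the `ConvModule` calls in the file above. It is a fragment of `BasicResBlock.__init__` only, and it assumes the `act_cfg` keyword from the patch has replaced `activation`.

```python
# Fragment, not runnable on its own: config keywords follow the suggested
# order conv_cfg, norm_cfg, act_cfg.
self.conv2 = ConvModule(
    in_channels,
    out_channels,
    kernel_size=1,
    bias=False,
    conv_cfg=conv_cfg,   # convolution layer config
    norm_cfg=norm_cfg,   # normalization layer config
    act_cfg=None)        # activation config last; None disables the activation
```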
@@ -60,6 +60,19 @@ module Selenium
class << self
def chrome(opts = {})
+ define_method(:options) { @capabilities[:chrome_options] ||= {} }
+ define_method("options=") { |value| @capabilities[:chrome_options] = value }
+ define_method("profile=") do |profile|
+ profile_json = profile.as_json
+ options['args'] ||= []
+ if options['args'].none? { |arg| arg =~ /user-data-dir/ }
+ options['args'] << "--user-data-dir=#{profile_json[:directory]}"
+ end
+ options['extensions'] = profile_json[:extensions]
+ end
+ alias_method :chrome_options, :options
+ alias_method :chrome_options=, :options=
+
new({
browser_name: 'chrome',
javascript_enabled: true, | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Remote
#
# Specification of the desired and/or actual capabilities of the browser that the
# server is being asked to create.
#
class Capabilities
DEFAULTS = {
browser_name: '',
version: '',
platform: :any,
javascript_enabled: false,
css_selectors_enabled: false,
takes_screenshot: false,
native_events: false,
rotatable: false,
firefox_profile: nil,
proxy: nil
}.freeze
DEFAULTS.each_key do |key|
define_method key do
@capabilities.fetch(key)
end
define_method "#{key}=" do |value|
@capabilities[key] = value
end
end
alias_method :css_selectors_enabled?, :css_selectors_enabled
alias_method :javascript_enabled?, :javascript_enabled
alias_method :native_events?, :native_events
alias_method :takes_screenshot?, :takes_screenshot
alias_method :rotatable?, :rotatable
#
# Convenience methods for the common choices.
#
class << self
def chrome(opts = {})
new({
browser_name: 'chrome',
javascript_enabled: true,
css_selectors_enabled: true
}.merge(opts))
end
def edge(opts = {})
new({
browser_name: 'MicrosoftEdge',
platform: :windows,
javascript_enabled: true,
takes_screenshot: true,
css_selectors_enabled: true
}.merge(opts))
end
def firefox(opts = {})
new({
browser_name: 'firefox',
javascript_enabled: true,
takes_screenshot: true,
css_selectors_enabled: true
}.merge(opts))
end
def htmlunit(opts = {})
new({
browser_name: 'htmlunit'
}.merge(opts))
end
def htmlunitwithjs(opts = {})
new({
browser_name: 'htmlunit',
javascript_enabled: true
}.merge(opts))
end
def internet_explorer(opts = {})
new({
browser_name: 'internet explorer',
platform: :windows,
takes_screenshot: true,
css_selectors_enabled: true,
native_events: true
}.merge(opts))
end
alias_method :ie, :internet_explorer
def phantomjs(opts = {})
new({
browser_name: 'phantomjs',
javascript_enabled: true,
takes_screenshot: true,
css_selectors_enabled: true
}.merge(opts))
end
def safari(opts = {})
new({
browser_name: 'safari',
platform: :mac,
javascript_enabled: true,
takes_screenshot: true,
css_selectors_enabled: true
}.merge(opts))
end
#
# @api private
#
def json_create(data)
data = data.dup
caps = new
caps.browser_name = data.delete('browserName')
caps.version = data.delete('version')
caps.platform = data.delete('platform').downcase.to_sym if data.key?('platform')
caps.javascript_enabled = data.delete('javascriptEnabled')
caps.css_selectors_enabled = data.delete('cssSelectorsEnabled')
caps.takes_screenshot = data.delete('takesScreenshot')
caps.native_events = data.delete('nativeEvents')
caps.rotatable = data.delete('rotatable')
caps.proxy = Proxy.json_create(data['proxy']) if data.key?('proxy') && !data['proxy'].empty?
# any remaining pairs will be added as is, with no conversion
caps.merge!(data)
caps
end
end
# @option :browser_name [String] required browser name
# @option :version [String] required browser version number
# @option :platform [Symbol] one of :any, :win, :mac, or :x
# @option :javascript_enabled [Boolean] does the driver have javascript enabled?
# @option :css_selectors_enabled [Boolean] does the driver support CSS selectors?
# @option :takes_screenshot [Boolean] can this driver take screenshots?
# @option :native_events [Boolean] does this driver use native events?
# @option :proxy [Selenium::WebDriver::Proxy, Hash] proxy configuration
#
# Firefox-specific options:
#
# @option :firefox_profile [Selenium::WebDriver::Firefox::Profile] the firefox profile to use
#
# @api public
#
def initialize(opts = {})
@capabilities = DEFAULTS.merge(opts)
self.proxy = opts.delete(:proxy)
end
#
# Allows setting arbitrary capabilities.
#
def []=(key, value)
@capabilities[key] = value
end
def [](key)
@capabilities[key]
end
def merge!(other)
if other.respond_to?(:capabilities, true) && other.capabilities.is_a?(Hash)
@capabilities.merge! other.capabilities
elsif other.is_a? Hash
@capabilities.merge! other
else
raise ArgumentError, 'argument should be a Hash or implement #capabilities'
end
end
def proxy=(proxy)
case proxy
when Hash
@capabilities[:proxy] = Proxy.new(proxy)
when Proxy, nil
@capabilities[:proxy] = proxy
else
raise TypeError, "expected Hash or #{Proxy.name}, got #{proxy.inspect}:#{proxy.class}"
end
end
# @api private
#
def as_json(*)
hash = {}
@capabilities.each do |key, value|
case key
when :platform
hash['platform'] = value.to_s.upcase
when :firefox_profile
hash['firefox_profile'] = value.as_json['zip'] if value
when :proxy
hash['proxy'] = value.as_json if value
when String, :firefox_binary
hash[key.to_s] = value
when Symbol
hash[camel_case(key.to_s)] = value
else
raise TypeError, "expected String or Symbol, got #{key.inspect}:#{key.class} / #{value.inspect}"
end
end
hash
end
def to_json(*)
JSON.generate as_json
end
def ==(other)
return false unless other.is_a? self.class
as_json == other.as_json
end
alias_method :eql?, :==
protected
attr_reader :capabilities
private
def camel_case(str)
str.gsub(/_([a-z])/) { Regexp.last_match(1).upcase }
end
end # Capabilities
end # Remote
end # WebDriver
end # Selenium
| 1 | 14,241 | `options['binary'] = WebDriver::Chrome.path` if set? | SeleniumHQ-selenium | rb |
@@ -5,6 +5,7 @@
*/
#include <errno.h>
+#include <folly/ssl/Init.h>
#include <signal.h>
#include <string.h>
#include <thrift/lib/cpp2/server/ThriftServer.h> | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <thrift/lib/cpp2/server/ThriftServer.h>
#include "common/base/Base.h"
#include "common/base/SignalHandler.h"
#include "common/base/Status.h"
#include "common/fs/FileUtils.h"
#include "common/network/NetworkUtils.h"
#include "common/process/ProcessUtils.h"
#include "common/time/TimezoneInfo.h"
#include "graph/service/GraphFlags.h"
#include "graph/service/GraphService.h"
#include "graph/stats/StatsDef.h"
#include "version/Version.h"
#include "webservice/WebService.h"
using nebula::ProcessUtils;
using nebula::Status;
using nebula::fs::FileUtils;
using nebula::graph::GraphService;
using nebula::network::NetworkUtils;
static std::unique_ptr<apache::thrift::ThriftServer> gServer;
static void signalHandler(int sig);
static Status setupSignalHandler();
extern Status setupLogging();
static void printHelp(const char *prog);
static void setupThreadManager();
DECLARE_string(flagfile);
int main(int argc, char *argv[]) {
google::SetVersionString(nebula::versionString());
if (argc == 1) {
printHelp(argv[0]);
return EXIT_FAILURE;
}
if (argc == 2) {
if (::strcmp(argv[1], "-h") == 0) {
printHelp(argv[0]);
return EXIT_SUCCESS;
}
}
folly::init(&argc, &argv, true);
nebula::initCounters();
if (FLAGS_flagfile.empty()) {
printHelp(argv[0]);
return EXIT_FAILURE;
}
// Setup logging
auto status = setupLogging();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
// Detect if the server has already been started
auto pidPath = FLAGS_pid_file;
status = ProcessUtils::isPidAvailable(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
if (FLAGS_daemonize) {
status = ProcessUtils::daemonize(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
} else {
// Write the current pid into the pid file
status = ProcessUtils::makePidFile(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
}
// Get the IPv4 address the server will listen on
if (FLAGS_local_ip.empty()) {
LOG(ERROR) << "local_ip is empty, need to config it through config file";
return EXIT_FAILURE;
}
// TODO: Check the ip is valid
nebula::HostAddr localhost{FLAGS_local_ip, FLAGS_port};
  // Initialize the global timezone; it's only used for datetime type computation
  // and won't affect the process timezone.
status = nebula::time::Timezone::initializeGlobalTimezone();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
LOG(INFO) << "Starting Graph HTTP Service";
auto webSvc = std::make_unique<nebula::WebService>();
status = webSvc->start();
if (!status.ok()) {
return EXIT_FAILURE;
}
if (FLAGS_num_netio_threads == 0) {
FLAGS_num_netio_threads = std::thread::hardware_concurrency();
}
if (FLAGS_num_netio_threads <= 0) {
LOG(WARNING) << "Number of networking IO threads should be greater than zero";
return EXIT_FAILURE;
}
LOG(INFO) << "Number of networking IO threads: " << FLAGS_num_netio_threads;
if (FLAGS_num_worker_threads == 0) {
FLAGS_num_worker_threads = std::thread::hardware_concurrency();
}
if (FLAGS_num_worker_threads <= 0) {
LOG(WARNING) << "Number of worker threads should be greater than zero";
return EXIT_FAILURE;
}
LOG(INFO) << "Number of worker threads: " << FLAGS_num_worker_threads;
auto threadFactory = std::make_shared<folly::NamedThreadFactory>("graph-netio");
auto ioThreadPool = std::make_shared<folly::IOThreadPoolExecutor>(FLAGS_num_netio_threads,
std::move(threadFactory));
gServer = std::make_unique<apache::thrift::ThriftServer>();
gServer->setIOThreadPool(ioThreadPool);
auto interface = std::make_shared<GraphService>();
status = interface->init(ioThreadPool, localhost);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
gServer->setPort(localhost.port);
gServer->setInterface(std::move(interface));
gServer->setReusePort(FLAGS_reuse_port);
gServer->setIdleTimeout(std::chrono::seconds(FLAGS_client_idle_timeout_secs));
gServer->setNumAcceptThreads(FLAGS_num_accept_threads);
gServer->setListenBacklog(FLAGS_listen_backlog);
setupThreadManager();
// Setup the signal handlers
status = setupSignalHandler();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
FLOG_INFO("Starting nebula-graphd on %s:%d\n", localhost.host.c_str(), localhost.port);
try {
gServer->serve(); // Blocking wait until shut down via gServer->stop()
} catch (const std::exception &e) {
FLOG_ERROR("Exception thrown while starting the RPC server: %s", e.what());
return EXIT_FAILURE;
}
FLOG_INFO("nebula-graphd on %s:%d has been stopped", localhost.host.c_str(), localhost.port);
return EXIT_SUCCESS;
}
Status setupSignalHandler() {
return nebula::SignalHandler::install(
{SIGINT, SIGTERM},
[](nebula::SignalHandler::GeneralSignalInfo *info) { signalHandler(info->sig()); });
}
void signalHandler(int sig) {
switch (sig) {
case SIGINT:
case SIGTERM:
FLOG_INFO("Signal %d(%s) received, stopping this server", sig, ::strsignal(sig));
gServer->stop();
break;
default:
FLOG_ERROR("Signal %d(%s) received but ignored", sig, ::strsignal(sig));
}
}
void printHelp(const char *prog) { fprintf(stderr, "%s --flagfile <config_file>\n", prog); }
void setupThreadManager() {
int numThreads =
FLAGS_num_worker_threads > 0 ? FLAGS_num_worker_threads : gServer->getNumIOWorkerThreads();
std::shared_ptr<apache::thrift::concurrency::ThreadManager> threadManager(
PriorityThreadManager::newPriorityThreadManager(numThreads, false /*stats*/));
threadManager->setNamePrefix("executor");
threadManager->start();
gServer->setThreadManager(threadManager);
}
| 1 | 30,648 | folly library should be put on the top. | vesoft-inc-nebula | cpp |
@@ -35,6 +35,7 @@ let CAMEL_REG = /-?(?=[A-Z])/g;
function setProperty(dom, name, value, oldValue, isSvg) {
let v;
if (name==='class' || name==='className') name = isSvg ? 'class' : 'className';
+ const ns = isSvg && (name !== (name = name.replace(/^xlink:?/, '')));
if (name==='style') {
| 1 | import { IS_NON_DIMENSIONAL } from '../constants';
import options from '../options';
/**
* Diff the old and new properties of a VNode and apply changes to the DOM node
* @param {import('../internal').PreactElement} dom The DOM node to apply
* changes to
* @param {object} newProps The new props
* @param {object} oldProps The old props
* @param {boolean} isSvg Whether or not this node is an SVG node
*/
export function diffProps(dom, newProps, oldProps, isSvg) {
for (let i in newProps) {
if (i!=='children' && i!=='key' && (!oldProps || ((i==='value' || i==='checked') ? dom : oldProps)[i]!==newProps[i])) {
setProperty(dom, i, newProps[i], oldProps[i], isSvg);
}
}
for (let i in oldProps) {
if (i!=='children' && i!=='key' && (!newProps || !(i in newProps))) {
setProperty(dom, i, null, oldProps[i], isSvg);
}
}
}
let CAMEL_REG = /-?(?=[A-Z])/g;
/**
* Set a property value on a DOM node
* @param {import('../internal').PreactElement} dom The DOM node to modify
* @param {string} name The name of the property to set
* @param {*} value The value to set the property to
* @param {*} oldValue The old value the property had
* @param {boolean} isSvg Whether or not this DOM node is an SVG node or not
*/
function setProperty(dom, name, value, oldValue, isSvg) {
let v;
if (name==='class' || name==='className') name = isSvg ? 'class' : 'className';
if (name==='style') {
/* Possible golfing activities for setting styles:
* - we could just drop String style values. They're not supported in other VDOM libs.
* - assigning to .style sets .style.cssText - TODO: benchmark this, might not be worth the bytes.
* - assigning also casts to String, and ignores invalid values. This means assigning an Object clears all styles.
*/
let s = dom.style;
if (typeof value==='string') {
s.cssText = value;
}
else {
if (typeof oldValue==='string') s.cssText = '';
else {
// remove values not in the new list
for (let i in oldValue) {
if (value==null || !(i in value)) s.setProperty(i.replace(CAMEL_REG, '-'), '');
}
}
for (let i in value) {
v = value[i];
if (oldValue==null || v!==oldValue[i]) {
s.setProperty(i.replace(CAMEL_REG, '-'), typeof v==='number' && IS_NON_DIMENSIONAL.test(i)===false ? (v + 'px') : v);
}
}
}
}
else if (name==='dangerouslySetInnerHTML') {
return;
}
// Benchmark for comparison: https://esbench.com/bench/574c954bdb965b9a00965ac6
else if (name[0]==='o' && name[1]==='n') {
let useCapture = name !== (name=name.replace(/Capture$/, ''));
let nameLower = name.toLowerCase();
name = (nameLower in dom ? nameLower : name).substring(2);
if (value) {
if (!oldValue) dom.addEventListener(name, eventProxy, useCapture);
}
else {
dom.removeEventListener(name, eventProxy, useCapture);
}
(dom._listeners || (dom._listeners = {}))[name] = value;
}
else if (name!=='list' && name!=='tagName' && !isSvg && (name in dom)) {
dom[name] = value==null ? '' : value;
}
else if (value==null || value===false) {
dom.removeAttribute(name);
}
else if (typeof value!=='function') {
dom.setAttribute(name, value);
}
}
/**
* Proxy an event to hooked event handlers
* @param {Event} e The event object from the browser
* @private
*/
function eventProxy(e) {
return this._listeners[e.type](options.event ? options.event(e) : e);
}
| 1 | 12,803 | Do we have to restrict this to SVG elements? I think setting namespaced attributes on normal dom elements (while not as common) is still valid, so perhaps we can save some bytes by removing the `isSvg` check? | preactjs-preact | js |
@@ -134,7 +134,7 @@ class DataProvider {
* @returns {boolean}
*/
_isHiddenRow(row) {
- return this.hot.hasHook('hiddenRow') && this.hot.runHooks('hiddenRow', row);
+ return this.hot.rowIndexMapper.isHidden(this.hot.toPhysicalRow(row));
}
/** | 1 | import { rangeEach } from '../../helpers/number';
// Waiting for jshint >=2.9.0 where they added support for destructuring
// jshint ignore: start
/**
* @plugin ExportFile
* @private
*/
class DataProvider {
constructor(hotInstance) {
/**
* Handsontable instance.
*
* @type {Core}
*/
this.hot = hotInstance;
/**
* Format type class options.
*
* @type {object}
*/
this.options = {};
}
/**
* Set options for data provider.
*
* @param {object} options Object with specified options.
*/
setOptions(options) {
this.options = options;
}
/**
* Get table data based on provided settings to the class constructor.
*
* @returns {Array}
*/
getData() {
const { startRow, startCol, endRow, endCol } = this._getDataRange();
const options = this.options;
const data = [];
rangeEach(startRow, endRow, (rowIndex) => {
const row = [];
if (!options.exportHiddenRows && this._isHiddenRow(rowIndex)) {
return;
}
rangeEach(startCol, endCol, (colIndex) => {
if (!options.exportHiddenColumns && this._isHiddenColumn(colIndex)) {
return;
}
row.push(this.hot.getDataAtCell(rowIndex, colIndex));
});
data.push(row);
});
return data;
}
/**
* Gets list of row headers.
*
* @returns {Array}
*/
getRowHeaders() {
const headers = [];
if (this.options.rowHeaders) {
const { startRow, endRow } = this._getDataRange();
const rowHeaders = this.hot.getRowHeader();
rangeEach(startRow, endRow, (row) => {
if (!this.options.exportHiddenRows && this._isHiddenRow(row)) {
return;
}
headers.push(rowHeaders[row]);
});
}
return headers;
}
/**
* Gets list of columns headers.
*
* @returns {Array}
*/
getColumnHeaders() {
const headers = [];
if (this.options.columnHeaders) {
const { startCol, endCol } = this._getDataRange();
const colHeaders = this.hot.getColHeader();
rangeEach(startCol, endCol, (column) => {
if (!this.options.exportHiddenColumns && this._isHiddenColumn(column)) {
return;
}
headers.push(colHeaders[column]);
});
}
return headers;
}
/**
* Get data range object based on settings provided in the class constructor.
*
* @private
* @returns {object} Returns object with keys `startRow`, `startCol`, `endRow` and `endCol`.
*/
_getDataRange() {
const cols = this.hot.countCols() - 1;
const rows = this.hot.countRows() - 1;
let [startRow = 0, startCol = 0, endRow = rows, endCol = cols] = this.options.range;
startRow = Math.max(startRow, 0);
startCol = Math.max(startCol, 0);
endRow = Math.min(endRow, rows);
endCol = Math.min(endCol, cols);
return { startRow, startCol, endRow, endCol };
}
/**
* Check if row at specified row index is hidden.
*
* @private
* @param {number} row Row index.
* @returns {boolean}
*/
_isHiddenRow(row) {
return this.hot.hasHook('hiddenRow') && this.hot.runHooks('hiddenRow', row);
}
/**
* Check if column at specified column index is hidden.
*
* @private
* @param {number} column Visual column index.
* @returns {boolean}
*/
_isHiddenColumn(column) {
return this.hot.columnIndexMapper.isHidden(this.hot.toPhysicalColumn(column));
}
}
export default DataProvider;
| 1 | 16,692 | Please add it to the `REMOVED_HOOKS` constant. | handsontable-handsontable | js |
@@ -1,7 +1,9 @@
describe('fieldset', function () {
'use strict';
var fixture = document.getElementById('fixture');
+ var shadowSupport = axe.testUtils.shadowSupport;
var fixtureSetup = axe.testUtils.fixtureSetup;
+
var checkContext = {
_data: null,
data: function (d) { | 1 | describe('fieldset', function () {
'use strict';
var fixture = document.getElementById('fixture');
var fixtureSetup = axe.testUtils.fixtureSetup;
var checkContext = {
_data: null,
data: function (d) {
this._data = d;
},
relatedNodes: function (nodes) {
this._relatedNodes = Array.isArray(nodes) ? nodes : [nodes];
}
};
afterEach(function () {
fixture.innerHTML = '';
checkContext._data = null;
});
function tests(type) {
it('should return true if there is only one ' + type + ' element with the same name', function () {
fixtureSetup('<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="differentname">');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
});
it('should return false if there are two ungrouped ' + type + ' elements with the same name', function () {
fixtureSetup('<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname">');
var node = fixture.querySelector('#target');
assert.isFalse(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
failureCode: 'no-group',
type: type,
name: 'uniqueyname'
});
assert.lengthOf(checkContext._relatedNodes, 1);
assert.equal(checkContext._relatedNodes[0], fixture.querySelectorAll('input')[1]);
});
it('should return false if the group has no legend element', function () {
fixtureSetup('<fieldset><input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></fieldset>');
var node = fixture.querySelector('#target');
assert.isFalse(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
failureCode: 'no-legend',
type: type,
name: 'uniqueyname'
});
assert.lengthOf(checkContext._relatedNodes, 1);
assert.equal(checkContext._relatedNodes[0], fixture.querySelector('fieldset'));
});
it('should return false if the group has no legend text', function () {
fixtureSetup('<fieldset><legend></legend>' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></fieldset>');
var node = fixture.querySelector('#target');
assert.isFalse(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
failureCode: 'empty-legend',
type: type,
name: 'uniqueyname'
});
assert.lengthOf(checkContext._relatedNodes, 1);
assert.equal(checkContext._relatedNodes[0], fixture.querySelector('legend'));
});
it('should return false if the group contains extra elements', function () {
fixtureSetup('<fieldset><legend>Legendary</legend>' +
'<input type="text" id="random">' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></fieldset>');
var node = fixture.querySelector('#target');
assert.isFalse(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
failureCode: 'mixed-inputs',
type: type,
name: 'uniqueyname'
});
assert.lengthOf(checkContext._relatedNodes, 1);
assert.equal(checkContext._relatedNodes[0], fixture.querySelector('#random'));
});
it('should return true if the group contains only the right elements and has legend', function () {
fixtureSetup('<fieldset><legend>Legendary</legend>' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></fieldset>');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
});
it('should return false if an unlabelled ARIA group contains only the right elements', function () {
fixtureSetup('<div role="group">' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>');
var node = fixture.querySelector('#target');
assert.isFalse(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
failureCode: 'no-group-label',
type: type,
name: 'uniqueyname'
});
assert.lengthOf(checkContext._relatedNodes, 1);
assert.equal(checkContext._relatedNodes[0], fixture.querySelector('div'));
});
it('should return false if an improperly labelled-by ARIA group contains only the right elements', function () {
fixtureSetup('<div id="grouplabel"></div>' +
'<div role="group" aria-labelledby="grouplabel">' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>');
var node = fixture.querySelector('#target');
assert.isFalse(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
failureCode: 'no-group-label',
type: type,
name: 'uniqueyname'
});
assert.lengthOf(checkContext._relatedNodes, 1);
assert.equal(checkContext._relatedNodes[0], fixture.querySelector('[role=group]'));
});
it('should return false if the group contains extra elements', function () {
fixtureSetup('<div role="group" aria-labelledby="g1"><div id="g1">Legendary</div>' +
'<input type="text" id="random">' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>');
var node = fixture.querySelector('#target');
assert.isFalse(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
failureCode: 'group-mixed-inputs',
type: type,
name: 'uniqueyname'
});
assert.lengthOf(checkContext._relatedNodes, 1);
assert.equal(checkContext._relatedNodes[0], fixture.querySelector('#random'));
});
it('should return true if a properly labelled-by ARIA group contains only the right elements', function () {
fixtureSetup('<div id="grouplabel">Label</div>' +
'<div role="group" aria-labelledby="grouplabel">' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
type: type,
name: 'uniqueyname'
});
});
it('should return true if a properly labelled-by ARIA group contains only the right elements - special characters', function () {
fixtureSetup('<div id="grouplabel">Label</div>' +
'<div role="group" aria-labelledby="grouplabel">' +
'<input type="' + type + '" id="target" name="s.%$#n">' +
'<input type="' + type + '" name="s.%$#n"></div>');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, {
type: type,
name: 's.%$#n'
});
});
it('should return true if a properly labelled ARIA group contains only the right elements', function () {
fixtureSetup('<div role="group" aria-label="group label">' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
});
it('should ignore hidden inputs', function () {
fixtureSetup('<fieldset><legend>Legendary</legend>' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>' +
'<input type="hidden" name="things"></fieldset>');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
});
it('should allow elements to be contained in 2 or more fieldsets', function () {
fixtureSetup('<fieldset><legend>Legendary</legend>' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>' +
'</fieldset>' +
'<fieldset><legend>Also Legendary</legend>' +
'<input type="' + type + '" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>' +
'</fieldset>');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
});
it('should allow elements to be contained in 2 or more groups', function () {
fixtureSetup('<div role="group" aria-labelledby="g1"><div id="g1">Legendary</div>' +
'<input type="' + type + '" id="target" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>' +
'</div>' +
'<div role="group" aria-labelledby="g2"><div id="g2">Also Legendary</div>' +
'<input type="' + type + '" name="uniqueyname">' +
'<input type="' + type + '" name="uniqueyname"></div>' +
'</fieldset>');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
});
}
describe('radio', function () {
var type = 'radio';
tests(type);
it('should allow radiogroup role', function () {
fixtureSetup('<div id="grouplabel">Label</div>' +
'<div role="radiogroup" aria-labelledby="grouplabel">' +
'<input type="' + type + '" id="target" name="s.%$#n">' +
'<input type="' + type + '" name="s.%$#n"></div>');
var node = fixture.querySelector('#target');
assert.isTrue(checks.fieldset.evaluate.call(checkContext, node));
});
});
describe('checkbox', function () {
var type = 'checkbox';
tests(type);
it('should NOT allow radiogroup role', function () {
fixtureSetup('<div id="grouplabel">Label</div>' +
'<div role="radiogroup" aria-labelledby="grouplabel">' +
'<input type="' + type + '" id="target" name="s.%$#n">' +
'<input type="' + type + '" name="s.%$#n"></div>');
var node = fixture.querySelector('#target');
assert.isFalse(checks.fieldset.evaluate.call(checkContext, node));
});
});
});
| 1 | 11,349 | This variable needs `.v1` at the end of it, or the tests below need it. There are some test failures as a result | dequelabs-axe-core | js |
@@ -25,6 +25,12 @@ class AccountsController < ApplicationController
render json: Chart.new(@account).commits_by_language(params[:scope])
end
+ def label_as_spammer
+ @account = Account.find_by(id: params[:id])
+ Account::Access.new(@account).spam!
+ render template: 'accounts/disabled'
+ end
+
private
def account | 1 | class AccountsController < ApplicationController
before_action :account, only: [:show, :commits_by_project_chart, :commits_by_language_chart]
before_action :redirect_if_disabled, only: [:show, :commits_by_project_chart, :commits_by_language_chart]
# before_action :account_context, only: [:show]
def index
@people = Person.find_claimed(page: params[:page])
@cbp_map = PeopleDecorator.new(@people).commits_by_project_map
@positions_map = Position.where(id: @cbp_map.values.map(&:first).flatten).includes(:project)
.references(:all).index_by(&:id)
end
def show
@projects, @logos = @account.project_core.used
@twitter_detail = TwitterDetail.new(@account)
end
# NOTE: Replaces commits_history
def commits_by_project_chart
render json: Chart.new(@account).commits_by_project
end
# NOTE: Replaces language_experience
def commits_by_language_chart
render json: Chart.new(@account).commits_by_language(params[:scope])
end
private
def account
accounts = Account.arel_table
@account = Account.where(accounts[:id].eq(params[:id]).or(accounts[:login].eq(params[:id]))).first
end
def redirect_if_disabled
redirect_to disabled_account_url(@account) if @account && Account::Access.new(@account).disabled?
end
end
| 1 | 7,013 | Is this access protected so that only admins can access this route? | blackducksoftware-ohloh-ui | rb |
@@ -328,7 +328,7 @@ void wlr_output_layout_closest_point(struct wlr_output_layout *layout,
return;
}
- double min_x = DBL_MAX, min_y = DBL_MAX, min_distance = DBL_MAX;
+ double min_x = 0, min_y = 0, min_distance = DBL_MAX;
struct wlr_output_layout_output *l_output;
wl_list_for_each(l_output, &layout->outputs, link) {
if (reference != NULL && reference != l_output->output) { | 1 | #include <assert.h>
#include <float.h>
#include <limits.h>
#include <stdlib.h>
#include <wlr/types/wlr_box.h>
#include <wlr/types/wlr_output_layout.h>
#include <wlr/types/wlr_output.h>
#include <wlr/util/log.h>
#include "util/signal.h"
struct wlr_output_layout_state {
struct wlr_box _box; // should never be read directly, use the getter
};
struct wlr_output_layout_output_state {
struct wlr_output_layout *layout;
struct wlr_output_layout_output *l_output;
struct wlr_box _box; // should never be read directly, use the getter
bool auto_configured;
struct wl_listener mode;
struct wl_listener scale;
struct wl_listener transform;
struct wl_listener output_destroy;
};
struct wlr_output_layout *wlr_output_layout_create(void) {
struct wlr_output_layout *layout =
calloc(1, sizeof(struct wlr_output_layout));
if (layout == NULL) {
return NULL;
}
layout->state = calloc(1, sizeof(struct wlr_output_layout_state));
if (layout->state == NULL) {
free(layout);
return NULL;
}
wl_list_init(&layout->outputs);
wl_signal_init(&layout->events.add);
wl_signal_init(&layout->events.change);
wl_signal_init(&layout->events.destroy);
return layout;
}
static void output_layout_output_destroy(
struct wlr_output_layout_output *l_output) {
wlr_signal_emit_safe(&l_output->events.destroy, l_output);
wlr_output_destroy_global(l_output->output);
wl_list_remove(&l_output->state->mode.link);
wl_list_remove(&l_output->state->scale.link);
wl_list_remove(&l_output->state->transform.link);
wl_list_remove(&l_output->state->output_destroy.link);
wl_list_remove(&l_output->link);
free(l_output->state);
free(l_output);
}
void wlr_output_layout_destroy(struct wlr_output_layout *layout) {
if (!layout) {
return;
}
wlr_signal_emit_safe(&layout->events.destroy, layout);
struct wlr_output_layout_output *l_output, *temp;
wl_list_for_each_safe(l_output, temp, &layout->outputs, link) {
output_layout_output_destroy(l_output);
}
free(layout->state);
free(layout);
}
static struct wlr_box *output_layout_output_get_box(
struct wlr_output_layout_output *l_output) {
l_output->state->_box.x = l_output->x;
l_output->state->_box.y = l_output->y;
int width, height;
wlr_output_effective_resolution(l_output->output, &width, &height);
l_output->state->_box.width = width;
l_output->state->_box.height = height;
return &l_output->state->_box;
}
/**
* This must be called whenever the layout changes to reconfigure the auto
* configured outputs and emit the `changed` event.
*
* Auto configured outputs are placed to the right of the north east corner of
* the rightmost output in the layout in a horizontal line.
*/
static void output_layout_reconfigure(struct wlr_output_layout *layout) {
int max_x = INT_MIN;
int max_x_y = INT_MIN; // y value for the max_x output
// find the rightmost x coordinate occupied by a manually configured output
// in the layout
struct wlr_output_layout_output *l_output;
wl_list_for_each(l_output, &layout->outputs, link) {
if (l_output->state->auto_configured) {
continue;
}
struct wlr_box *box = output_layout_output_get_box(l_output);
if (box->x + box->width > max_x) {
max_x = box->x + box->width;
max_x_y = box->y;
}
}
if (max_x == INT_MIN) {
// there are no manually configured outputs
max_x = 0;
max_x_y = 0;
}
wl_list_for_each(l_output, &layout->outputs, link) {
if (!l_output->state->auto_configured) {
continue;
}
struct wlr_box *box = output_layout_output_get_box(l_output);
l_output->x = max_x;
l_output->y = max_x_y;
max_x += box->width;
}
wlr_signal_emit_safe(&layout->events.change, layout);
}
static void output_update_global(struct wlr_output *output) {
// Don't expose the output if it doesn't have a current mode
if (wl_list_empty(&output->modes) || output->current_mode != NULL) {
wlr_output_create_global(output);
} else {
wlr_output_destroy_global(output);
}
}
static void handle_output_mode(struct wl_listener *listener, void *data) {
struct wlr_output_layout_output_state *state =
wl_container_of(listener, state, mode);
output_layout_reconfigure(state->layout);
output_update_global(state->l_output->output);
}
static void handle_output_scale(struct wl_listener *listener, void *data) {
struct wlr_output_layout_output_state *state =
wl_container_of(listener, state, scale);
output_layout_reconfigure(state->layout);
}
static void handle_output_transform(struct wl_listener *listener, void *data) {
struct wlr_output_layout_output_state *state =
wl_container_of(listener, state, transform);
output_layout_reconfigure(state->layout);
}
static void handle_output_destroy(struct wl_listener *listener, void *data) {
struct wlr_output_layout_output_state *state =
wl_container_of(listener, state, output_destroy);
struct wlr_output_layout *layout = state->layout;
output_layout_output_destroy(state->l_output);
output_layout_reconfigure(layout);
}
static struct wlr_output_layout_output *output_layout_output_create(
struct wlr_output_layout *layout, struct wlr_output *output) {
struct wlr_output_layout_output *l_output =
calloc(1, sizeof(struct wlr_output_layout_output));
if (l_output == NULL) {
return NULL;
}
l_output->state = calloc(1, sizeof(struct wlr_output_layout_output_state));
if (l_output->state == NULL) {
free(l_output);
return NULL;
}
l_output->state->l_output = l_output;
l_output->state->layout = layout;
l_output->output = output;
wl_signal_init(&l_output->events.destroy);
wl_list_insert(&layout->outputs, &l_output->link);
wl_signal_add(&output->events.mode, &l_output->state->mode);
l_output->state->mode.notify = handle_output_mode;
wl_signal_add(&output->events.scale, &l_output->state->scale);
l_output->state->scale.notify = handle_output_scale;
wl_signal_add(&output->events.transform, &l_output->state->transform);
l_output->state->transform.notify = handle_output_transform;
wl_signal_add(&output->events.destroy, &l_output->state->output_destroy);
l_output->state->output_destroy.notify = handle_output_destroy;
return l_output;
}
void wlr_output_layout_add(struct wlr_output_layout *layout,
struct wlr_output *output, int lx, int ly) {
struct wlr_output_layout_output *l_output =
wlr_output_layout_get(layout, output);
bool is_new = l_output == NULL;
if (!l_output) {
l_output = output_layout_output_create(layout, output);
if (!l_output) {
wlr_log(WLR_ERROR, "Failed to create wlr_output_layout_output");
return;
}
}
l_output->x = lx;
l_output->y = ly;
l_output->state->auto_configured = false;
output_layout_reconfigure(layout);
output_update_global(output);
if (is_new) {
wlr_signal_emit_safe(&layout->events.add, l_output);
}
}
struct wlr_output_layout_output *wlr_output_layout_get(
struct wlr_output_layout *layout, struct wlr_output *reference) {
struct wlr_output_layout_output *l_output;
wl_list_for_each(l_output, &layout->outputs, link) {
if (l_output->output == reference) {
return l_output;
}
}
return NULL;
}
bool wlr_output_layout_contains_point(struct wlr_output_layout *layout,
struct wlr_output *reference, int lx, int ly) {
if (reference) {
struct wlr_output_layout_output *l_output =
wlr_output_layout_get(layout, reference);
struct wlr_box *box = output_layout_output_get_box(l_output);
return wlr_box_contains_point(box, lx, ly);
} else {
return !!wlr_output_layout_output_at(layout, lx, ly);
}
}
bool wlr_output_layout_intersects(struct wlr_output_layout *layout,
struct wlr_output *reference, const struct wlr_box *target_lbox) {
struct wlr_box out_box;
if (reference == NULL) {
struct wlr_output_layout_output *l_output;
wl_list_for_each(l_output, &layout->outputs, link) {
struct wlr_box *output_box =
output_layout_output_get_box(l_output);
if (wlr_box_intersection(&out_box, output_box, target_lbox)) {
return true;
}
}
return false;
} else {
struct wlr_output_layout_output *l_output =
wlr_output_layout_get(layout, reference);
if (!l_output) {
return false;
}
struct wlr_box *output_box = output_layout_output_get_box(l_output);
return wlr_box_intersection(&out_box, output_box, target_lbox);
}
}
struct wlr_output *wlr_output_layout_output_at(struct wlr_output_layout *layout,
double lx, double ly) {
struct wlr_output_layout_output *l_output;
wl_list_for_each(l_output, &layout->outputs, link) {
struct wlr_box *box = output_layout_output_get_box(l_output);
if (wlr_box_contains_point(box, lx, ly)) {
return l_output->output;
}
}
return NULL;
}
void wlr_output_layout_move(struct wlr_output_layout *layout,
struct wlr_output *output, int lx, int ly) {
struct wlr_output_layout_output *l_output =
wlr_output_layout_get(layout, output);
if (l_output) {
l_output->x = lx;
l_output->y = ly;
l_output->state->auto_configured = false;
output_layout_reconfigure(layout);
} else {
wlr_log(WLR_ERROR, "output not found in this layout: %s", output->name);
}
}
void wlr_output_layout_remove(struct wlr_output_layout *layout,
struct wlr_output *output) {
struct wlr_output_layout_output *l_output =
wlr_output_layout_get(layout, output);
if (l_output) {
output_layout_output_destroy(l_output);
output_layout_reconfigure(layout);
}
}
void wlr_output_layout_output_coords(struct wlr_output_layout *layout,
struct wlr_output *reference, double *lx, double *ly) {
assert(layout && reference);
double src_x = *lx;
double src_y = *ly;
struct wlr_output_layout_output *l_output;
wl_list_for_each(l_output, &layout->outputs, link) {
if (l_output->output == reference) {
*lx = src_x - (double)l_output->x;
*ly = src_y - (double)l_output->y;
return;
}
}
}
void wlr_output_layout_closest_point(struct wlr_output_layout *layout,
struct wlr_output *reference, double lx, double ly, double *dest_lx,
double *dest_ly) {
if (dest_lx == NULL && dest_ly == NULL) {
return;
}
double min_x = DBL_MAX, min_y = DBL_MAX, min_distance = DBL_MAX;
struct wlr_output_layout_output *l_output;
wl_list_for_each(l_output, &layout->outputs, link) {
if (reference != NULL && reference != l_output->output) {
continue;
}
double output_x, output_y, output_distance;
struct wlr_box *box = output_layout_output_get_box(l_output);
wlr_box_closest_point(box, lx, ly, &output_x, &output_y);
// calculate squared distance suitable for comparison
output_distance =
(lx - output_x) * (lx - output_x) + (ly - output_y) * (ly - output_y);
if (!isfinite(output_distance)) {
output_distance = DBL_MAX;
}
if (output_distance <= min_distance) {
min_x = output_x;
min_y = output_y;
min_distance = output_distance;
}
}
if (dest_lx) {
*dest_lx = min_x;
}
if (dest_ly) {
*dest_ly = min_y;
}
}
struct wlr_box *wlr_output_layout_get_box(
struct wlr_output_layout *layout, struct wlr_output *reference) {
struct wlr_output_layout_output *l_output;
if (reference) {
// output extents
l_output = wlr_output_layout_get(layout, reference);
if (l_output) {
return output_layout_output_get_box(l_output);
} else {
return NULL;
}
} else {
// layout extents
int min_x = 0, max_x = 0, min_y = 0, max_y = 0;
if (!wl_list_empty(&layout->outputs)) {
min_x = min_y = INT_MAX;
max_x = max_y = INT_MIN;
wl_list_for_each(l_output, &layout->outputs, link) {
struct wlr_box *box = output_layout_output_get_box(l_output);
if (box->x < min_x) {
min_x = box->x;
}
if (box->y < min_y) {
min_y = box->y;
}
if (box->x + box->width > max_x) {
max_x = box->x + box->width;
}
if (box->y + box->height > max_y) {
max_y = box->y + box->height;
}
}
}
layout->state->_box.x = min_x;
layout->state->_box.y = min_y;
layout->state->_box.width = max_x - min_x;
layout->state->_box.height = max_y - min_y;
return &layout->state->_box;
}
// not reached
}
void wlr_output_layout_add_auto(struct wlr_output_layout *layout,
struct wlr_output *output) {
struct wlr_output_layout_output *l_output =
wlr_output_layout_get(layout, output);
bool is_new = l_output == NULL;
if (!l_output) {
l_output = output_layout_output_create(layout, output);
if (!l_output) {
wlr_log(WLR_ERROR, "Failed to create wlr_output_layout_output");
return;
}
}
l_output->state->auto_configured = true;
output_layout_reconfigure(layout);
output_update_global(output);
if (is_new) {
wlr_signal_emit_safe(&layout->events.add, l_output);
}
}
struct wlr_output *wlr_output_layout_get_center_output(
struct wlr_output_layout *layout) {
if (wl_list_empty(&layout->outputs)) {
return NULL;
}
struct wlr_box *extents = wlr_output_layout_get_box(layout, NULL);
double center_x = extents->width / 2. + extents->x;
double center_y = extents->height / 2. + extents->y;
double dest_x = 0, dest_y = 0;
wlr_output_layout_closest_point(layout, NULL, center_x, center_y,
&dest_x, &dest_y);
return wlr_output_layout_output_at(layout, dest_x, dest_y);
}
enum distance_selection_method {
NEAREST,
FARTHEST
};
struct wlr_output *wlr_output_layout_output_in_direction(
struct wlr_output_layout *layout, enum wlr_direction direction,
struct wlr_output *reference, double ref_lx, double ref_ly,
enum distance_selection_method distance_method) {
assert(reference);
struct wlr_box *ref_box = wlr_output_layout_get_box(layout, reference);
double min_distance = (distance_method == NEAREST) ? DBL_MAX : DBL_MIN;
struct wlr_output *closest_output = NULL;
struct wlr_output_layout_output *l_output;
wl_list_for_each(l_output, &layout->outputs, link) {
if (reference != NULL && reference == l_output->output) {
continue;
}
struct wlr_box *box = output_layout_output_get_box(l_output);
bool match = false;
// test to make sure this output is in the given direction
if (direction & WLR_DIRECTION_LEFT) {
match = box->x + box->width <= ref_box->x || match;
}
if (direction & WLR_DIRECTION_RIGHT) {
match = box->x >= ref_box->x + ref_box->width || match;
}
if (direction & WLR_DIRECTION_UP) {
match = box->y + box->height <= ref_box->y || match;
}
if (direction & WLR_DIRECTION_DOWN) {
match = box->y >= ref_box->y + ref_box->height || match;
}
if (!match) {
continue;
}
// calculate distance from the given reference point
double x, y;
wlr_output_layout_closest_point(layout, l_output->output,
ref_lx, ref_ly, &x, &y);
double distance =
(x - ref_lx) * (x - ref_lx) + (y - ref_ly) * (y - ref_ly);
if ((distance_method == NEAREST)
? distance < min_distance
: distance > min_distance) {
min_distance = distance;
closest_output = l_output->output;
}
}
return closest_output;
}
struct wlr_output *wlr_output_layout_adjacent_output(
struct wlr_output_layout *layout, enum wlr_direction direction,
struct wlr_output *reference, double ref_lx, double ref_ly) {
return wlr_output_layout_output_in_direction(layout, direction,
reference, ref_lx, ref_ly, NEAREST);
}
struct wlr_output *wlr_output_layout_farthest_output(
struct wlr_output_layout *layout, enum wlr_direction direction,
struct wlr_output *reference, double ref_lx, double ref_ly) {
return wlr_output_layout_output_in_direction(layout, direction,
reference, ref_lx, ref_ly, FARTHEST);
}
| 1 | 14,326 | What happens if the minimum x coord is > 0? This will incorrectly set it as zero, right? | swaywm-wlroots | c |
@@ -1637,6 +1637,11 @@ func (o *consumer) processNextMsgReq(_ *subscription, c *client, _, reply string
return
}
+ if o.maxp > 0 && batchSize > o.maxp {
+ sendErr(409, "Exceeded MaxAckPending")
+ return
+ }
+
// In case we have to queue up this request.
wr := waitingRequest{client: c, reply: reply, n: batchSize, noWait: noWait, expires: expires}
| 1 | // Copyright 2019-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math/rand"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nuid"
"golang.org/x/time/rate"
)
type ConsumerInfo struct {
Stream string `json:"stream_name"`
Name string `json:"name"`
Created time.Time `json:"created"`
Config *ConsumerConfig `json:"config,omitempty"`
Delivered SequencePair `json:"delivered"`
AckFloor SequencePair `json:"ack_floor"`
NumAckPending int `json:"num_ack_pending"`
NumRedelivered int `json:"num_redelivered"`
NumWaiting int `json:"num_waiting"`
NumPending uint64 `json:"num_pending"`
Cluster *ClusterInfo `json:"cluster,omitempty"`
}
type ConsumerConfig struct {
Durable string `json:"durable_name,omitempty"`
DeliverSubject string `json:"deliver_subject,omitempty"`
DeliverPolicy DeliverPolicy `json:"deliver_policy"`
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
AckPolicy AckPolicy `json:"ack_policy"`
AckWait time.Duration `json:"ack_wait,omitempty"`
MaxDeliver int `json:"max_deliver,omitempty"`
FilterSubject string `json:"filter_subject,omitempty"`
ReplayPolicy ReplayPolicy `json:"replay_policy"`
RateLimit uint64 `json:"rate_limit_bps,omitempty"` // Bits per sec
SampleFrequency string `json:"sample_freq,omitempty"`
MaxWaiting int `json:"max_waiting,omitempty"`
MaxAckPending int `json:"max_ack_pending,omitempty"`
Heartbeat time.Duration `json:"idle_heartbeat,omitempty"`
FlowControl bool `json:"flow_control,omitempty"`
}
type CreateConsumerRequest struct {
Stream string `json:"stream_name"`
Config ConsumerConfig `json:"config"`
}
// DeliverPolicy determines how the consumer should select the first message to deliver.
type DeliverPolicy int
const (
// DeliverAll will be the default so can be omitted from the request.
DeliverAll DeliverPolicy = iota
// DeliverLast will start the consumer with the last sequence received.
DeliverLast
// DeliverNew will only deliver new messages that are sent after the consumer is created.
DeliverNew
// DeliverByStartSequence will look for a defined starting sequence to start.
DeliverByStartSequence
	// DeliverByStartTime will select the first message with a timestamp >= StartTime
DeliverByStartTime
)
func (dp DeliverPolicy) String() string {
switch dp {
case DeliverAll:
return "all"
case DeliverLast:
return "last"
case DeliverNew:
return "new"
case DeliverByStartSequence:
return "by_start_sequence"
case DeliverByStartTime:
return "by_start_time"
default:
return "undefined"
}
}
// AckPolicy determines how the consumer should acknowledge delivered messages.
type AckPolicy int
const (
// AckNone requires no acks for delivered messages.
AckNone AckPolicy = iota
// AckAll when acking a sequence number, this implicitly acks all sequences below this one as well.
AckAll
// AckExplicit requires ack or nack for all messages.
AckExplicit
)
func (a AckPolicy) String() string {
switch a {
case AckNone:
return "none"
case AckAll:
return "all"
default:
return "explicit"
}
}
// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
type ReplayPolicy int
const (
// ReplayInstant will replay messages as fast as possible.
ReplayInstant ReplayPolicy = iota
// ReplayOriginal will maintain the same timing as the messages were received.
ReplayOriginal
)
func (r ReplayPolicy) String() string {
switch r {
case ReplayInstant:
return "instant"
default:
return "original"
}
}
// OK
const OK = "+OK"
// Ack responses. Note that a nil or no payload is same as AckAck
var (
// Ack
AckAck = []byte("+ACK") // nil or no payload to ack subject also means ACK
AckOK = []byte(OK) // deprecated but +OK meant ack as well.
// Nack
AckNak = []byte("-NAK")
// Progress indicator
AckProgress = []byte("+WPI")
// Ack + Deliver the next message(s).
AckNext = []byte("+NXT")
// Terminate delivery of the message.
AckTerm = []byte("+TERM")
)
// Consumer is a jetstream consumer.
type consumer struct {
mu sync.RWMutex
js *jetStream
mset *stream
acc *Account
srv *Server
client *client
sysc *client
sid int
name string
stream string
sseq uint64
dseq uint64
adflr uint64
asflr uint64
sgap uint64
dsubj string
rlimit *rate.Limiter
reqSub *subscription
ackSub *subscription
ackReplyT string
ackSubj string
nextMsgSubj string
maxp int
pblimit int
maxpb int
pbytes int
pfcs int
fcSub *subscription
outq *jsOutQ
pending map[uint64]*Pending
ptmr *time.Timer
rdq []uint64
rdqi map[uint64]struct{}
rdc map[uint64]uint64
maxdc uint64
waiting *waitQueue
cfg ConsumerConfig
store ConsumerStore
active bool
replay bool
filterWC bool
dtmr *time.Timer
dthresh time.Duration
mch chan struct{}
qch chan struct{}
inch chan bool
sfreq int32
ackEventT string
deliveryExcEventT string
created time.Time
closed bool
// Clustered.
ca *consumerAssignment
node RaftNode
infoSub *subscription
lqsent time.Time
}
const (
// JsAckWaitDefault is the default AckWait, only applicable on explicit ack policy observables.
JsAckWaitDefault = 30 * time.Second
// JsDeleteWaitTimeDefault is the default amount of time we will wait for non-durable
// observables to be in an inactive state before deleting them.
JsDeleteWaitTimeDefault = 5 * time.Second
// JsFlowControlMaxPending specifies default pending bytes during flow control that can be
// outstanding.
JsFlowControlMaxPending = 64 * 1024 * 1024
)
func (mset *stream) addConsumer(config *ConsumerConfig) (*consumer, error) {
return mset.addConsumerWithAssignment(config, _EMPTY_, nil)
}
func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname string, ca *consumerAssignment) (*consumer, error) {
mset.mu.RLock()
s, jsa := mset.srv, mset.jsa
mset.mu.RUnlock()
// If we do not have the consumer currently assigned to us in cluster mode we will proceed but warn.
// This can happen on startup with restored state where on meta replay we still do not have
// the assignment. Running in single server mode this always returns true.
if oname != _EMPTY_ && !jsa.consumerAssigned(mset.name(), oname) {
s.Debugf("Consumer %q > %q does not seem to be assigned to this server", mset.name(), oname)
}
if config == nil {
return nil, fmt.Errorf("consumer config required")
}
var err error
	// For now expect a literal subject if it's not empty. Empty means work queue mode (pull mode).
if config.DeliverSubject != _EMPTY_ {
if !subjectIsLiteral(config.DeliverSubject) {
return nil, fmt.Errorf("consumer deliver subject has wildcards")
}
if mset.deliveryFormsCycle(config.DeliverSubject) {
return nil, fmt.Errorf("consumer deliver subject forms a cycle")
}
if config.MaxWaiting != 0 {
return nil, fmt.Errorf("consumer in push mode can not set max waiting")
}
if config.MaxAckPending > 0 && config.AckPolicy == AckNone {
return nil, fmt.Errorf("consumer requires ack policy for max ack pending")
}
if config.Heartbeat > 0 && config.Heartbeat < 100*time.Millisecond {
return nil, fmt.Errorf("consumer idle heartbeat needs to be > 100ms")
}
} else {
// Pull mode / work queue mode require explicit ack.
if config.AckPolicy != AckExplicit {
return nil, fmt.Errorf("consumer in pull mode requires explicit ack policy")
}
// They are also required to be durable since otherwise we will not know when to
// clean them up.
if config.Durable == _EMPTY_ {
return nil, fmt.Errorf("consumer in pull mode requires a durable name")
}
if config.RateLimit > 0 {
return nil, fmt.Errorf("consumer in pull mode can not have rate limit set")
}
if config.MaxWaiting < 0 {
return nil, fmt.Errorf("consumer max waiting needs to be positive")
}
// Set to default if not specified.
if config.MaxWaiting == 0 {
config.MaxWaiting = JSWaitQueueDefaultMax
}
if config.Heartbeat > 0 {
return nil, fmt.Errorf("consumer idle heartbeat requires a push based consumer")
}
if config.FlowControl {
return nil, fmt.Errorf("consumer flow control requires a push based consumer")
}
}
// Setup proper default for ack wait if we are in explicit ack mode.
if config.AckWait == 0 && (config.AckPolicy == AckExplicit || config.AckPolicy == AckAll) {
config.AckWait = JsAckWaitDefault
}
// Setup default of -1, meaning no limit for MaxDeliver.
if config.MaxDeliver == 0 {
config.MaxDeliver = -1
}
// Make sure any partition subject is also a literal.
if config.FilterSubject != _EMPTY_ {
if !mset.validSubject(config.FilterSubject) {
return nil, fmt.Errorf("consumer filter subject is not a valid subset of the interest subjects")
}
}
// Check on start position conflicts.
switch config.DeliverPolicy {
case DeliverAll:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver all, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver all, but optional start time is also set")
}
case DeliverLast:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver last, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver last, but optional start time is also set")
}
case DeliverNew:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver new, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver new, but optional start time is also set")
}
case DeliverByStartSequence:
if config.OptStartSeq == 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver by start sequence, but optional start sequence is not set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver by start sequence, but optional start time is also set")
}
case DeliverByStartTime:
if config.OptStartTime == nil {
return nil, fmt.Errorf("consumer delivery policy is deliver by start time, but optional start time is not set")
}
if config.OptStartSeq != 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver by start time, but optional start sequence is also set")
}
}
sampleFreq := 0
if config.SampleFrequency != "" {
s := strings.TrimSuffix(config.SampleFrequency, "%")
sampleFreq, err = strconv.Atoi(s)
if err != nil {
return nil, fmt.Errorf("failed to parse consumer sampling configuration: %v", err)
}
}
// Grab the client, account and server reference.
c := mset.client
if c == nil {
return nil, fmt.Errorf("stream not valid")
}
c.mu.Lock()
s, a := c.srv, c.acc
c.mu.Unlock()
// Hold mset lock here.
mset.mu.Lock()
// If this one is durable and already exists, we let that be ok as long as the configs match.
if isDurableConsumer(config) {
if eo, ok := mset.consumers[config.Durable]; ok {
mset.mu.Unlock()
ocfg := eo.config()
if reflect.DeepEqual(&ocfg, config) {
return eo, nil
} else {
// If we are a push mode and not active and the only difference
// is deliver subject then update and return.
if configsEqualSansDelivery(ocfg, *config) && eo.hasNoLocalInterest() {
eo.updateDeliverSubject(config.DeliverSubject)
return eo, nil
} else {
return nil, fmt.Errorf("consumer already exists")
}
}
}
}
// Check for any limits, if the config for the consumer sets a limit we check against that
// but if not we use the value from account limits, if account limits is more restrictive
// than stream config we prefer the account limits to handle cases where account limits are
// updated during the lifecycle of the stream
maxc := mset.cfg.MaxConsumers
if mset.cfg.MaxConsumers <= 0 || mset.jsa.limits.MaxConsumers < mset.cfg.MaxConsumers {
maxc = mset.jsa.limits.MaxConsumers
}
if maxc > 0 && len(mset.consumers) >= maxc {
mset.mu.Unlock()
return nil, fmt.Errorf("maximum consumers limit reached")
}
// Check on stream type conflicts.
switch mset.cfg.Retention {
case WorkQueuePolicy:
// Force explicit acks here.
if config.AckPolicy != AckExplicit {
mset.mu.Unlock()
return nil, fmt.Errorf("workqueue stream requires explicit ack")
}
if len(mset.consumers) > 0 {
if config.FilterSubject == _EMPTY_ {
mset.mu.Unlock()
return nil, fmt.Errorf("multiple non-filtered observables not allowed on workqueue stream")
} else if !mset.partitionUnique(config.FilterSubject) {
// We have a partition but it is not unique amongst the others.
mset.mu.Unlock()
return nil, fmt.Errorf("filtered consumer not unique on workqueue stream")
}
}
if config.DeliverPolicy != DeliverAll {
mset.mu.Unlock()
return nil, fmt.Errorf("consumer must be deliver all on workqueue stream")
}
}
// Set name, which will be durable name if set, otherwise we create one at random.
o := &consumer{
mset: mset,
js: s.getJetStream(),
acc: a,
srv: s,
client: s.createInternalJetStreamClient(),
sysc: s.createInternalJetStreamClient(),
cfg: *config,
dsubj: config.DeliverSubject,
outq: mset.outq,
active: true,
qch: make(chan struct{}),
mch: make(chan struct{}, 1),
sfreq: int32(sampleFreq),
maxdc: uint64(config.MaxDeliver),
maxp: config.MaxAckPending,
created: time.Now().UTC(),
}
// Bind internal client to the user account.
o.client.registerWithAccount(a)
// Bind to the system account.
o.sysc.registerWithAccount(s.SystemAccount())
if isDurableConsumer(config) {
if len(config.Durable) > JSMaxNameLen {
mset.mu.Unlock()
return nil, fmt.Errorf("consumer name is too long, maximum allowed is %d", JSMaxNameLen)
}
o.name = config.Durable
if o.isPullMode() {
o.waiting = newWaitQueue(config.MaxWaiting)
}
} else if oname != _EMPTY_ {
o.name = oname
} else {
for {
o.name = createConsumerName()
if _, ok := mset.consumers[o.name]; !ok {
break
}
}
}
// Check if we have a rate limit set.
if config.RateLimit != 0 {
// TODO(dlc) - Make sane values or error if not sane?
// We are configured in bits per sec so adjust to bytes.
rl := rate.Limit(config.RateLimit / 8)
// Burst should be set to maximum msg size for this account, etc.
var burst int
if mset.cfg.MaxMsgSize > 0 {
burst = int(mset.cfg.MaxMsgSize)
} else if mset.jsa.account.limits.mpay > 0 {
burst = int(mset.jsa.account.limits.mpay)
} else {
s := mset.jsa.account.srv
burst = int(s.getOpts().MaxPayload)
}
o.rlimit = rate.NewLimiter(rl, burst)
}
// Check if we have filtered subject that is a wildcard.
if config.FilterSubject != _EMPTY_ && !subjectIsLiteral(config.FilterSubject) {
o.filterWC = true
}
// already under lock, mset.Name() would deadlock
o.stream = mset.cfg.Name
o.ackEventT = JSMetricConsumerAckPre + "." + o.stream + "." + o.name
o.deliveryExcEventT = JSAdvisoryConsumerMaxDeliveryExceedPre + "." + o.stream + "." + o.name
store, err := mset.store.ConsumerStore(o.name, config)
if err != nil {
mset.mu.Unlock()
o.deleteWithoutAdvisory()
return nil, fmt.Errorf("error creating store for observable: %v", err)
}
o.store = store
if !isValidName(o.name) {
mset.mu.Unlock()
o.deleteWithoutAdvisory()
return nil, fmt.Errorf("durable name can not contain '.', '*', '>'")
}
// Select starting sequence number
o.selectStartingSeqNo()
// Now register with mset and create the ack subscription.
// Check if we already have this one registered.
if eo, ok := mset.consumers[o.name]; ok {
mset.mu.Unlock()
if !o.isDurable() || !o.isPushMode() {
o.name = _EMPTY_ // Prevent removal since same name.
o.deleteWithoutAdvisory()
return nil, fmt.Errorf("consumer already exists")
}
// If we are here we have already registered this durable. If it is still active that is an error.
if eo.isActive() {
o.name = _EMPTY_ // Prevent removal since same name.
o.deleteWithoutAdvisory()
return nil, fmt.Errorf("consumer already exists and is still active")
}
// Since we are here this means we have a potentially new durable so we should update here.
// Check that configs are the same.
if !configsEqualSansDelivery(o.cfg, eo.cfg) {
o.name = _EMPTY_ // Prevent removal since same name.
o.deleteWithoutAdvisory()
return nil, fmt.Errorf("consumer replacement durable config not the same")
}
// Once we are here we have a replacement push-based durable.
eo.updateDeliverSubject(o.cfg.DeliverSubject)
return eo, nil
}
// Set up the ack subscription for this observable. Will use wildcard for all acks.
// We will remember the template to generate replies with sequence numbers and use
// that to scanf them back in.
mn := mset.cfg.Name
pre := fmt.Sprintf(jsAckT, mn, o.name)
o.ackReplyT = fmt.Sprintf("%s.%%d.%%d.%%d.%%d.%%d", pre)
o.ackSubj = fmt.Sprintf("%s.*.*.*.*.*", pre)
o.nextMsgSubj = fmt.Sprintf(JSApiRequestNextT, mn, o.name)
if o.isPushMode() {
o.dthresh = JsDeleteWaitTimeDefault
if !o.isDurable() {
// Check if we are not durable that the delivery subject has interest.
// Check in place here for interest. Will setup properly in setLeader.
r := o.acc.sl.Match(o.cfg.DeliverSubject)
if !o.hasDeliveryInterest(len(r.psubs)+len(r.qsubs) > 0) {
mset.mu.Unlock()
o.deleteWithoutAdvisory()
return nil, fmt.Errorf("consumer requires interest for delivery subject when ephemeral")
}
}
}
// Set our ca.
if ca != nil {
o.setConsumerAssignment(ca)
}
mset.setConsumer(o)
mset.mu.Unlock()
if !s.JetStreamIsClustered() && s.standAloneMode() {
o.setLeader(true)
}
// This is always true in single server mode.
if o.isLeader() {
// Send advisory.
var suppress bool
if !s.standAloneMode() && ca == nil {
suppress = true
} else if ca != nil {
suppress = ca.responded
}
if !suppress {
o.sendCreateAdvisory()
}
}
return o, nil
}
func (o *consumer) consumerAssignment() *consumerAssignment {
o.mu.RLock()
defer o.mu.RUnlock()
return o.ca
}
func (o *consumer) setConsumerAssignment(ca *consumerAssignment) {
o.mu.Lock()
defer o.mu.Unlock()
o.ca = ca
// Set our node.
if ca != nil {
o.node = ca.Group.node
}
}
// Lock should be held.
func (o *consumer) isLeader() bool {
if o.node != nil {
return o.node.Leader()
}
return true
}
func (o *consumer) setLeader(isLeader bool) {
o.mu.RLock()
mset := o.mset
isRunning := o.ackSub != nil
o.mu.RUnlock()
// If we are here we have a change in leader status.
if isLeader {
if mset == nil || isRunning {
return
}
mset.mu.RLock()
s, jsa, stream := mset.srv, mset.jsa, mset.cfg.Name
mset.mu.RUnlock()
o.mu.Lock()
// Restore our saved state. During non-leader status we just update our underlying store.
o.readStoredState()
// Do info sub.
if o.infoSub == nil && jsa != nil {
isubj := fmt.Sprintf(clusterConsumerInfoT, jsa.acc(), stream, o.name)
// Note below the way we subscribe here is so that we can send requests to ourselves.
o.infoSub, _ = s.systemSubscribe(isubj, _EMPTY_, false, o.sysc, o.handleClusterConsumerInfoRequest)
}
var err error
if o.ackSub, err = o.subscribeInternal(o.ackSubj, o.processAck); err != nil {
o.mu.Unlock()
o.deleteWithoutAdvisory()
return
}
// Setup the internal sub for next message requests regardless.
// Will error if wrong mode to provide feedback to users.
if o.reqSub, err = o.subscribeInternal(o.nextMsgSubj, o.processNextMsgReq); err != nil {
o.mu.Unlock()
o.deleteWithoutAdvisory()
return
}
// Check on flow control settings.
if o.cfg.FlowControl {
o.setMaxPendingBytes(JsFlowControlMaxPending)
if o.fcSub, err = o.subscribeInternal(jsFlowControl, o.processFlowControl); err != nil {
o.mu.Unlock()
o.deleteWithoutAdvisory()
return
}
}
// Setup initial pending.
o.setInitialPending()
// If push mode, register for notifications on interest.
if o.isPushMode() {
o.inch = make(chan bool, 8)
o.acc.sl.RegisterNotification(o.cfg.DeliverSubject, o.inch)
if o.active = <-o.inch; !o.active {
// Check gateways in case they are enabled.
o.active = s.hasGatewayInterest(o.acc.Name, o.cfg.DeliverSubject)
}
}
// If we are not in ReplayInstant mode mark us as in replay state until resolved.
if o.cfg.ReplayPolicy != ReplayInstant {
o.replay = true
}
// Recreate quit channel.
o.qch = make(chan struct{})
qch := o.qch
o.mu.Unlock()
// Now start up Go routine to deliver msgs.
go o.loopAndGatherMsgs(qch)
} else {
// Shutdown the go routines and the subscriptions.
o.mu.Lock()
o.unsubscribe(o.ackSub)
o.unsubscribe(o.reqSub)
o.unsubscribe(o.fcSub)
o.ackSub = nil
o.reqSub = nil
o.fcSub = nil
if o.infoSub != nil {
o.srv.sysUnsubscribe(o.infoSub)
o.infoSub = nil
}
if o.qch != nil {
close(o.qch)
o.qch = nil
}
o.mu.Unlock()
}
}
func (o *consumer) handleClusterConsumerInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
o.mu.RLock()
sysc := o.sysc
o.mu.RUnlock()
sysc.sendInternalMsg(reply, _EMPTY_, nil, o.info())
}
// Lock should be held.
func (o *consumer) subscribeInternal(subject string, cb msgHandler) (*subscription, error) {
c := o.client
if c == nil {
return nil, fmt.Errorf("invalid consumer")
}
if !c.srv.eventsEnabled() {
return nil, ErrNoSysAccount
}
if cb == nil {
return nil, fmt.Errorf("undefined message handler")
}
o.sid++
// Now create the subscription
return c.processSub([]byte(subject), nil, []byte(strconv.Itoa(o.sid)), cb, false)
}
// Unsubscribe from our subscription.
// Lock should be held.
func (o *consumer) unsubscribe(sub *subscription) {
if sub == nil || o.client == nil {
return
}
o.client.processUnsub(sub.sid)
}
// We need to make sure we protect access to the outq.
// Do all advisory sends here.
func (o *consumer) sendAdvisory(subj string, msg []byte) {
o.outq.send(&jsPubMsg{subj, subj, _EMPTY_, nil, msg, nil, 0, nil})
}
func (o *consumer) sendDeleteAdvisoryLocked() {
e := JSConsumerActionAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
Action: DeleteEvent,
}
j, err := json.Marshal(e)
if err != nil {
return
}
subj := JSAdvisoryConsumerDeletedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
func (o *consumer) sendCreateAdvisory() {
o.mu.Lock()
defer o.mu.Unlock()
e := JSConsumerActionAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
Action: CreateEvent,
}
j, err := json.Marshal(e)
if err != nil {
return
}
subj := JSAdvisoryConsumerCreatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
// Created returns created time.
func (o *consumer) createdTime() time.Time {
o.mu.Lock()
created := o.created
o.mu.Unlock()
return created
}
// Internal to allow creation time to be restored.
func (o *consumer) setCreatedTime(created time.Time) {
o.mu.Lock()
o.created = created
o.mu.Unlock()
}
// This will check for extended interest in a subject. If we have local interest we just return
// that, but in the absence of local interest and presence of gateways or service imports we need
// to check those as well.
func (o *consumer) hasDeliveryInterest(localInterest bool) bool {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return false
}
acc := o.acc
deliver := o.cfg.DeliverSubject
o.mu.Unlock()
if localInterest {
return true
}
// If we are here check gateways.
if s := acc.srv; s != nil && s.hasGatewayInterest(acc.Name, deliver) {
return true
}
return false
}
func (s *Server) hasGatewayInterest(account, subject string) bool {
gw := s.gateway
if !gw.enabled {
return false
}
gw.RLock()
defer gw.RUnlock()
for _, gwc := range gw.outo {
psi, qr := gwc.gatewayInterest(account, subject)
if psi || qr != nil {
return true
}
}
return false
}
// This processes an update to the local interest for a deliver subject.
func (o *consumer) updateDeliveryInterest(localInterest bool) {
interest := o.hasDeliveryInterest(localInterest)
o.mu.Lock()
defer o.mu.Unlock()
mset := o.mset
if mset == nil || o.isPullMode() {
return
}
if interest && !o.active {
o.signalNewMessages()
}
o.active = interest
// Stop and clear the delete timer always.
stopAndClearTimer(&o.dtmr)
// If we do not have interest anymore and we are not durable start
// a timer to delete us. We wait for a bit in case of server reconnect.
if !o.isDurable() && !interest {
o.dtmr = time.AfterFunc(o.dthresh, func() { o.deleteNotActive() })
}
}
func (o *consumer) deleteNotActive() {
// Need to check again if there is not an interest now that the timer fires.
if !o.hasNoLocalInterest() {
return
}
o.mu.RLock()
if o.mset == nil {
o.mu.RUnlock()
return
}
s, js, jsa := o.mset.srv, o.mset.srv.js, o.mset.jsa
acc, stream, name := o.acc.Name, o.stream, o.name
o.mu.RUnlock()
// If we are clustered, check if we still have this consumer assigned.
// If we do forward a proposal to delete ourselves to the metacontroller leader.
if s.JetStreamIsClustered() {
if ca := js.consumerAssignment(acc, stream, name); ca != nil {
// We copy and clear the reply since this removal is internal.
jsa.mu.Lock()
js := jsa.js
jsa.mu.Unlock()
if js != nil {
js.mu.RLock()
if cc := js.cluster; cc != nil {
cca := *ca
cca.Reply = _EMPTY_
meta, removeEntry := cc.meta, encodeDeleteConsumerAssignment(&cca)
meta.ForwardProposal(removeEntry)
// Check to make sure we went away.
// Don't think this needs to be a monitored go routine.
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
js.mu.RLock()
ca := js.consumerAssignment(acc, stream, name)
js.mu.RUnlock()
if ca != nil {
s.Warnf("Consumer assignment not cleaned up, retrying")
meta.ForwardProposal(removeEntry)
} else {
return
}
}
}()
}
js.mu.RUnlock()
}
}
}
// We will delete here regardless.
o.delete()
}
// Config returns the consumer's configuration.
func (o *consumer) config() ConsumerConfig {
o.mu.Lock()
defer o.mu.Unlock()
return o.cfg
}
// Force expiration of all pending.
// Lock should be held.
func (o *consumer) forceExpirePending() {
var expired []uint64
for seq := range o.pending {
if !o.onRedeliverQueue(seq) {
expired = append(expired, seq)
}
}
if len(expired) > 0 {
sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
o.addToRedeliverQueue(expired...)
// Now we should update the timestamp here since we are redelivering.
// We will use an incrementing time to preserve order for any other redelivery.
off := time.Now().UnixNano() - o.pending[expired[0]].Timestamp
for _, seq := range expired {
if p, ok := o.pending[seq]; ok && p != nil {
p.Timestamp += off
}
}
o.ptmr.Reset(o.ackWait(0))
}
o.signalNewMessages()
}
// This is a config change for the delivery subject for a
// push based consumer.
func (o *consumer) updateDeliverSubject(newDeliver string) {
// Update the config and the dsubj
o.mu.Lock()
defer o.mu.Unlock()
if o.closed || o.isPullMode() || o.cfg.DeliverSubject == newDeliver {
return
}
// Force redeliver of all pending on change of delivery subject.
if len(o.pending) > 0 {
o.forceExpirePending()
}
o.acc.sl.ClearNotification(o.dsubj, o.inch)
o.dsubj, o.cfg.DeliverSubject = newDeliver, newDeliver
// When we register new one it will deliver to update state loop.
o.acc.sl.RegisterNotification(newDeliver, o.inch)
}
// Check that configs are equal but allow delivery subjects to be different.
func configsEqualSansDelivery(a, b ConsumerConfig) bool {
// These were copied in so can set Delivery here.
a.DeliverSubject, b.DeliverSubject = _EMPTY_, _EMPTY_
return a == b
}
// Helper to send a reply to an ack.
func (o *consumer) sendAckReply(subj string) {
o.mu.Lock()
defer o.mu.Unlock()
o.sendAdvisory(subj, nil)
}
// Process a message for the ack reply subject delivered with a message.
func (o *consumer) processAck(_ *subscription, c *client, subject, reply string, rmsg []byte) {
_, msg := c.msgParts(rmsg)
sseq, dseq, dc := ackReplyInfo(subject)
skipAckReply := sseq == 0
switch {
case len(msg) == 0, bytes.Equal(msg, AckAck), bytes.Equal(msg, AckOK):
o.ackMsg(sseq, dseq, dc)
case bytes.HasPrefix(msg, AckNext):
o.ackMsg(sseq, dseq, dc)
// processNextMsgReq can be invoked from an internal subscription or from here.
// Therefore, it has to call msgParts(), so we can't simply pass msg[len(AckNext):]
// with current c.pa.hdr because it would cause a panic. We will save the current
// c.pa.hdr value and disable headers before calling processNextMsgReq and then
// restore so that we don't mess with the calling stack in case it is used
// somewhere else.
phdr := c.pa.hdr
c.pa.hdr = -1
o.processNextMsgReq(nil, c, subject, reply, msg[len(AckNext):])
c.pa.hdr = phdr
skipAckReply = true
case bytes.Equal(msg, AckNak):
o.processNak(sseq, dseq)
case bytes.Equal(msg, AckProgress):
o.progressUpdate(sseq)
case bytes.Equal(msg, AckTerm):
o.processTerm(sseq, dseq, dc)
}
// Ack the ack if requested.
if len(reply) > 0 && !skipAckReply {
o.sendAckReply(reply)
}
}
// Used to process a working update to delay redelivery.
func (o *consumer) progressUpdate(seq uint64) {
o.mu.Lock()
if len(o.pending) > 0 {
if p, ok := o.pending[seq]; ok {
p.Timestamp = time.Now().UnixNano()
// Update store system.
o.updateDelivered(p.Sequence, seq, 1, p.Timestamp)
}
}
o.mu.Unlock()
}
// Lock should be held.
func (o *consumer) updateSkipped() {
// Clustered mode and R>1 only.
if o.node == nil || !o.isLeader() {
return
}
var b [1 + 8]byte
b[0] = byte(updateSkipOp)
var le = binary.LittleEndian
le.PutUint64(b[1:], o.sseq)
o.node.Propose(b[:])
}
// Lock should be held.
func (o *consumer) updateDelivered(dseq, sseq, dc uint64, ts int64) {
// Clustered mode and R>1.
if o.node != nil {
// Inline for now, use variable compression.
var b [4*binary.MaxVarintLen64 + 1]byte
b[0] = byte(updateDeliveredOp)
n := 1
n += binary.PutUvarint(b[n:], dseq)
n += binary.PutUvarint(b[n:], sseq)
n += binary.PutUvarint(b[n:], dc)
n += binary.PutVarint(b[n:], ts)
o.node.Propose(b[:n])
}
// Update local state always.
o.store.UpdateDelivered(dseq, sseq, dc, ts)
}
// Lock should be held.
func (o *consumer) updateAcks(dseq, sseq uint64) {
if o.node != nil {
// Inline for now, use variable compression.
var b [2*binary.MaxVarintLen64 + 1]byte
b[0] = byte(updateAcksOp)
n := 1
n += binary.PutUvarint(b[n:], dseq)
n += binary.PutUvarint(b[n:], sseq)
o.node.Propose(b[:n])
} else {
o.store.UpdateAcks(dseq, sseq)
}
}
// Process a NAK.
func (o *consumer) processNak(sseq, dseq uint64) {
o.mu.Lock()
defer o.mu.Unlock()
// Check for out of range.
if dseq <= o.adflr || dseq > o.dseq {
return
}
// If we are explicit ack make sure this is still on our pending list.
if len(o.pending) > 0 {
if _, ok := o.pending[sseq]; !ok {
return
}
}
// If already queued up also ignore.
if !o.onRedeliverQueue(sseq) {
o.addToRedeliverQueue(sseq)
}
o.signalNewMessages()
}
// Process a TERM
func (o *consumer) processTerm(sseq, dseq, dc uint64) {
// Treat like an ack to suppress redelivery.
o.processAckMsg(sseq, dseq, dc, false)
o.mu.Lock()
defer o.mu.Unlock()
// Deliver an advisory
e := JSConsumerDeliveryTerminatedAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerDeliveryTerminatedAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Deliveries: dc,
}
j, err := json.Marshal(e)
if err != nil {
return
}
subj := JSAdvisoryConsumerMsgTerminatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
// Introduce a small delay in when timer fires to check pending.
// Allows bursts to be treated in same time frame.
const ackWaitDelay = time.Millisecond
// ackWait returns how long to wait to fire the pending timer.
func (o *consumer) ackWait(next time.Duration) time.Duration {
if next > 0 {
return next + ackWaitDelay
}
return o.cfg.AckWait + ackWaitDelay
}
// This will restore the state from disk.
func (o *consumer) readStoredState() error {
if o.store == nil {
return nil
}
state, err := o.store.State()
if err == nil && state != nil {
o.applyState(state)
}
return err
}
// Apply the consumer stored state.
func (o *consumer) applyState(state *ConsumerState) {
if state == nil {
return
}
o.dseq = state.Delivered.Consumer + 1
o.sseq = state.Delivered.Stream + 1
o.adflr = state.AckFloor.Consumer
o.asflr = state.AckFloor.Stream
o.pending = state.Pending
o.rdc = state.Redelivered
// Setup tracking timer if we have restored pending.
if len(o.pending) > 0 && o.ptmr == nil {
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
}
}
func (o *consumer) readStoreState() *ConsumerState {
o.mu.RLock()
defer o.mu.RUnlock()
if o.store == nil {
return nil
}
state, _ := o.store.State()
return state
}
// Sets our store state from another source. Used in clustered mode on snapshot restore.
func (o *consumer) setStoreState(state *ConsumerState) error {
if state == nil {
return nil
}
o.applyState(state)
return o.store.Update(state)
}
// Update our state to the store.
func (o *consumer) writeStoreState() error {
o.mu.Lock()
defer o.mu.Unlock()
if o.store == nil {
return nil
}
state := ConsumerState{
Delivered: SequencePair{
Consumer: o.dseq - 1,
Stream: o.sseq - 1,
},
AckFloor: SequencePair{
Consumer: o.adflr,
Stream: o.asflr,
},
Pending: o.pending,
Redelivered: o.rdc,
}
return o.store.Update(&state)
}
// Info returns our current consumer state.
func (o *consumer) info() *ConsumerInfo {
o.mu.RLock()
mset := o.mset
if mset == nil || mset.srv == nil {
o.mu.RUnlock()
return nil
}
js := o.js
o.mu.RUnlock()
if js == nil {
return nil
}
ci := js.clusterInfo(o.raftGroup())
o.mu.RLock()
defer o.mu.RUnlock()
cfg := o.cfg
info := &ConsumerInfo{
Stream: o.stream,
Name: o.name,
Created: o.created,
Config: &cfg,
Delivered: SequencePair{
Consumer: o.dseq - 1,
Stream: o.sseq - 1,
},
AckFloor: SequencePair{
Consumer: o.adflr,
Stream: o.asflr,
},
NumAckPending: len(o.pending),
NumRedelivered: len(o.rdc),
NumPending: o.sgap,
Cluster: ci,
}
// If we are a pull mode consumer, report on number of waiting requests.
if o.isPullMode() {
info.NumWaiting = o.waiting.len()
}
return info
}
// Will signal us that new messages are available. Will break out of waiting.
func (o *consumer) signalNewMessages() {
// Kick our new message channel
select {
case o.mch <- struct{}{}:
default:
}
}
// shouldSample lets us know if we are sampling metrics on acks.
func (o *consumer) shouldSample() bool {
switch {
case o.sfreq <= 0:
return false
case o.sfreq >= 100:
return true
}
// TODO(ripienaar) this is a tad slow so we need to rethink here, however this will only
// hit for those with sampling enabled and its not the default
return rand.Int31n(100) <= o.sfreq
}
func (o *consumer) sampleAck(sseq, dseq, dc uint64) {
if !o.shouldSample() {
return
}
now := time.Now().UTC()
unow := now.UnixNano()
e := JSConsumerAckMetric{
TypedEvent: TypedEvent{
Type: JSConsumerAckMetricType,
ID: nuid.Next(),
Time: now,
},
Stream: o.stream,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Delay: unow - o.pending[sseq].Timestamp,
Deliveries: dc,
}
j, err := json.Marshal(e)
if err != nil {
return
}
o.sendAdvisory(o.ackEventT, j)
}
// Process an ack for a message.
func (o *consumer) ackMsg(sseq, dseq, dc uint64) {
o.processAckMsg(sseq, dseq, dc, true)
}
func (o *consumer) processAckMsg(sseq, dseq, dc uint64, doSample bool) {
o.mu.Lock()
var sagap uint64
var needSignal bool
switch o.cfg.AckPolicy {
case AckExplicit:
if p, ok := o.pending[sseq]; ok {
if doSample {
o.sampleAck(sseq, dseq, dc)
}
if o.maxp > 0 && len(o.pending) >= o.maxp {
needSignal = true
}
delete(o.pending, sseq)
// Use the original deliver sequence from our pending record.
dseq = p.Sequence
if dseq == o.adflr+1 {
o.adflr, o.asflr = dseq, sseq
for ss := sseq + 1; ss < o.sseq; ss++ {
if p, ok := o.pending[ss]; ok {
if p.Sequence > 0 {
o.adflr, o.asflr = p.Sequence-1, ss-1
}
break
}
}
}
}
// We do these regardless.
delete(o.rdc, sseq)
o.removeFromRedeliverQueue(sseq)
case AckAll:
// no-op
if dseq <= o.adflr || sseq <= o.asflr {
o.mu.Unlock()
return
}
if o.maxp > 0 && len(o.pending) >= o.maxp {
needSignal = true
}
sagap = sseq - o.asflr
o.adflr, o.asflr = dseq, sseq
for seq := sseq; seq > sseq-sagap; seq-- {
delete(o.pending, seq)
delete(o.rdc, seq)
o.removeFromRedeliverQueue(seq)
}
case AckNone:
// FIXME(dlc) - This is error but do we care?
o.mu.Unlock()
return
}
// Update underlying store.
o.updateAcks(dseq, sseq)
mset := o.mset
clustered := o.node != nil
o.mu.Unlock()
// Let the owning stream know if we are interest or workqueue retention based.
// If this consumer is clustered this will be handled by processReplicatedAck
// after the ack has propagated.
if !clustered && mset != nil && mset.cfg.Retention != LimitsPolicy {
if sagap > 1 {
// FIXME(dlc) - This is very inefficient, will need to fix.
for seq := sseq; seq > sseq-sagap; seq-- {
mset.ackMsg(o, seq)
}
} else {
mset.ackMsg(o, sseq)
}
}
// If we had max ack pending set and were at limit we need to unblock folks.
if needSignal {
o.signalNewMessages()
}
}
// Check if we need an ack for this store seq.
// This is called for interest based retention streams to remove messages.
func (o *consumer) needAck(sseq uint64) bool {
var needAck bool
var asflr, osseq uint64
var pending map[uint64]*Pending
o.mu.RLock()
if o.isLeader() {
asflr, osseq = o.asflr, o.sseq
pending = o.pending
} else {
state, err := o.store.State()
if err != nil || state == nil {
o.mu.RUnlock()
return false
}
asflr, osseq = state.AckFloor.Stream, o.sseq
pending = state.Pending
}
switch o.cfg.AckPolicy {
case AckNone, AckAll:
needAck = sseq > asflr
case AckExplicit:
if sseq > asflr {
// Generally this means we need an ack, but just double check pending acks.
needAck = true
if sseq < osseq {
if len(pending) == 0 {
needAck = false
} else {
_, needAck = pending[sseq]
}
}
}
}
o.mu.RUnlock()
return needAck
}
// Helper for the next message requests.
func nextReqFromMsg(msg []byte) (time.Time, int, bool, error) {
req := bytes.TrimSpace(msg)
switch {
case len(req) == 0:
return time.Time{}, 1, false, nil
case req[0] == '{':
var cr JSApiConsumerGetNextRequest
if err := json.Unmarshal(req, &cr); err != nil {
return time.Time{}, -1, false, err
}
if cr.Expires == time.Duration(0) {
return time.Time{}, cr.Batch, cr.NoWait, nil
}
return time.Now().Add(cr.Expires), cr.Batch, cr.NoWait, nil
default:
if n, err := strconv.Atoi(string(req)); err == nil {
return time.Time{}, n, false, nil
}
}
return time.Time{}, 1, false, nil
}
// Represents a request that is on the internal waiting queue
type waitingRequest struct {
client *client
reply string
n int // For batching
expires time.Time
noWait bool
}
// waiting queue for requests that are waiting for new messages to arrive.
type waitQueue struct {
rp, wp int
reqs []*waitingRequest
}
// Create a new ring buffer with at most max items.
func newWaitQueue(max int) *waitQueue {
return &waitQueue{rp: -1, reqs: make([]*waitingRequest, max)}
}
var (
errWaitQueueFull = errors.New("wait queue is full")
errWaitQueueNil = errors.New("wait queue is nil")
)
// Adds in a new request.
func (wq *waitQueue) add(req *waitingRequest) error {
if wq == nil {
return errWaitQueueNil
}
if wq.isFull() {
return errWaitQueueFull
}
wq.reqs[wq.wp] = req
// TODO(dlc) - Could make pow2 and get rid of mod.
wq.wp = (wq.wp + 1) % cap(wq.reqs)
// Adjust read pointer if we were empty.
if wq.rp < 0 {
wq.rp = 0
}
return nil
}
func (wq *waitQueue) isFull() bool {
return wq.rp == wq.wp
}
func (wq *waitQueue) len() int {
if wq == nil || wq.rp < 0 {
return 0
}
if wq.rp < wq.wp {
return wq.wp - wq.rp
}
return cap(wq.reqs) - wq.rp + wq.wp
}
// Peek will return the next request waiting or nil if empty.
func (wq *waitQueue) peek() *waitingRequest {
if wq == nil {
return nil
}
var wr *waitingRequest
if wq.rp >= 0 {
wr = wq.reqs[wq.rp]
}
return wr
}
// pop will return the next request and move the read cursor.
func (wq *waitQueue) pop() *waitingRequest {
wr := wq.peek()
if wr != nil {
wr.n--
if wr.n <= 0 {
wq.reqs[wq.rp] = nil
wq.rp = (wq.rp + 1) % cap(wq.reqs)
// Check if we are empty.
if wq.rp == wq.wp {
wq.rp, wq.wp = -1, 0
}
}
}
return wr
}
// processNextMsgReq will process a request for the next message available. A nil message payload means deliver
// a single message. If the payload is a formal request or a number parseable with Atoi(), then we will send a
// batch of messages without requiring another request to this endpoint, or an ACK.
func (o *consumer) processNextMsgReq(_ *subscription, c *client, _, reply string, msg []byte) {
_, msg = c.msgParts(msg)
o.mu.Lock()
defer o.mu.Unlock()
s, mset, js := o.srv, o.mset, o.js
if mset == nil {
return
}
sendErr := func(status int, description string) {
hdr := []byte(fmt.Sprintf("NATS/1.0 %d %s\r\n\r\n", status, description))
o.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0, nil})
}
if o.isPushMode() {
sendErr(409, "Consumer is push based")
return
}
if o.waiting.isFull() {
// Try to expire some of the requests.
if expired := o.expireWaiting(); expired == 0 {
// Force expiration if needed.
o.forceExpireFirstWaiting()
}
}
// Check payload here to see if they sent in batch size or a formal request.
expires, batchSize, noWait, err := nextReqFromMsg(msg)
if err != nil {
sendErr(400, fmt.Sprintf("Bad Request - %v", err))
return
}
// In case we have to queue up this request.
wr := waitingRequest{client: c, reply: reply, n: batchSize, noWait: noWait, expires: expires}
// If we are in replay mode, defer to processReplay for delivery.
if o.replay {
o.waiting.add(&wr)
o.mu.Unlock()
o.signalNewMessages()
o.mu.Lock()
return
}
sendBatch := func(wr *waitingRequest) {
for i, batchSize := 0, wr.n; i < batchSize; i++ {
// See if we have more messages available.
if subj, hdr, msg, seq, dc, ts, err := o.getNextMsg(); err == nil {
o.deliverMsg(reply, subj, hdr, msg, seq, dc, ts)
// Need to discount this from the total n for the request.
wr.n--
} else {
if wr.noWait {
switch err {
case errMaxAckPending:
sendErr(409, "Exceeded MaxAckPending")
default:
sendErr(404, "No Messages")
}
} else {
o.waiting.add(wr)
}
return
}
}
}
// If this is direct from a client can proceed inline.
if c.kind == CLIENT {
sendBatch(&wr)
} else {
// Check for API outstanding requests.
if apiOut := atomic.AddInt64(&js.apiCalls, 1); apiOut > 1024 {
atomic.AddInt64(&js.apiCalls, -1)
o.mu.Unlock()
sendErr(503, "JetStream API limit exceeded")
s.Warnf("JetStream API limit exceeded: %d calls outstanding", apiOut)
return
}
// Dispatch the API call to its own Go routine.
go func() {
o.mu.Lock()
sendBatch(&wr)
o.mu.Unlock()
atomic.AddInt64(&js.apiCalls, -1)
}()
}
}
// Increase the delivery count for this message.
// ONLY used on redelivery semantics.
// Lock should be held.
func (o *consumer) incDeliveryCount(sseq uint64) uint64 {
if o.rdc == nil {
o.rdc = make(map[uint64]uint64)
}
o.rdc[sseq] += 1
return o.rdc[sseq] + 1
}
// send a delivery exceeded advisory.
func (o *consumer) notifyDeliveryExceeded(sseq, dc uint64) {
e := JSConsumerDeliveryExceededAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerDeliveryExceededAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
StreamSeq: sseq,
Deliveries: dc,
}
j, err := json.Marshal(e)
if err != nil {
return
}
o.sendAdvisory(o.deliveryExcEventT, j)
}
// Check to see if the candidate subject matches a filter if its present.
// Lock should be held.
func (o *consumer) isFilteredMatch(subj string) bool {
// No filter is automatic match.
if o.cfg.FilterSubject == _EMPTY_ {
return true
}
if !o.filterWC {
return subj == o.cfg.FilterSubject
}
// If we are here we have a wildcard filter subject.
// TODO(dlc) at speed might be better to just do a sublist with L2 and/or possibly L1.
return subjectIsSubsetMatch(subj, o.cfg.FilterSubject)
}
var errMaxAckPending = errors.New("max ack pending reached")
var errBadConsumer = errors.New("consumer not valid")
// Get next available message from underlying store.
// Is partition aware and redeliver aware.
// Lock should be held.
func (o *consumer) getNextMsg() (subj string, hdr, msg []byte, seq uint64, dc uint64, ts int64, err error) {
if o.mset == nil || o.mset.store == nil {
return _EMPTY_, nil, nil, 0, 0, 0, errBadConsumer
}
for {
seq, dc := o.sseq, uint64(1)
if o.hasRedeliveries() {
seq = o.getNextToRedeliver()
dc = o.incDeliveryCount(seq)
if o.maxdc > 0 && dc > o.maxdc {
// Only send once
if dc == o.maxdc+1 {
o.notifyDeliveryExceeded(seq, dc-1)
}
// Make sure to remove from pending.
delete(o.pending, seq)
continue
}
} else if o.maxp > 0 && len(o.pending) >= o.maxp {
// maxp only set when ack policy != AckNone and user set MaxAckPending
// Stall if we have hit max pending.
return _EMPTY_, nil, nil, 0, 0, 0, errMaxAckPending
}
subj, hdr, msg, ts, err := o.mset.store.LoadMsg(seq)
if err == nil {
if dc == 1 { // First delivery.
o.sseq++
if o.cfg.FilterSubject != _EMPTY_ && !o.isFilteredMatch(subj) {
o.updateSkipped()
continue
}
}
// We have the msg here.
return subj, hdr, msg, seq, dc, ts, nil
}
// We got an error here. If this is an EOF we will return, otherwise
// we can continue looking.
if err == ErrStoreEOF || err == ErrStoreClosed {
return _EMPTY_, nil, nil, 0, 0, 0, err
}
// Skip since its probably deleted or expired.
o.sseq++
}
}
// forceExpireFirstWaiting will force expire the first waiting.
// Lock should be held.
func (o *consumer) forceExpireFirstWaiting() *waitingRequest {
// FIXME(dlc) - Should we do advisory here as well?
wr := o.waiting.pop()
if wr == nil {
return wr
}
// If we are expiring this and we think there is still interest, alert.
if rr := o.acc.sl.Match(wr.reply); len(rr.psubs)+len(rr.qsubs) > 0 && o.mset != nil {
// We still appear to have interest, so send alert as courtesy.
hdr := []byte("NATS/1.0 408 Request Timeout\r\n\r\n")
o.outq.send(&jsPubMsg{wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0, nil})
}
return wr
}
// Will check for expiration and lack of interest on waiting requests.
func (o *consumer) expireWaiting() int {
var expired int
now := time.Now()
for wr := o.waiting.peek(); wr != nil; wr = o.waiting.peek() {
if !wr.expires.IsZero() && now.After(wr.expires) {
o.forceExpireFirstWaiting()
expired++
continue
}
s, acc := o.acc.srv, o.acc
rr := acc.sl.Match(wr.reply)
if len(rr.psubs)+len(rr.qsubs) > 0 {
break
}
// If we are here check on gateways.
if s != nil && s.hasGatewayInterest(acc.Name, wr.reply) {
break
}
// No more interest so go ahead and remove this one from our list.
o.forceExpireFirstWaiting()
expired++
}
return expired
}
// Will check to make sure those waiting still have registered interest.
func (o *consumer) checkWaitingForInterest() bool {
o.expireWaiting()
return o.waiting.len() > 0
}
// Lock should be held.
func (o *consumer) hbTimer() (time.Duration, *time.Timer) {
if o.cfg.Heartbeat == 0 {
return 0, nil
}
return o.cfg.Heartbeat, time.NewTimer(o.cfg.Heartbeat)
}
func (o *consumer) loopAndGatherMsgs(qch chan struct{}) {
	// On startup check to see if we are in a replay situation where replay policy is not instant.
var (
lts int64 // last time stamp seen, used for replay.
lseq uint64
)
o.mu.Lock()
if o.replay {
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
}
lseq = o.mset.state().LastSeq
}
var hbc <-chan time.Time
hbd, hb := o.hbTimer()
if hb != nil {
hbc = hb.C
}
inch := o.inch
o.mu.Unlock()
// Deliver all the msgs we have now, once done or on a condition, we wait for new ones.
for {
var (
seq, dc uint64
subj, dsubj string
hdr []byte
msg []byte
err error
ts int64
delay time.Duration
)
o.mu.Lock()
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
}
// If we are in push mode and not active or under flowcontrol let's stop sending.
if o.isPushMode() {
if !o.active {
goto waitForMsgs
}
if o.maxpb > 0 && o.pbytes > o.maxpb {
goto waitForMsgs
}
}
// If we are in pull mode and no one is waiting already break and wait.
if o.isPullMode() && !o.checkWaitingForInterest() {
goto waitForMsgs
}
subj, hdr, msg, seq, dc, ts, err = o.getNextMsg()
// On error either wait or return.
if err != nil {
if err == ErrStoreMsgNotFound || err == ErrStoreEOF || err == errMaxAckPending {
goto waitForMsgs
} else {
o.mu.Unlock()
return
}
}
if wr := o.waiting.pop(); wr != nil {
dsubj = wr.reply
} else {
dsubj = o.dsubj
}
// If we are in a replay scenario and have not caught up check if we need to delay here.
if o.replay && lts > 0 {
if delay = time.Duration(ts - lts); delay > time.Millisecond {
o.mu.Unlock()
select {
case <-qch:
return
case <-time.After(delay):
}
o.mu.Lock()
}
}
// Track this regardless.
lts = ts
// If we have a rate limit set make sure we check that here.
if o.rlimit != nil {
now := time.Now()
r := o.rlimit.ReserveN(now, len(msg)+len(hdr)+len(subj)+len(dsubj)+len(o.ackReplyT))
delay := r.DelayFrom(now)
if delay > 0 {
o.mu.Unlock()
select {
case <-qch:
return
case <-time.After(delay):
}
o.mu.Lock()
}
}
// Do actual delivery.
o.deliverMsg(dsubj, subj, hdr, msg, seq, dc, ts)
o.mu.Unlock()
continue
waitForMsgs:
// If we were in a replay state check to see if we are caught up. If so clear.
if o.replay && o.sseq > lseq {
o.replay = false
}
// Reset our idle heartbeat timer if set.
if hb != nil {
if !hb.Stop() {
select {
case <-hbc:
default:
}
}
hb.Reset(hbd)
}
// We will wait here for new messages to arrive.
mch, outq, odsubj := o.mch, o.outq, o.cfg.DeliverSubject
o.mu.Unlock()
select {
case interest := <-inch:
// inch can be nil on pull-based, but then this will
// just block and not fire.
o.updateDeliveryInterest(interest)
case <-qch:
return
case <-mch:
// Messages are waiting.
case <-hbc:
hdr := []byte("NATS/1.0 100 Idle Heartbeat\r\n\r\n")
outq.send(&jsPubMsg{odsubj, _EMPTY_, _EMPTY_, hdr, nil, nil, 0, nil})
}
}
}
func (o *consumer) ackReply(sseq, dseq, dc uint64, ts int64, pending uint64) string {
return fmt.Sprintf(o.ackReplyT, dc, sseq, dseq, ts, pending)
}
// Used mostly for testing. Sets max pending bytes for flow control setups.
func (o *consumer) setMaxPendingBytes(limit int) {
o.pblimit = limit
o.maxpb = limit / 16
if o.maxpb == 0 {
o.maxpb = 1
}
}
// Deliver a msg to the consumer.
// Lock should be held and o.mset validated to be non-nil.
func (o *consumer) deliverMsg(dsubj, subj string, hdr, msg []byte, seq, dc uint64, ts int64) {
if o.mset == nil {
return
}
// Update pending on first attempt
if dc == 1 && o.sgap > 0 {
o.sgap--
}
dseq := o.dseq
o.dseq++
pmsg := &jsPubMsg{dsubj, subj, o.ackReply(seq, dseq, dc, ts, o.sgap), hdr, msg, o, seq, nil}
if o.maxpb > 0 {
o.pbytes += pmsg.size()
}
mset := o.mset
ap := o.cfg.AckPolicy
// Send message.
o.outq.send(pmsg)
// If we are ack none and mset is interest only we should make sure stream removes interest.
if ap == AckNone && mset.cfg.Retention == InterestPolicy && !mset.checkInterest(seq, o) {
mset.rmch <- seq
}
if ap == AckExplicit || ap == AckAll {
o.trackPending(seq, dseq)
} else if ap == AckNone {
o.adflr = dseq
o.asflr = seq
}
// Flow control.
if o.maxpb > 0 && o.needFlowControl() {
o.sendFlowControl()
}
// FIXME(dlc) - Capture errors?
o.updateDelivered(dseq, seq, dc, ts)
}
func (o *consumer) needFlowControl() bool {
if o.maxpb == 0 {
return false
}
	// Decide whether to send a flow control message, which the user will need to respond to.
// We send if we are at the limit or over, and at 25%, 50% and 75%.
if o.pbytes >= o.maxpb {
return true
} else if o.pfcs == 0 && o.pbytes > o.maxpb/4 {
return true
} else if o.pfcs == 1 && o.pbytes > o.maxpb/2 {
return true
} else if o.pfcs == 2 && o.pbytes > o.maxpb*3/4 {
return true
}
return false
}
func (o *consumer) processFlowControl(_ *subscription, c *client, subj, _ string, _ []byte) {
sz, err := strconv.Atoi(tokenAt(subj, 5))
if err != nil {
o.srv.Warnf("Bad flow control response subject: %q", subj)
return
}
o.mu.Lock()
defer o.mu.Unlock()
// For slow starts and ramping up.
if o.maxpb < o.pblimit {
o.maxpb *= 2
if o.maxpb > o.pblimit {
o.maxpb = o.pblimit
}
}
// Update accounting.
o.pbytes -= sz
o.pfcs--
// In case they are sent out of order or we get duplicates etc.
if o.pbytes < 0 {
o.pbytes = 0
}
if o.pfcs < 0 {
o.pfcs = 0
}
o.signalNewMessages()
}
// sendFlowControl will send a flow control packet to the consumer.
// Lock should be held.
func (o *consumer) sendFlowControl() {
if !o.isPushMode() {
return
}
subj := o.cfg.DeliverSubject
o.pfcs++
reply := fmt.Sprintf(jsFlowControlT, o.stream, o.name, o.pbytes)
hdr := []byte("NATS/1.0 100 FlowControl Request\r\n\r\n")
o.outq.send(&jsPubMsg{subj, _EMPTY_, reply, hdr, nil, nil, 0, nil})
}
// Tracks our outstanding pending acks. Only applicable to AckExplicit mode.
// Lock should be held.
func (o *consumer) trackPending(sseq, dseq uint64) {
if o.pending == nil {
o.pending = make(map[uint64]*Pending)
}
if o.ptmr == nil {
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
}
if p, ok := o.pending[sseq]; ok {
p.Timestamp = time.Now().UnixNano()
} else {
o.pending[sseq] = &Pending{dseq, time.Now().UnixNano()}
}
}
// didNotDeliver is called when a delivery for a consumer message failed.
// Depending on our state, we will process the failure.
func (o *consumer) didNotDeliver(seq uint64) {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return
}
if o.isPushMode() {
o.active = false
} else if o.pending != nil {
// pull mode and we have pending.
if _, ok := o.pending[seq]; ok {
			// We found this message in pending, we need
// to queue it up for immediate redelivery since
// we know it was not delivered.
if !o.onRedeliverQueue(seq) {
o.addToRedeliverQueue(seq)
o.signalNewMessages()
}
}
}
o.mu.Unlock()
}
// Lock should be held.
func (o *consumer) addToRedeliverQueue(seqs ...uint64) {
if o.rdqi == nil {
o.rdqi = make(map[uint64]struct{})
}
o.rdq = append(o.rdq, seqs...)
for _, seq := range seqs {
o.rdqi[seq] = struct{}{}
}
}
// Lock should be held.
func (o *consumer) hasRedeliveries() bool {
return len(o.rdq) > 0
}
func (o *consumer) getNextToRedeliver() uint64 {
if len(o.rdq) == 0 {
return 0
}
seq := o.rdq[0]
if len(o.rdq) == 1 {
o.rdq, o.rdqi = nil, nil
} else {
o.rdq = append(o.rdq[:0], o.rdq[1:]...)
delete(o.rdqi, seq)
}
return seq
}
// This checks if we already have this sequence queued for redelivery.
// FIXME(dlc) - This is O(n) but should be fast with small redeliver size.
// Lock should be held.
func (o *consumer) onRedeliverQueue(seq uint64) bool {
if o.rdqi == nil {
return false
}
_, ok := o.rdqi[seq]
return ok
}
// Remove a sequence from the redelivery queue.
// Lock should be held.
func (o *consumer) removeFromRedeliverQueue(seq uint64) bool {
if !o.onRedeliverQueue(seq) {
return false
}
for i, rseq := range o.rdq {
if rseq == seq {
if len(o.rdq) == 1 {
o.rdq, o.rdqi = nil, nil
} else {
o.rdq = append(o.rdq[:i], o.rdq[i+1:]...)
delete(o.rdqi, seq)
}
return true
}
}
return false
}
// Checks the pending messages.
func (o *consumer) checkPending() {
o.mu.Lock()
defer o.mu.Unlock()
mset := o.mset
if mset == nil {
return
}
ttl := int64(o.cfg.AckWait)
next := int64(o.ackWait(0))
now := time.Now().UnixNano()
// Since we can update timestamps, we have to review all pending.
// We may want to unlock here or warn if list is big.
var expired []uint64
for seq, p := range o.pending {
elapsed := now - p.Timestamp
if elapsed >= ttl {
if !o.onRedeliverQueue(seq) {
expired = append(expired, seq)
o.signalNewMessages()
}
} else if ttl-elapsed < next {
// Update when we should fire next.
next = ttl - elapsed
}
}
if len(expired) > 0 {
// We need to sort.
sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
o.addToRedeliverQueue(expired...)
// Now we should update the timestamp here since we are redelivering.
// We will use an incrementing time to preserve order for any other redelivery.
off := now - o.pending[expired[0]].Timestamp
for _, seq := range expired {
if p, ok := o.pending[seq]; ok {
p.Timestamp += off
}
}
}
if len(o.pending) > 0 {
o.ptmr.Reset(o.ackWait(time.Duration(next)))
} else {
o.ptmr.Stop()
o.ptmr = nil
}
}
// SeqFromReply will extract a sequence number from a reply subject.
func (o *consumer) seqFromReply(reply string) uint64 {
_, dseq, _ := ackReplyInfo(reply)
return dseq
}
// StreamSeqFromReply will extract the stream sequence from the reply subject.
func (o *consumer) streamSeqFromReply(reply string) uint64 {
sseq, _, _ := ackReplyInfo(reply)
return sseq
}
// Quick parser for positive numbers in ack reply encoding.
func parseAckReplyNum(d string) (n int64) {
if len(d) == 0 {
return -1
}
for _, dec := range d {
if dec < asciiZero || dec > asciiNine {
return -1
}
n = n*10 + (int64(dec) - asciiZero)
}
return n
}
const expectedNumReplyTokens = 9
// Grab encoded information in the reply subject for a delivered message.
func replyInfo(subject string) (sseq, dseq, dc uint64, ts int64, pending uint64) {
tsa := [expectedNumReplyTokens]string{}
start, tokens := 0, tsa[:0]
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
return 0, 0, 0, 0, 0
}
// TODO(dlc) - Should we error if we do not match consumer name?
// stream is tokens[2], consumer is 3.
dc = uint64(parseAckReplyNum(tokens[4]))
sseq, dseq = uint64(parseAckReplyNum(tokens[5])), uint64(parseAckReplyNum(tokens[6]))
ts = parseAckReplyNum(tokens[7])
pending = uint64(parseAckReplyNum(tokens[8]))
return sseq, dseq, dc, ts, pending
}
func ackReplyInfo(subject string) (sseq, dseq, dc uint64) {
tsa := [expectedNumReplyTokens]string{}
start, tokens := 0, tsa[:0]
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
return 0, 0, 0
}
dc = uint64(parseAckReplyNum(tokens[4]))
sseq, dseq = uint64(parseAckReplyNum(tokens[5])), uint64(parseAckReplyNum(tokens[6]))
return sseq, dseq, dc
}
// NextSeq returns the next delivered sequence number for this observable.
func (o *consumer) nextSeq() uint64 {
o.mu.Lock()
dseq := o.dseq
o.mu.Unlock()
return dseq
}
// This will select the store seq to start with based on the
// partition subject.
func (o *consumer) selectSubjectLast() {
stats := o.mset.store.State()
if stats.LastSeq == 0 {
o.sseq = stats.LastSeq
return
}
// FIXME(dlc) - this is linear and can be optimized by store layer.
for seq := stats.LastSeq; seq >= stats.FirstSeq; seq-- {
subj, _, _, _, err := o.mset.store.LoadMsg(seq)
if err == ErrStoreMsgNotFound {
continue
}
if o.isFilteredMatch(subj) {
o.sseq = seq
o.updateSkipped()
return
}
}
}
// Will select the starting sequence.
func (o *consumer) selectStartingSeqNo() {
stats := o.mset.store.State()
if o.cfg.OptStartSeq == 0 {
if o.cfg.DeliverPolicy == DeliverAll {
o.sseq = stats.FirstSeq
} else if o.cfg.DeliverPolicy == DeliverLast {
o.sseq = stats.LastSeq
// If we are partitioned here we may need to walk backwards.
if o.cfg.FilterSubject != _EMPTY_ {
o.selectSubjectLast()
}
} else if o.cfg.OptStartTime != nil {
// If we are here we are time based.
// TODO(dlc) - Once clustered can't rely on this.
o.sseq = o.mset.store.GetSeqFromTime(*o.cfg.OptStartTime)
} else {
// Default is deliver new only.
o.sseq = stats.LastSeq + 1
}
} else {
o.sseq = o.cfg.OptStartSeq
}
if stats.FirstSeq == 0 {
o.sseq = 1
} else if o.sseq < stats.FirstSeq {
o.sseq = stats.FirstSeq
} else if o.sseq > stats.LastSeq {
o.sseq = stats.LastSeq + 1
}
// Always set delivery sequence to 1.
o.dseq = 1
// Set ack delivery floor to delivery-1
o.adflr = o.dseq - 1
// Set ack store floor to store-1
o.asflr = o.sseq - 1
}
// Test whether a config represents a durable subscriber.
func isDurableConsumer(config *ConsumerConfig) bool {
return config != nil && config.Durable != _EMPTY_
}
func (o *consumer) isDurable() bool {
return o.cfg.Durable != _EMPTY_
}
// Are we in push mode, delivery subject, etc.
func (o *consumer) isPushMode() bool {
return o.cfg.DeliverSubject != _EMPTY_
}
func (o *consumer) isPullMode() bool {
return o.cfg.DeliverSubject == _EMPTY_
}
// Name returns the name of this observable.
func (o *consumer) String() string {
o.mu.RLock()
n := o.name
o.mu.RUnlock()
return n
}
func createConsumerName() string {
return string(getHash(nuid.Next()))
}
// deleteConsumer will delete the consumer from this stream.
func (mset *stream) deleteConsumer(o *consumer) error {
return o.delete()
}
func (o *consumer) streamName() string {
o.mu.RLock()
mset := o.mset
o.mu.RUnlock()
if mset != nil {
return mset.name()
}
return _EMPTY_
}
// Active indicates if this consumer is still active.
func (o *consumer) isActive() bool {
o.mu.Lock()
active := o.active && o.mset != nil
o.mu.Unlock()
return active
}
// hasNoLocalInterest return true if we have no local interest.
func (o *consumer) hasNoLocalInterest() bool {
o.mu.RLock()
rr := o.acc.sl.Match(o.cfg.DeliverSubject)
o.mu.RUnlock()
return len(rr.psubs)+len(rr.qsubs) == 0
}
// This is when the underlying stream has been purged.
func (o *consumer) purge(sseq uint64) {
o.mu.Lock()
o.sseq = sseq
o.asflr = sseq - 1
o.adflr = o.dseq - 1
o.sgap = 0
if len(o.pending) > 0 {
o.pending = nil
if o.ptmr != nil {
o.ptmr.Stop()
// Do not nil this out here. This allows checkPending to fire
// and still be ok and not panic.
}
}
// We need to remove all those being queued for redelivery under o.rdq
if len(o.rdq) > 0 {
rdq := o.rdq
o.rdq, o.rdqi = nil, nil
for _, sseq := range rdq {
if sseq >= o.sseq {
o.addToRedeliverQueue(sseq)
}
}
}
o.mu.Unlock()
o.writeStoreState()
}
func stopAndClearTimer(tp **time.Timer) {
if *tp == nil {
return
}
// Will get drained in normal course, do not try to
// drain here.
(*tp).Stop()
*tp = nil
}
// Stop will shutdown the consumer for the associated stream.
func (o *consumer) stop() error {
return o.stopWithFlags(false, true, false)
}
func (o *consumer) deleteWithoutAdvisory() error {
return o.stopWithFlags(true, true, false)
}
// Delete will delete the consumer for the associated stream and send advisories.
func (o *consumer) delete() error {
return o.stopWithFlags(true, true, true)
}
func (o *consumer) stopWithFlags(dflag, doSignal, advisory bool) error {
o.mu.Lock()
if o.closed {
o.mu.Unlock()
return nil
}
o.closed = true
if dflag && advisory && o.isLeader() {
o.sendDeleteAdvisoryLocked()
}
if o.qch != nil {
close(o.qch)
o.qch = nil
}
a := o.acc
store := o.store
mset := o.mset
o.mset = nil
o.active = false
o.unsubscribe(o.ackSub)
o.unsubscribe(o.reqSub)
o.unsubscribe(o.infoSub)
o.ackSub = nil
o.reqSub = nil
o.infoSub = nil
c := o.client
o.client = nil
sysc := o.sysc
o.sysc = nil
stopAndClearTimer(&o.ptmr)
stopAndClearTimer(&o.dtmr)
delivery := o.cfg.DeliverSubject
o.waiting = nil
// Break us out of the readLoop.
if doSignal {
o.signalNewMessages()
}
n := o.node
o.mu.Unlock()
if c != nil {
c.closeConnection(ClientClosed)
}
if sysc != nil {
sysc.closeConnection(ClientClosed)
}
if delivery != _EMPTY_ {
a.sl.ClearNotification(delivery, o.inch)
}
mset.mu.Lock()
mset.removeConsumer(o)
rp := mset.cfg.Retention
mset.mu.Unlock()
// We need to optionally remove all messages since we are interest based retention.
// We will do this consistently on all replicas. Note that if in clustered mode the
// non-leader consumers will need to restore state first.
if dflag && rp == InterestPolicy {
var seqs []uint64
o.mu.Lock()
if !o.isLeader() {
o.readStoredState()
}
for seq := range o.pending {
seqs = append(seqs, seq)
}
o.mu.Unlock()
// Sort just to keep pending sparse array state small.
sort.Slice(seqs, func(i, j int) bool { return seqs[i] < seqs[j] })
for _, seq := range seqs {
mset.mu.Lock()
hasNoInterest := !mset.checkInterest(seq, o)
mset.mu.Unlock()
if hasNoInterest {
mset.store.RemoveMsg(seq)
}
}
}
// Cluster cleanup.
if n != nil {
if dflag {
n.Delete()
} else {
n.Stop()
}
}
var err error
if store != nil {
if dflag {
err = store.Delete()
} else {
err = store.Stop()
}
}
return err
}
// Check that we do not form a cycle by delivering to a delivery subject
// that is part of the interest group.
func (mset *stream) deliveryFormsCycle(deliverySubject string) bool {
mset.mu.RLock()
defer mset.mu.RUnlock()
for _, subject := range mset.cfg.Subjects {
if subjectIsSubsetMatch(deliverySubject, subject) {
return true
}
}
return false
}
// This is same as check for delivery cycle.
func (mset *stream) validSubject(partitionSubject string) bool {
return mset.deliveryFormsCycle(partitionSubject)
}
// SetInActiveDeleteThreshold sets the delete threshold for how long to wait
// before deleting an inactive ephemeral observable.
func (o *consumer) setInActiveDeleteThreshold(dthresh time.Duration) error {
o.mu.Lock()
defer o.mu.Unlock()
if o.isPullMode() {
return fmt.Errorf("consumer is not push-based")
}
if o.isDurable() {
return fmt.Errorf("consumer is not durable")
}
deleteWasRunning := o.dtmr != nil
stopAndClearTimer(&o.dtmr)
o.dthresh = dthresh
if deleteWasRunning {
o.dtmr = time.AfterFunc(o.dthresh, func() { o.deleteNotActive() })
}
return nil
}
// switchToEphemeral is called on startup when recovering ephemerals.
func (o *consumer) switchToEphemeral() {
o.mu.Lock()
o.cfg.Durable = _EMPTY_
store, ok := o.store.(*consumerFileStore)
rr := o.acc.sl.Match(o.cfg.DeliverSubject)
o.mu.Unlock()
// Update interest
o.updateDeliveryInterest(len(rr.psubs)+len(rr.qsubs) > 0)
// Write out new config
if ok {
store.updateConfig(o.cfg)
}
}
// RequestNextMsgSubject returns the subject to request the next message when in pull or worker mode.
// Returns empty otherwise.
func (o *consumer) requestNextMsgSubject() string {
return o.nextMsgSubj
}
// Will set the initial pending.
// mset lock should be held.
func (o *consumer) setInitialPending() {
mset := o.mset
if mset == nil {
return
}
// Non-filtering, means we want all messages.
if o.cfg.FilterSubject == _EMPTY_ {
state := mset.store.State()
if state.Msgs > 0 {
o.sgap = state.Msgs - (o.sseq - state.FirstSeq)
}
} else {
// Here we are filtered.
// FIXME(dlc) - This could be slow with O(n)
for seq := o.sseq; ; seq++ {
subj, _, _, _, err := o.mset.store.LoadMsg(seq)
if err == ErrStoreMsgNotFound {
continue
} else if err == ErrStoreEOF {
break
} else if err == nil && o.isFilteredMatch(subj) {
o.sgap++
}
}
}
}
func (o *consumer) decStreamPending(sseq uint64, subj string) {
o.mu.Lock()
// Ignore if we have already seen this one.
	if sseq >= o.sseq && o.sgap > 0 && o.isFilteredMatch(subj) {
o.sgap--
}
o.mu.Unlock()
}
func (o *consumer) account() *Account {
o.mu.RLock()
a := o.acc
o.mu.RUnlock()
return a
}
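// The review comment that follows suggests checking the requested batch size
// against MaxAckPending up front in processNextMsgReq, instead of relying only
// on the per-message stall inside getNextMsg. A minimal sketch of that idea,
// hypothetical and reusing identifiers from the surrounding code (o.maxp,
// o.pending, batchSize, sendErr); the actual change may differ:
//
//	if o.maxp > 0 && len(o.pending)+batchSize > o.maxp {
//		sendErr(409, "Exceeded MaxAckPending")
//		return
//	}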
| 1 | 12,731 | I think you want to look at current pending + batchSize and if that is > o.maxp no? | nats-io-nats-server | go |
@@ -189,7 +189,7 @@ func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
span.AddEventWithTimestamp(context.Background(), time.Now(), "test event")
},
"#SetStatus": func(span trace.Span) {
- span.SetStatus(codes.Internal, "internal")
+ span.SetStatus(codes.Ok, "internal")
},
"#SetName": func(span trace.Span) {
span.SetName("new name") | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apitest
import (
"context"
"sync"
"testing"
"time"
"go.opentelemetry.io/otel/api/trace"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/internal/matchers"
"go.opentelemetry.io/otel/label"
)
type Harness struct {
t *testing.T
}
func NewHarness(t *testing.T) *Harness {
return &Harness{
t: t,
}
}
func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) {
h.t.Run("#Start", func(t *testing.T) {
t.Run("propagates the original context", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
ctxKey := testCtxKey{}
ctxValue := "ctx value"
ctx := context.WithValue(context.Background(), ctxKey, ctxValue)
ctx, _ = subject.Start(ctx, "test")
e.Expect(ctx.Value(ctxKey)).ToEqual(ctxValue)
})
t.Run("returns a span containing the expected properties", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
_, span := subject.Start(context.Background(), "test")
e.Expect(span).NotToBeNil()
e.Expect(span.Tracer()).ToEqual(subject)
e.Expect(span.SpanContext().IsValid()).ToBeTrue()
})
t.Run("stores the span on the provided context", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
ctx, span := subject.Start(context.Background(), "test")
e.Expect(span).NotToBeNil()
e.Expect(span.SpanContext()).NotToEqual(trace.EmptySpanContext())
e.Expect(trace.SpanFromContext(ctx)).ToEqual(span)
})
t.Run("starts spans with unique trace and span IDs", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
_, span1 := subject.Start(context.Background(), "span1")
_, span2 := subject.Start(context.Background(), "span2")
sc1 := span1.SpanContext()
sc2 := span2.SpanContext()
e.Expect(sc1.TraceID).NotToEqual(sc2.TraceID)
e.Expect(sc1.SpanID).NotToEqual(sc2.SpanID)
})
t.Run("records the span if specified", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
_, span := subject.Start(context.Background(), "span", trace.WithRecord())
e.Expect(span.IsRecording()).ToBeTrue()
})
t.Run("propagates a parent's trace ID through the context", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
ctx, parent := subject.Start(context.Background(), "parent")
_, child := subject.Start(ctx, "child")
psc := parent.SpanContext()
csc := child.SpanContext()
e.Expect(csc.TraceID).ToEqual(psc.TraceID)
e.Expect(csc.SpanID).NotToEqual(psc.SpanID)
})
t.Run("ignores parent's trace ID when new root is requested", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
ctx, parent := subject.Start(context.Background(), "parent")
_, child := subject.Start(ctx, "child", trace.WithNewRoot())
psc := parent.SpanContext()
csc := child.SpanContext()
e.Expect(csc.TraceID).NotToEqual(psc.TraceID)
e.Expect(csc.SpanID).NotToEqual(psc.SpanID)
})
t.Run("propagates remote parent's trace ID through the context", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
_, remoteParent := subject.Start(context.Background(), "remote parent")
parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext())
_, child := subject.Start(parentCtx, "child")
psc := remoteParent.SpanContext()
csc := child.SpanContext()
e.Expect(csc.TraceID).ToEqual(psc.TraceID)
e.Expect(csc.SpanID).NotToEqual(psc.SpanID)
})
t.Run("ignores remote parent's trace ID when new root is requested", func(t *testing.T) {
t.Parallel()
e := matchers.NewExpecter(t)
subject := subjectFactory()
_, remoteParent := subject.Start(context.Background(), "remote parent")
parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext())
_, child := subject.Start(parentCtx, "child", trace.WithNewRoot())
psc := remoteParent.SpanContext()
csc := child.SpanContext()
e.Expect(csc.TraceID).NotToEqual(psc.TraceID)
e.Expect(csc.SpanID).NotToEqual(psc.SpanID)
})
})
h.testSpan(subjectFactory)
}
func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
var methods = map[string]func(span trace.Span){
"#End": func(span trace.Span) {
span.End()
},
"#AddEvent": func(span trace.Span) {
span.AddEvent(context.Background(), "test event")
},
"#AddEventWithTimestamp": func(span trace.Span) {
span.AddEventWithTimestamp(context.Background(), time.Now(), "test event")
},
"#SetStatus": func(span trace.Span) {
span.SetStatus(codes.Internal, "internal")
},
"#SetName": func(span trace.Span) {
span.SetName("new name")
},
"#SetAttributes": func(span trace.Span) {
span.SetAttributes(label.String("key1", "value"), label.Int("key2", 123))
},
}
var mechanisms = map[string]func() trace.Span{
"Span created via Tracer#Start": func() trace.Span {
tracer := tracerFactory()
_, subject := tracer.Start(context.Background(), "test")
return subject
},
}
for mechanismName, mechanism := range mechanisms {
h.t.Run(mechanismName, func(t *testing.T) {
for methodName, method := range methods {
t.Run(methodName, func(t *testing.T) {
t.Run("is thread-safe", func(t *testing.T) {
t.Parallel()
span := mechanism()
wg := &sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
method(span)
}()
go func() {
defer wg.Done()
method(span)
}()
wg.Wait()
})
})
}
t.Run("#End", func(t *testing.T) {
t.Run("can be called multiple times", func(t *testing.T) {
t.Parallel()
span := mechanism()
span.End()
span.End()
})
})
})
}
}
type testCtxKey struct{}
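// The review comment below asks whether this harness case should use
// codes.Error rather than codes.Ok, since codes.Internal previously denoted an
// error status. A hedged sketch of that alternative, assuming the codes
// package in use exposes Error:
//
//	"#SetStatus": func(span trace.Span) {
//		span.SetStatus(codes.Error, "internal")
//	},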
| 1 | 13,406 | Should this be `codes.Error`? `codes.Internal` was an error status before. | open-telemetry-opentelemetry-go | go |
@@ -26,6 +26,11 @@ class AccountsController < ApplicationController
@projects, @logos = @account.project_core.used
@twitter_detail = TwitterDetail.new(@account)
page_context[:page_header] = 'accounts/show/header'
+ respond_to do |format|
+ format.html
+ format.xml
+ format.json
+ end
end
def me | 1 | class AccountsController < ApplicationController
include RedirectIfDisabled
helper MapHelper
before_action :session_required, only: [:edit, :destroy, :confirm_delete, :me]
before_action :set_account, only: [:destroy, :show, :update, :edit, :confirm_delete, :disabled, :settings]
before_action :redirect_if_disabled, only: [:show, :update, :edit]
before_action :disabled_during_read_only_mode, only: [:new, :create, :edit, :update]
before_action :no_new_accounts, only: [:new, :create]
before_action :must_own_account, only: [:edit, :update, :destroy, :confirm_delete]
before_action :check_banned_domain, only: :create
before_action :captcha_response, only: :create
before_action :account_context, only: [:edit, :update, :confirm_delete]
before_action :find_claimed_people, only: :index
after_action :create_action_record, only: :create, if: -> { @account.persisted? && params[:_action].present? }
def index
@cbp_map = PeopleDecorator.new(@people).commits_by_project_map
@positions_map = Position.where(id: @cbp_map.values.map(&:first).flatten)
.preload(project: [{ best_analysis: :main_language }, :logo])
.index_by(&:id)
end
def show
@projects, @logos = @account.project_core.used
@twitter_detail = TwitterDetail.new(@account)
page_context[:page_header] = 'accounts/show/header'
end
def me
redirect_to account_path(current_user)
end
# FIXME: uncomment when new account creation is re-enabled.
# def new
# @account = Account.new
# end
#
# def create
# @account = Account.new(account_params)
#
# if @account.save
# redirect_to root_path, flash: { success: t('.success', email: @account.email) }
# else
# render :new
# end
# end
def update
if @account.update(account_params)
redirect_to account_path(@account), notice: t('.success')
else
render 'edit'
end
end
def destroy
@account.destroy
unless current_user_is_admin?
cookies.delete(:auth_token)
reset_session
end
redirect_to edit_deleted_account_path(@account.login)
end
def unsubscribe_emails
account_id = Ohloh::Cipher.decrypt(CGI.escape(params[:key]))
@account = Account.where(id: account_id).first
@status = @account.try(:email_master)
@account.update_attribute(:email_master, false) if @status
end
private
def no_new_accounts
flash[:notice] = t('accounts.temporarily_suspended')
redirect_to new_session_path
end
def find_claimed_people
total_entries = params[:query].blank? ? Person::Count.claimed : nil
@people = Person.find_claimed(params[:query], params[:sort])
.paginate(page: page_param, per_page: 10, total_entries: total_entries)
end
def set_account
@account = Account::Find.by_id_or_login(params[:id])
fail ParamRecordNotFound unless @account
end
# FIXME: uncomment when new account creation is re-enabled.
# def check_banned_domain
# @account = Account.new(account_params)
# return unless @account.email?
# render :new, status: 418 if DomainBlacklist.email_banned?(@account.email)
# end
#
# def captcha_response
# @account = Account.new(account_params)
# verify_recaptcha(model: @account, attribute: :captcha)
# render :new if @account.errors.messages[:captcha].present?
# end
#
# def create_action_record
# Action.create(account: @account, _action: params[:_action], status: :after_activation)
# end
def account_params
params.require(:account).permit(
:login, :email, :email_confirmation, :name, :country_code, :location, :latitude, :longitude,
:twitter_account, :organization_id, :organization_name, :affiliation_type, :invite_code,
:password, :password_confirmation, :about_raw, :url)
end
end
| 1 | 7,969 | I don't think you need these lines. | blackducksoftware-ohloh-ui | rb |
@@ -195,7 +195,7 @@ type tlfJournal struct {
// Invariant: this tlfJournal acquires exactly
// blockJournal.getStoredBytes() and
// blockJournal.getStoredFiles() until shutdown.
- DiskLimiter DiskLimiter
+ diskLimiter DiskLimiter
// All the channels below are used as simple on/off
// signals. They're buffered for one object, and all sends are | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"path/filepath"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/ioutil"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
// tlfJournalConfig is the subset of the Config interface needed by
// tlfJournal (for ease of testing).
type tlfJournalConfig interface {
BlockSplitter() BlockSplitter
Clock() Clock
Codec() kbfscodec.Codec
Crypto() Crypto
BlockCache() BlockCache
BlockOps() BlockOps
MDCache() MDCache
MetadataVersion() MetadataVer
Reporter() Reporter
encryptionKeyGetter() encryptionKeyGetter
mdDecryptionKeyGetter() mdDecryptionKeyGetter
MDServer() MDServer
usernameGetter() normalizedUsernameGetter
MakeLogger(module string) logger.Logger
diskLimitTimeout() time.Duration
}
// tlfJournalConfigAdapter is an adapter for Config objects to the
// tlfJournalConfig interface.
type tlfJournalConfigAdapter struct {
Config
}
func (ca tlfJournalConfigAdapter) encryptionKeyGetter() encryptionKeyGetter {
return ca.Config.KeyManager()
}
func (ca tlfJournalConfigAdapter) mdDecryptionKeyGetter() mdDecryptionKeyGetter {
return ca.Config.KeyManager()
}
func (ca tlfJournalConfigAdapter) usernameGetter() normalizedUsernameGetter {
return ca.Config.KBPKI()
}
func (ca tlfJournalConfigAdapter) diskLimitTimeout() time.Duration {
// Set this to slightly larger than the max delay, so that we
// don't start failing writes when we hit the max delay.
return defaultDiskLimitMaxDelay + time.Second
}
const (
// Maximum number of blocks that can be flushed in a single batch
// by the journal. TODO: make this configurable, so that users
// can choose how much bandwidth is used by the journal.
maxJournalBlockFlushBatchSize = 25
// This will be the final entry for unflushed paths if there are
// too many revisions to process at once.
incompleteUnflushedPathsMarker = "..."
// ForcedBranchSquashRevThreshold is the minimum number of MD
// revisions in the journal that will trigger an automatic branch
// conversion (and subsequent resolution).
ForcedBranchSquashRevThreshold = 20
// ForcedBranchSquashBytesThresholdDefault is the minimum number of
// unsquashed MD bytes in the journal that will trigger an
// automatic branch conversion (and subsequent resolution).
ForcedBranchSquashBytesThresholdDefault = uint64(25 << 20) // 25 MB
// Maximum number of blocks to delete from the local saved block
// journal at a time while holding the lock.
maxSavedBlockRemovalsAtATime = uint64(500)
)
// TLFJournalStatus represents the status of a TLF's journal for
// display in diagnostics. It is suitable for encoding directly as
// JSON.
type TLFJournalStatus struct {
Dir string
RevisionStart MetadataRevision
RevisionEnd MetadataRevision
BranchID string
BlockOpCount uint64
// The byte counters below are signed because
// os.FileInfo.Size() is signed. The file counter is signed
// for consistency.
StoredBytes int64
StoredFiles int64
UnflushedBytes int64
UnflushedPaths []string
LastFlushErr string `json:",omitempty"`
}
// TLFJournalBackgroundWorkStatus indicates whether a journal should
// be doing background work or not.
type TLFJournalBackgroundWorkStatus int
const (
// TLFJournalBackgroundWorkPaused indicates that the journal
// should not currently be doing background work.
TLFJournalBackgroundWorkPaused TLFJournalBackgroundWorkStatus = iota
// TLFJournalBackgroundWorkEnabled indicates that the journal
// should be doing background work.
TLFJournalBackgroundWorkEnabled
)
type tlfJournalPauseType int
const (
journalPauseConflict tlfJournalPauseType = 1 << iota
journalPauseCommand
)
func (bws TLFJournalBackgroundWorkStatus) String() string {
switch bws {
case TLFJournalBackgroundWorkEnabled:
return "Background work enabled"
case TLFJournalBackgroundWorkPaused:
return "Background work paused"
default:
return fmt.Sprintf("TLFJournalBackgroundWorkStatus(%d)", bws)
}
}
// bwState indicates the state of the background work goroutine.
type bwState int
const (
bwBusy bwState = iota
bwIdle
bwPaused
)
func (bws bwState) String() string {
switch bws {
case bwBusy:
return "bwBusy"
case bwIdle:
return "bwIdle"
case bwPaused:
return "bwPaused"
default:
return fmt.Sprintf("bwState(%d)", bws)
}
}
// tlfJournalBWDelegate is used by tests to know what the background
// goroutine is doing, and also to enforce a timeout (via the
// context).
type tlfJournalBWDelegate interface {
GetBackgroundContext() context.Context
OnNewState(ctx context.Context, bws bwState)
OnShutdown(ctx context.Context)
}
// A tlfJournal contains all the journals for a (TLF, user, device)
// tuple and controls the synchronization between the objects that are
// adding to those journals (via journalBlockServer or journalMDOps)
// and a background goroutine that flushes journal entries to the
// servers.
//
// The maximum number of characters added to the root dir by a TLF
// journal is 51, which is just the max of the block journal and MD
// journal numbers.
type tlfJournal struct {
uid keybase1.UID
key kbfscrypto.VerifyingKey
tlfID tlf.ID
dir string
config tlfJournalConfig
delegateBlockServer BlockServer
log logger.Logger
deferLog logger.Logger
onBranchChange branchChangeListener
onMDFlush mdFlushListener
forcedSquashByBytes uint64
// Invariant: this tlfJournal acquires exactly
// blockJournal.getStoredBytes() and
// blockJournal.getStoredFiles() until shutdown.
DiskLimiter DiskLimiter
// All the channels below are used as simple on/off
// signals. They're buffered for one object, and all sends are
// asynchronous, so multiple sends get collapsed into one
// signal.
hasWorkCh chan struct{}
needPauseCh chan struct{}
needResumeCh chan struct{}
needShutdownCh chan struct{}
needBranchCheckCh chan struct{}
// Track the ways in which the journal is paused. We don't allow
// work to resume unless a resume has come in corresponding to
	// each type of pause that's happened.
pauseLock sync.Mutex
pauseType tlfJournalPauseType
// This channel is closed when background work shuts down.
backgroundShutdownCh chan struct{}
// Serializes all flushes.
flushLock sync.Mutex
// Tracks background work.
wg kbfssync.RepeatedWaitGroup
// Protects all operations on blockJournal and mdJournal, and all
// the fields until the next blank line.
//
// TODO: Consider using https://github.com/pkg/singlefile
// instead.
journalLock sync.RWMutex
// both of these are nil after shutdown() is called.
blockJournal *blockJournal
mdJournal *mdJournal
disabled bool
lastFlushErr error
unflushedPaths unflushedPathCache
// An estimate of how many bytes have been written since the last
// squash.
unsquashedBytes uint64
flushingBlocks map[kbfsblock.ID]bool
bwDelegate tlfJournalBWDelegate
}
func getTLFJournalInfoFilePath(dir string) string {
return filepath.Join(dir, "info.json")
}
// tlfJournalInfo is the structure stored in
// getTLFJournalInfoFilePath(dir).
type tlfJournalInfo struct {
UID keybase1.UID
VerifyingKey kbfscrypto.VerifyingKey
TlfID tlf.ID
}
func readTLFJournalInfoFile(dir string) (
keybase1.UID, kbfscrypto.VerifyingKey, tlf.ID, error) {
var info tlfJournalInfo
err := ioutil.DeserializeFromJSONFile(
getTLFJournalInfoFilePath(dir), &info)
if err != nil {
return keybase1.UID(""), kbfscrypto.VerifyingKey{}, tlf.ID{}, err
}
return info.UID, info.VerifyingKey, info.TlfID, nil
}
func writeTLFJournalInfoFile(dir string, uid keybase1.UID,
key kbfscrypto.VerifyingKey, tlfID tlf.ID) error {
info := tlfJournalInfo{uid, key, tlfID}
return ioutil.SerializeToJSONFile(info, getTLFJournalInfoFilePath(dir))
}
func makeTLFJournal(
ctx context.Context, uid keybase1.UID, key kbfscrypto.VerifyingKey,
dir string, tlfID tlf.ID, config tlfJournalConfig,
delegateBlockServer BlockServer, bws TLFJournalBackgroundWorkStatus,
bwDelegate tlfJournalBWDelegate, onBranchChange branchChangeListener,
onMDFlush mdFlushListener, DiskLimiter DiskLimiter) (
*tlfJournal, error) {
if uid == keybase1.UID("") {
return nil, errors.New("Empty user")
}
if key == (kbfscrypto.VerifyingKey{}) {
return nil, errors.New("Empty verifying key")
}
if tlfID == (tlf.ID{}) {
return nil, errors.New("Empty tlf.ID")
}
readUID, readKey, readTlfID, err := readTLFJournalInfoFile(dir)
switch {
case ioutil.IsNotExist(err):
// Info file doesn't exist, so write it.
err := writeTLFJournalInfoFile(dir, uid, key, tlfID)
if err != nil {
return nil, err
}
case err != nil:
return nil, err
default:
// Info file exists, so it should match passed-in
// parameters.
if uid != readUID {
return nil, errors.Errorf(
"Expected UID %s, got %s", uid, readUID)
}
if key != readKey {
return nil, errors.Errorf(
"Expected verifying key %s, got %s",
key, readKey)
}
if tlfID != readTlfID {
return nil, errors.Errorf(
"Expected TLF ID %s, got %s", tlfID, readTlfID)
}
}
log := config.MakeLogger("TLFJ")
blockJournal, err := makeBlockJournal(ctx, config.Codec(), dir, log)
if err != nil {
return nil, err
}
mdJournal, err := makeMDJournal(
ctx, uid, key, config.Codec(), config.Crypto(), config.Clock(),
tlfID, config.MetadataVersion(), dir, log)
if err != nil {
return nil, err
}
j := &tlfJournal{
uid: uid,
key: key,
tlfID: tlfID,
dir: dir,
config: config,
delegateBlockServer: delegateBlockServer,
log: log,
deferLog: log.CloneWithAddedDepth(1),
onBranchChange: onBranchChange,
onMDFlush: onMDFlush,
forcedSquashByBytes: ForcedBranchSquashBytesThresholdDefault,
DiskLimiter: DiskLimiter,
hasWorkCh: make(chan struct{}, 1),
needPauseCh: make(chan struct{}, 1),
needResumeCh: make(chan struct{}, 1),
needShutdownCh: make(chan struct{}, 1),
needBranchCheckCh: make(chan struct{}, 1),
backgroundShutdownCh: make(chan struct{}),
blockJournal: blockJournal,
mdJournal: mdJournal,
flushingBlocks: make(map[kbfsblock.ID]bool),
bwDelegate: bwDelegate,
}
if bws == TLFJournalBackgroundWorkPaused {
j.pauseType |= journalPauseCommand
}
isConflict, err := j.isOnConflictBranch()
if err != nil {
return nil, err
}
if isConflict {
// Conflict branches must start off paused until the first
// resolution.
j.log.CDebugf(ctx, "Journal for %s has a conflict, so starting off "+
"paused (requested status %s)", tlfID, bws)
bws = TLFJournalBackgroundWorkPaused
j.pauseType |= journalPauseConflict
}
if bws == TLFJournalBackgroundWorkPaused {
j.wg.Pause()
}
// Do this only once we're sure we won't error.
storedBytes := j.blockJournal.getStoredBytes()
storedFiles := j.blockJournal.getStoredFiles()
availableBytes, availableFiles := j.DiskLimiter.onJournalEnable(
ctx, storedBytes, storedFiles)
go j.doBackgroundWorkLoop(bws, backoff.NewExponentialBackOff())
// Signal work to pick up any existing journal entries.
j.signalWork()
j.log.CDebugf(ctx,
"Enabled journal for %s (stored bytes=%d/files=%d, available bytes=%d/files=%d) with path %s",
tlfID, storedBytes, storedFiles, availableBytes, availableFiles, dir)
return j, nil
}
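// signalWork signals the background goroutine that there is work to
// do, adding to the wait group to track the pending work. Multiple
// signals are collapsed into one; if a signal is already pending, the
// wait group count is restored via Done.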
func (j *tlfJournal) signalWork() {
j.wg.Add(1)
select {
case j.hasWorkCh <- struct{}{}:
default:
j.wg.Done()
}
}
// CtxJournalTagKey is the type used for unique context tags within
// background journal work.
type CtxJournalTagKey int
const (
// CtxJournalIDKey is the type of the tag for unique operation IDs
// within background journal work.
CtxJournalIDKey CtxJournalTagKey = iota
)
// CtxJournalOpID is the display name for the unique operation
// enqueued journal ID tag.
const CtxJournalOpID = "JID"
// doBackgroundWorkLoop is the main function for the background
// goroutine. It spawns off a worker goroutine to call
// doBackgroundWork whenever there is work, and can be paused and
// resumed.
func (j *tlfJournal) doBackgroundWorkLoop(
bws TLFJournalBackgroundWorkStatus, retry backoff.BackOff) {
ctx := context.Background()
if j.bwDelegate != nil {
ctx = j.bwDelegate.GetBackgroundContext()
}
// Non-nil when a retry has been scheduled for the future.
var retryTimer *time.Timer
defer func() {
close(j.backgroundShutdownCh)
if j.bwDelegate != nil {
j.bwDelegate.OnShutdown(ctx)
}
if retryTimer != nil {
retryTimer.Stop()
}
}()
// Below we have a state machine with three states:
//
// 1) Idle, where we wait for new work or to be paused;
// 2) Busy, where we wait for the worker goroutine to
// finish, or to be paused;
// 3) Paused, where we wait to be resumed.
//
// We run this state machine until we are shutdown. Also, if
// we exit the busy state for any reason other than the worker
// goroutine finished, we stop the worker goroutine (via
// bwCancel below).
// errCh and bwCancel are non-nil only when we're in the busy
// state. errCh is the channel on which we receive the error
// from the worker goroutine, and bwCancel is the CancelFunc
// corresponding to the context passed to the worker
// goroutine.
var errCh <-chan error
var bwCancel context.CancelFunc
// Handle the case where we panic while in the busy state.
defer func() {
if bwCancel != nil {
bwCancel()
}
}()
for {
ctx := ctxWithRandomIDReplayable(ctx, CtxJournalIDKey, CtxJournalOpID,
j.log)
switch {
case bws == TLFJournalBackgroundWorkEnabled && errCh == nil:
// 1) Idle.
if j.bwDelegate != nil {
j.bwDelegate.OnNewState(ctx, bwIdle)
}
j.log.CDebugf(
ctx, "Waiting for the work signal for %s",
j.tlfID)
select {
case <-j.hasWorkCh:
j.log.CDebugf(ctx, "Got work signal for %s", j.tlfID)
if retryTimer != nil {
retryTimer.Stop()
retryTimer = nil
}
bwCtx, cancel := context.WithCancel(ctx)
errCh = j.doBackgroundWork(bwCtx)
bwCancel = cancel
case <-j.needPauseCh:
j.log.CDebugf(ctx,
"Got pause signal for %s", j.tlfID)
bws = TLFJournalBackgroundWorkPaused
case <-j.needShutdownCh:
j.log.CDebugf(ctx,
"Got shutdown signal for %s", j.tlfID)
return
}
case bws == TLFJournalBackgroundWorkEnabled && errCh != nil:
// 2) Busy.
if j.bwDelegate != nil {
j.bwDelegate.OnNewState(ctx, bwBusy)
}
j.log.CDebugf(ctx,
"Waiting for background work to be done for %s",
j.tlfID)
needShutdown := false
select {
case err := <-errCh:
if retryTimer != nil {
panic("Retry timer should be nil after work is done")
}
if err != nil {
j.log.CWarningf(ctx,
"Background work error for %s: %+v",
j.tlfID, err)
bTime := retry.NextBackOff()
if bTime != backoff.Stop {
j.log.CWarningf(ctx, "Retrying in %s", bTime)
retryTimer = time.AfterFunc(bTime, j.signalWork)
}
} else {
retry.Reset()
}
case <-j.needPauseCh:
j.log.CDebugf(ctx,
"Got pause signal for %s", j.tlfID)
bws = TLFJournalBackgroundWorkPaused
case <-j.needShutdownCh:
j.log.CDebugf(ctx,
"Got shutdown signal for %s", j.tlfID)
needShutdown = true
}
// Cancel the worker goroutine as we exit this
// state.
bwCancel()
bwCancel = nil
// Ensure the worker finishes after being canceled, so it
// doesn't pick up any new work. For example, if the
// worker doesn't check for cancellations before checking
// the journal for new work, it might process some journal
// entries before returning an error.
<-errCh
errCh = nil
if needShutdown {
return
}
case bws == TLFJournalBackgroundWorkPaused:
// 3) Paused
if j.bwDelegate != nil {
j.bwDelegate.OnNewState(ctx, bwPaused)
}
j.log.CDebugf(
ctx, "Waiting to resume background work for %s",
j.tlfID)
select {
case <-j.needResumeCh:
j.log.CDebugf(ctx,
"Got resume signal for %s", j.tlfID)
bws = TLFJournalBackgroundWorkEnabled
case <-j.needShutdownCh:
j.log.CDebugf(ctx,
"Got shutdown signal for %s", j.tlfID)
return
}
default:
j.log.CErrorf(
ctx, "Unknown TLFJournalBackgroundStatus %s",
bws)
return
}
}
}
// doBackgroundWork currently only does auto-flushing. It assumes that
// ctx is canceled when the background processing should stop.
//
// TODO: Handle garbage collection too.
func (j *tlfJournal) doBackgroundWork(ctx context.Context) <-chan error {
errCh := make(chan error, 1)
// TODO: Handle panics.
go func() {
defer j.wg.Done()
errCh <- j.flush(ctx)
close(errCh)
}()
return errCh
}
// We don't guarantee that background pause/resume requests will be
// processed in strict FIFO order. In particular, multiple pause
// requests are collapsed into one (also multiple resume requests), so
// it's possible that a pause-resume-pause sequence will be processed
// as pause-resume. But that's okay, since these are just for
// infrequent ad-hoc testing.
func (j *tlfJournal) pause(pauseType tlfJournalPauseType) {
j.pauseLock.Lock()
defer j.pauseLock.Unlock()
oldPauseType := j.pauseType
j.pauseType |= pauseType
if oldPauseType > 0 {
// No signal is needed since someone already called pause.
return
}
j.wg.Pause()
select {
case j.needPauseCh <- struct{}{}:
default:
}
}
func (j *tlfJournal) pauseBackgroundWork() {
j.pause(journalPauseCommand)
}
func (j *tlfJournal) resume(pauseType tlfJournalPauseType) {
j.pauseLock.Lock()
defer j.pauseLock.Unlock()
j.pauseType &= ^pauseType
if j.pauseType != 0 {
return
}
select {
case j.needResumeCh <- struct{}{}:
// Resume the wait group right away, so future callers will block
// even before the background goroutine picks up this signal.
j.wg.Resume()
default:
}
}
func (j *tlfJournal) resumeBackgroundWork() {
j.resume(journalPauseCommand)
}
func (j *tlfJournal) checkEnabledLocked() error {
if j.blockJournal == nil || j.mdJournal == nil {
return errors.WithStack(errTLFJournalShutdown{})
}
if j.disabled {
return errors.WithStack(errTLFJournalDisabled{})
}
return nil
}
func (j *tlfJournal) getJournalEnds(ctx context.Context) (
blockEnd journalOrdinal, mdEnd MetadataRevision, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return 0, MetadataRevisionUninitialized, err
}
blockEnd, err = j.blockJournal.end()
if err != nil {
return 0, 0, err
}
mdEnd, err = j.mdJournal.end()
if err != nil {
return 0, 0, err
}
return blockEnd, mdEnd, nil
}
func (j *tlfJournal) flush(ctx context.Context) (err error) {
j.flushLock.Lock()
defer j.flushLock.Unlock()
flushedBlockEntries := 0
flushedMDEntries := 0
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Flushed %d block entries and %d MD entries "+
"for %s, but got error %v",
flushedBlockEntries, flushedMDEntries,
j.tlfID, err)
}
j.journalLock.Lock()
j.lastFlushErr = err
j.journalLock.Unlock()
}()
// TODO: Avoid starving flushing MD ops if there are many
// block ops. See KBFS-1502.
for {
select {
case <-ctx.Done():
j.log.CDebugf(ctx, "Flush canceled: %+v", ctx.Err())
return nil
default:
}
isConflict, err := j.isOnConflictBranch()
if err != nil {
return err
}
if isConflict {
j.log.CDebugf(ctx, "Ignoring flush while on conflict branch")
// It's safe to send a pause signal here, because even if
			// CR has already resolved the conflict and sent the
// resume signal, we know the background work loop is
// still waiting for this flush() loop to finish before it
// processes either the pause or the resume channel.
j.pause(journalPauseConflict)
return nil
}
converted, err := j.convertMDsToBranchIfOverThreshold(ctx, true)
if err != nil {
return err
}
if converted {
return nil
}
blockEnd, mdEnd, err := j.getJournalEnds(ctx)
if err != nil {
return err
}
if blockEnd == 0 && mdEnd == MetadataRevisionUninitialized {
j.log.CDebugf(ctx, "Nothing else to flush")
break
}
j.log.CDebugf(ctx, "Flushing up to blockEnd=%d and mdEnd=%d",
blockEnd, mdEnd)
// Flush the block journal ops in parallel.
numFlushed, maxMDRevToFlush, converted, err :=
j.flushBlockEntries(ctx, blockEnd)
if err != nil {
return err
}
flushedBlockEntries += numFlushed
// If we ever switched branches while flushing block entries,
// we need to make sure `mdEnd` still reflects reality, since
// the number of md entries could have shrunk.
if converted {
_, mdEnd, err = j.getJournalEnds(ctx)
if err != nil {
return err
}
}
if numFlushed == 0 {
// There were no blocks to flush, so we can flush all of
// the remaining MDs.
maxMDRevToFlush = mdEnd
}
// TODO: Flush MDs in batch.
for {
flushed, err := j.flushOneMDOp(ctx, mdEnd, maxMDRevToFlush)
if err != nil {
return err
}
if !flushed {
break
}
flushedMDEntries++
}
}
j.log.CDebugf(ctx, "Flushed %d block entries and %d MD entries for %s",
flushedBlockEntries, flushedMDEntries, j.tlfID)
return nil
}
type errTLFJournalShutdown struct{}
func (e errTLFJournalShutdown) Error() string {
return "tlfJournal is shutdown"
}
type errTLFJournalDisabled struct{}
func (e errTLFJournalDisabled) Error() string {
return "tlfJournal is disabled"
}
type errTLFJournalNotEmpty struct{}
func (e errTLFJournalNotEmpty) Error() string {
return "tlfJournal is not empty"
}
func (j *tlfJournal) getNextBlockEntriesToFlush(
ctx context.Context, end journalOrdinal) (
entries blockEntriesToFlush, maxMDRevToFlush MetadataRevision, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return blockEntriesToFlush{}, MetadataRevisionUninitialized, err
}
return j.blockJournal.getNextEntriesToFlush(ctx, end,
maxJournalBlockFlushBatchSize)
}
func (j *tlfJournal) removeFlushedBlockEntries(ctx context.Context,
entries blockEntriesToFlush) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
storedBytesBefore := j.blockJournal.getStoredBytes()
// TODO: Check storedFiles also.
err := j.blockJournal.removeFlushedEntries(
ctx, entries, j.tlfID, j.config.Reporter())
if err != nil {
return err
}
storedBytesAfter := j.blockJournal.getStoredBytes()
// storedBytes shouldn't change since removedBytes is 0.
if storedBytesAfter != storedBytesBefore {
panic(fmt.Sprintf(
"storedBytes unexpectedly changed from %d to %d",
storedBytesBefore, storedBytesAfter))
}
return nil
}
func (j *tlfJournal) flushBlockEntries(
ctx context.Context, end journalOrdinal) (
numFlushed int, maxMDRevToFlush MetadataRevision,
converted bool, err error) {
entries, maxMDRevToFlush, err := j.getNextBlockEntriesToFlush(ctx, end)
if err != nil {
return 0, MetadataRevisionUninitialized, false, err
}
if entries.length() == 0 {
return 0, maxMDRevToFlush, false, nil
}
j.log.CDebugf(ctx, "Flushing %d blocks, up to rev %d",
len(entries.puts.blockStates), maxMDRevToFlush)
// Mark these blocks as flushing, and clear when done.
err = j.markFlushingBlockIDs(entries)
if err != nil {
return 0, MetadataRevisionUninitialized, false, err
}
cleared := false
defer func() {
if !cleared {
clearErr := j.clearFlushingBlockIDs(entries)
if err == nil {
err = clearErr
}
}
}()
// TODO: fill this in for logging/error purposes.
var tlfName CanonicalTlfName
eg, groupCtx := errgroup.WithContext(ctx)
convertCtx, convertCancel := context.WithCancel(groupCtx)
// Flush the blocks in a goroutine. Alongside make another
// goroutine that listens for MD puts and checks the size of the
// MD journal, and converts to a local squash branch if it gets
// too large. While the 2nd goroutine is waiting, it should exit
// immediately as soon as the 1st one finishes, but if it is
// already working on a conversion it should finish that work.
//
// If the 2nd goroutine makes a branch, foreground writes could
// trigger CR while blocks are still being flushed. This can't
// usually happen, because flushing is paused while CR is
// happening. flush() has to make sure to get the new MD journal
// end, and we need to make sure `maxMDRevToFlush` is still valid.
eg.Go(func() error {
defer convertCancel()
return flushBlockEntries(groupCtx, j.log, j.delegateBlockServer,
j.config.BlockCache(), j.config.Reporter(),
j.tlfID, tlfName, entries)
})
converted = false
eg.Go(func() error {
// We might need to run multiple conversions during a single
// batch of block flushes, so loop until the batch finishes.
for {
select {
case <-j.needBranchCheckCh:
// Don't signal a pause when doing this conversion in
// a separate goroutine, because it ends up canceling
// the flush context, which means all the block puts
// would get canceled and we don't want that.
// Instead, let the current iteration of the flush
// finish, and then signal at the top of the next
// iteration.
convertedNow, err :=
j.convertMDsToBranchIfOverThreshold(groupCtx, false)
if err != nil {
return err
}
converted = converted || convertedNow
case <-convertCtx.Done():
return nil // Canceled because the block puts finished
}
}
})
err = eg.Wait()
if err != nil {
return 0, MetadataRevisionUninitialized, false, err
}
err = j.clearFlushingBlockIDs(entries)
cleared = true
if err != nil {
return 0, MetadataRevisionUninitialized, false, err
}
err = j.removeFlushedBlockEntries(ctx, entries)
if err != nil {
return 0, MetadataRevisionUninitialized, false, err
}
// TODO: If both the block and MD journals are empty, nuke the
// entire TLF journal directory.
// If a conversion happened, the original `maxMDRevToFlush` only
// applies for sure if its mdRevMarker entry was already for a
// local squash. TODO: conversion might not have actually
// happened yet, in which case it's still ok to flush
// maxMDRevToFlush.
if converted && maxMDRevToFlush != MetadataRevisionUninitialized &&
!entries.revIsLocalSquash(maxMDRevToFlush) {
maxMDRevToFlush = MetadataRevisionUninitialized
}
return entries.length(), maxMDRevToFlush, converted, nil
}
func (j *tlfJournal) getNextMDEntryToFlush(ctx context.Context,
end MetadataRevision) (MdID, *RootMetadataSigned, ExtraMetadata, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return MdID{}, nil, nil, err
}
return j.mdJournal.getNextEntryToFlush(ctx, end, j.config.Crypto())
}
func (j *tlfJournal) convertMDsToBranchLocked(
ctx context.Context, bid BranchID, doSignal bool) error {
err := j.mdJournal.convertToBranch(
ctx, bid, j.config.Crypto(), j.config.Codec(), j.tlfID,
j.config.MDCache())
if err != nil {
return err
}
j.unsquashedBytes = 0
if j.onBranchChange != nil {
j.onBranchChange.onTLFBranchChange(j.tlfID, bid)
}
// Pause while on a conflict branch.
if doSignal {
j.pause(journalPauseConflict)
}
return nil
}
func (j *tlfJournal) convertMDsToBranch(ctx context.Context) error {
bid, err := j.config.Crypto().MakeRandomBranchID()
if err != nil {
return err
}
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
return j.convertMDsToBranchLocked(ctx, bid, true)
}
func (j *tlfJournal) convertMDsToBranchIfOverThreshold(ctx context.Context,
doSignal bool) (
bool, error) {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return false, err
}
if j.mdJournal.getBranchID() != NullBranchID {
// Already on a conflict branch, so nothing to do.
return false, nil
}
atLeastOneRev, err := j.mdJournal.atLeastNNonLocalSquashes(1)
if err != nil {
return false, err
}
if !atLeastOneRev {
// If there isn't at least one non-local-squash revision, we can
// bail early since there's definitely nothing to do.
return false, nil
}
squashByRev, err :=
j.mdJournal.atLeastNNonLocalSquashes(ForcedBranchSquashRevThreshold)
if err != nil {
return false, err
}
// Note that j.unsquashedBytes is just an estimate -- it doesn't
// account for blocks that will be eliminated as part of the
// squash, and it doesn't count unsquashed bytes that were written
// to disk before this tlfJournal instance started. But it should
// be close enough to work for the purposes of this optimization.
squashByBytes := j.unsquashedBytes >= j.forcedSquashByBytes
if !squashByRev && !squashByBytes {
// Not over either threshold yet.
return false, nil
}
j.log.CDebugf(ctx, "Converting journal with %d unsquashed bytes "+
"to a branch", j.unsquashedBytes)
// If we're squashing by bytes, and there's exactly one
// non-local-squash revision, just directly mark it as squashed to
// avoid the CR overhead.
if !squashByRev {
moreThanOneRev, err := j.mdJournal.atLeastNNonLocalSquashes(2)
if err != nil {
return false, err
}
if !moreThanOneRev {
j.log.CDebugf(ctx, "Avoiding CR when there is only one "+
"revision that needs squashing; marking as local squash")
err = j.mdJournal.markLatestAsLocalSquash(ctx)
if err != nil {
return false, err
}
err = j.blockJournal.markLatestRevMarkerAsLocalSquash()
if err != nil {
return false, err
}
j.unsquashedBytes = 0
return true, nil
}
}
err = j.convertMDsToBranchLocked(ctx, PendingLocalSquashBranchID, doSignal)
if err != nil {
return false, err
}
return true, nil
}
func (j *tlfJournal) getMDFlushRange() (
blockJournal *blockJournal, length int, earliest, latest journalOrdinal,
err error) {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return nil, 0, 0, 0, err
}
length, earliest, latest, err = j.blockJournal.getDeferredGCRange()
if err != nil {
return nil, 0, 0, 0, err
}
return j.blockJournal, length, earliest, latest, nil
}
func (j *tlfJournal) doOnMDFlush(ctx context.Context,
rmds *RootMetadataSigned) error {
if j.onMDFlush != nil {
j.onMDFlush.onMDFlush(rmds.MD.TlfID(), rmds.MD.BID(),
rmds.MD.RevisionNumber())
}
blockJournal, length, earliest, latest, err := j.getMDFlushRange()
if err != nil {
return err
}
if length == 0 {
return nil
}
// onMDFlush() only needs to be called under the flushLock, not
// the journalLock, as it doesn't touch the actual journal, only
// the deferred GC journal.
removedBytes, removedFiles, err := blockJournal.doGC(
ctx, earliest, latest)
if err != nil {
return err
}
j.DiskLimiter.onBlocksDelete(ctx, removedBytes, removedFiles)
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
clearedJournal, aggregateInfo, err :=
j.blockJournal.clearDeferredGCRange(
ctx, removedBytes, removedFiles, earliest, latest)
if err != nil {
return err
}
if clearedJournal {
equal, err := kbfscodec.Equal(
j.config.Codec(), aggregateInfo, blockAggregateInfo{})
if err != nil {
return err
}
if !equal {
j.log.CWarningf(ctx,
"Cleared block journal for %s, but still has aggregate info %+v",
j.tlfID, aggregateInfo)
// TODO: Consider trying to adjust the disk
// limiter state to compensate for the
// leftover bytes/files here. Ideally, the
// disk limiter would keep track of per-TLF
// state, so we could just call
// j.diskLimiter.onJournalClear(tlfID) to have
// it clear its state for this TLF.
}
}
return nil
}
func (j *tlfJournal) removeFlushedMDEntry(ctx context.Context,
mdID MdID, rmds *RootMetadataSigned) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
if err := j.mdJournal.removeFlushedEntry(ctx, mdID, rmds); err != nil {
return err
}
j.unflushedPaths.removeFromCache(rmds.MD.RevisionNumber())
return nil
}
func (j *tlfJournal) flushOneMDOp(
ctx context.Context, end MetadataRevision,
maxMDRevToFlush MetadataRevision) (flushed bool, err error) {
if maxMDRevToFlush == MetadataRevisionUninitialized {
// Short-cut `getNextMDEntryToFlush`, which would otherwise read
// an MD from disk and sign it unnecessarily.
return false, nil
}
j.log.CDebugf(ctx, "Flushing one MD to server")
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx, "Flush failed with %v", err)
}
}()
mdServer := j.config.MDServer()
// TODO: Do we need `end` at all, or can we just pass
// `maxMDRevToFlush+1` here? The only argument for `end` is that
// it might help if the block and MD journals are out of sync.
mdID, rmds, extra, err := j.getNextMDEntryToFlush(ctx, end)
if err != nil {
return false, err
}
if mdID == (MdID{}) {
return false, nil
}
// Only flush MDs for which the blocks have been fully flushed.
if rmds.MD.RevisionNumber() > maxMDRevToFlush {
j.log.CDebugf(ctx, "Haven't flushed all the blocks for TLF=%s "+
"with id=%s, rev=%s, bid=%s yet (maxMDRevToFlush=%d)",
rmds.MD.TlfID(), mdID, rmds.MD.RevisionNumber(), rmds.MD.BID(),
maxMDRevToFlush)
return false, nil
}
j.log.CDebugf(ctx, "Flushing MD for TLF=%s with id=%s, rev=%s, bid=%s",
rmds.MD.TlfID(), mdID, rmds.MD.RevisionNumber(), rmds.MD.BID())
pushErr := mdServer.Put(ctx, rmds, extra)
if isRevisionConflict(pushErr) {
headMdID, err := getMdID(ctx, mdServer, j.config.Crypto(),
rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.MergedStatus(),
rmds.MD.RevisionNumber())
if err != nil {
j.log.CWarningf(ctx,
"getMdID failed for TLF %s, BID %s, and revision %d: %v",
rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.RevisionNumber(), err)
} else if headMdID == mdID {
if headMdID == (MdID{}) {
panic("nil earliestID and revision conflict error returned by pushEarliestToServer")
}
// We must have already flushed this MD, so continue.
pushErr = nil
} else if rmds.MD.MergedStatus() == Merged {
j.log.CDebugf(ctx, "Conflict detected %v", pushErr)
// Convert MDs to a branch and return -- the journal
// pauses until the resolution is complete.
err = j.convertMDsToBranch(ctx)
if err != nil {
return false, err
}
return false, nil
}
}
if pushErr != nil {
return false, pushErr
}
err = j.doOnMDFlush(ctx, rmds)
if err != nil {
return false, err
}
err = j.removeFlushedMDEntry(ctx, mdID, rmds)
if err != nil {
return false, err
}
return true, nil
}
func (j *tlfJournal) getJournalEntryCounts() (
blockEntryCount, mdEntryCount uint64, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return 0, 0, err
}
blockEntryCount = j.blockJournal.length()
mdEntryCount = j.mdJournal.length()
return blockEntryCount, mdEntryCount, nil
}
func (j *tlfJournal) isOnConflictBranch() (bool, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return false, err
}
return j.mdJournal.getBranchID() != NullBranchID, nil
}
func (j *tlfJournal) getJournalStatusLocked() (TLFJournalStatus, error) {
if err := j.checkEnabledLocked(); err != nil {
return TLFJournalStatus{}, err
}
earliestRevision, err := j.mdJournal.readEarliestRevision()
if err != nil {
return TLFJournalStatus{}, err
}
latestRevision, err := j.mdJournal.readLatestRevision()
if err != nil {
return TLFJournalStatus{}, err
}
blockEntryCount := j.blockJournal.length()
lastFlushErr := ""
if j.lastFlushErr != nil {
lastFlushErr = j.lastFlushErr.Error()
}
storedBytes := j.blockJournal.getStoredBytes()
storedFiles := j.blockJournal.getStoredFiles()
unflushedBytes := j.blockJournal.getUnflushedBytes()
return TLFJournalStatus{
Dir: j.dir,
BranchID: j.mdJournal.getBranchID().String(),
RevisionStart: earliestRevision,
RevisionEnd: latestRevision,
BlockOpCount: blockEntryCount,
StoredBytes: storedBytes,
StoredFiles: storedFiles,
UnflushedBytes: unflushedBytes,
LastFlushErr: lastFlushErr,
}, nil
}
func (j *tlfJournal) getJournalStatus() (TLFJournalStatus, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
return j.getJournalStatusLocked()
}
// getJournalStatusWithRange returns the journal status, along with
// either a non-nil unflushedPathsMap, which can be used directly
// to fill in UnflushedPaths, or a list of ImmutableBareRootMetadatas
// (along with a bool indicating whether that list is
// complete), which can be used to build an unflushedPathsMap. If
// complete is true, then the list may be empty; otherwise, it is
// guaranteed to not be empty.
func (j *tlfJournal) getJournalStatusWithRange() (
jStatus TLFJournalStatus, unflushedPaths unflushedPathsMap,
ibrmds []ImmutableBareRootMetadata, complete bool, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
jStatus, err = j.getJournalStatusLocked()
if err != nil {
return TLFJournalStatus{}, nil, nil, false, err
}
unflushedPaths = j.unflushedPaths.getUnflushedPaths()
if unflushedPaths != nil {
return jStatus, unflushedPaths, nil, true, nil
}
if jStatus.RevisionEnd == MetadataRevisionUninitialized {
return jStatus, nil, nil, true, nil
}
complete = true
stop := jStatus.RevisionEnd
if stop > jStatus.RevisionStart+1000 {
stop = jStatus.RevisionStart + 1000
complete = false
}
// It would be nice to avoid getting this range if we are not
// the initializer, but at this point we don't know if we'll
// need to initialize or not.
ibrmds, err = j.mdJournal.getRange(j.mdJournal.branchID,
jStatus.RevisionStart, stop)
if err != nil {
return TLFJournalStatus{}, nil, nil, false, err
}
return jStatus, nil, ibrmds, complete, nil
}
// getUnflushedPathMDInfos converts the given list of bare root
// metadatas into unflushedPathMDInfo objects. The caller must NOT
// hold `j.journalLock`, because blocks from the journal may need to
// be read as part of the decryption.
func (j *tlfJournal) getUnflushedPathMDInfos(ctx context.Context,
ibrmds []ImmutableBareRootMetadata) ([]unflushedPathMDInfo, error) {
if len(ibrmds) == 0 {
return nil, nil
}
ibrmdBareHandle, err := ibrmds[0].MakeBareTlfHandleWithExtra()
if err != nil {
return nil, err
}
handle, err := MakeTlfHandle(
ctx, ibrmdBareHandle, j.config.usernameGetter())
if err != nil {
return nil, err
}
mdInfos := make([]unflushedPathMDInfo, 0, len(ibrmds))
for _, ibrmd := range ibrmds {
// TODO: Avoid having to do this type assertion and
// convert to RootMetadata.
brmd, ok := ibrmd.BareRootMetadata.(MutableBareRootMetadata)
if !ok {
return nil, MutableBareRootMetadataNoImplError{}
}
rmd := makeRootMetadata(brmd, ibrmd.extra, handle)
// Assume, since journal is running, that we're in default mode.
mode := InitDefault
pmd, err := decryptMDPrivateData(
ctx, j.config.Codec(), j.config.Crypto(),
j.config.BlockCache(), j.config.BlockOps(),
j.config.mdDecryptionKeyGetter(), mode, j.uid,
rmd.GetSerializedPrivateMetadata(), rmd, rmd, j.log)
if err != nil {
return nil, err
}
mdInfo := unflushedPathMDInfo{
revision: ibrmd.RevisionNumber(),
kmd: rmd,
pmd: pmd,
localTimestamp: ibrmd.localTimestamp,
}
mdInfos = append(mdInfos, mdInfo)
}
return mdInfos, nil
}
func (j *tlfJournal) getJournalStatusWithPaths(ctx context.Context,
cpp chainsPathPopulator) (jStatus TLFJournalStatus, err error) {
// This loop is limited only by the lifetime of `ctx`.
var unflushedPaths unflushedPathsMap
var complete bool
for {
var ibrmds []ImmutableBareRootMetadata
jStatus, unflushedPaths, ibrmds, complete, err =
j.getJournalStatusWithRange()
if err != nil {
return TLFJournalStatus{}, err
}
if unflushedPaths != nil {
break
}
// We need to make or initialize the unflushed paths.
if !complete {
// Figure out the paths for the truncated MD range,
// but don't cache it.
unflushedPaths = make(unflushedPathsMap)
j.log.CDebugf(ctx, "Making incomplete unflushed path cache")
mdInfos, err := j.getUnflushedPathMDInfos(ctx, ibrmds)
if err != nil {
return TLFJournalStatus{}, err
}
err = addUnflushedPaths(ctx, j.uid, j.key,
j.config.Codec(), j.log, mdInfos, cpp,
unflushedPaths)
if err != nil {
return TLFJournalStatus{}, err
}
break
}
// We need to init it ourselves, or wait for someone else
// to do it.
doInit, err := j.unflushedPaths.startInitializeOrWait(ctx)
if err != nil {
return TLFJournalStatus{}, err
}
if doInit {
initSuccess := false
defer func() {
if !initSuccess || err != nil {
j.unflushedPaths.abortInitialization()
}
}()
mdInfos, err := j.getUnflushedPathMDInfos(ctx, ibrmds)
if err != nil {
return TLFJournalStatus{}, err
}
unflushedPaths, initSuccess, err = j.unflushedPaths.initialize(
ctx, j.uid, j.key, j.config.Codec(),
j.log, cpp, mdInfos)
if err != nil {
return TLFJournalStatus{}, err
}
// All done!
break
}
j.log.CDebugf(ctx, "Waited for unflushed paths initialization, "+
"trying again to get the status")
}
pathsSeen := make(map[string]bool)
for _, revPaths := range unflushedPaths {
for path := range revPaths {
if !pathsSeen[path] {
jStatus.UnflushedPaths = append(jStatus.UnflushedPaths, path)
pathsSeen[path] = true
}
}
}
if !complete {
jStatus.UnflushedPaths =
append(jStatus.UnflushedPaths, incompleteUnflushedPathsMarker)
}
return jStatus, nil
}
func (j *tlfJournal) getByteCounts() (
storedBytes, storedFiles, unflushedBytes int64, err error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return 0, 0, 0, err
}
return j.blockJournal.getStoredBytes(), j.blockJournal.getStoredFiles(),
j.blockJournal.getUnflushedBytes(), nil
}
func (j *tlfJournal) shutdown(ctx context.Context) {
select {
case j.needShutdownCh <- struct{}{}:
default:
}
<-j.backgroundShutdownCh
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
// Already shutdown.
return
}
// Even if we shut down the journal, its blocks still take up
// space, but we don't want to double-count them if we start
// up this journal again, so we need to adjust them here.
//
// TODO: If we ever expect to shut down non-empty journals any
// time other than during shutdown, we should still count
// shut-down journals against the disk limit.
storedBytes := j.blockJournal.getStoredBytes()
storedFiles := j.blockJournal.getStoredFiles()
j.DiskLimiter.onJournalDisable(ctx, storedBytes, storedFiles)
// Make further accesses error out.
j.blockJournal = nil
j.mdJournal = nil
}
// disable prevents new operations from hitting the journal. Will
// fail unless the journal is completely empty.
func (j *tlfJournal) disable() (wasEnabled bool, err error) {
j.journalLock.Lock()
defer j.journalLock.Unlock()
err = j.checkEnabledLocked()
switch errors.Cause(err).(type) {
case nil:
// Continue.
break
case errTLFJournalDisabled:
// Already disabled.
return false, nil
default:
return false, err
}
blockEntryCount := j.blockJournal.length()
mdEntryCount := j.mdJournal.length()
// You can only disable an empty journal.
if blockEntryCount > 0 || mdEntryCount > 0 {
return false, errors.WithStack(errTLFJournalNotEmpty{})
}
j.disabled = true
return true, nil
}
func (j *tlfJournal) enable() error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
err := j.checkEnabledLocked()
switch errors.Cause(err).(type) {
case nil:
// Already enabled.
return nil
case errTLFJournalDisabled:
// Continue.
break
default:
return err
}
j.disabled = false
return nil
}
// All the functions below just do the equivalent blockJournal or
// mdJournal function under j.journalLock.
// getBlockData doesn't take a block context param, unlike the remote
// block server, since we still want to serve blocks even if all local
// references have been deleted (for example, a block that's been
// flushed but is still being served on disk until the next
// successful MD flush). This is safe because the journal doesn't
// support removing references for anything other than a flush (see
// the comment in tlfJournal.removeBlockReferences).
func (j *tlfJournal) getBlockData(id kbfsblock.ID) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return j.blockJournal.getData(id)
}
func (j *tlfJournal) getBlockSize(id kbfsblock.ID) (uint32, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return 0, err
}
size, err := j.blockJournal.getDataSize(id)
if err != nil {
return 0, err
}
// Block sizes are restricted, but `size` is an int64 because
// that's what the OS gives us. Convert it to a uint32. TODO:
// check this is safe?
return uint32(size), nil
}
// ErrDiskLimitTimeout is returned when putBlockData exceeds
// diskLimitTimeout when trying to acquire bytes to put.
type ErrDiskLimitTimeout struct {
timeout time.Duration
requestedBytes int64
requestedFiles int64
availableBytes int64
availableFiles int64
err error
}
func (e ErrDiskLimitTimeout) Error() string {
return fmt.Sprintf("Disk limit timeout of %s reached; requested %d bytes and %d files, %d bytes and %d files available: %+v",
e.timeout, e.requestedBytes, e.requestedFiles,
e.availableBytes, e.availableFiles, e.err)
}
func (j *tlfJournal) putBlockData(
ctx context.Context, id kbfsblock.ID, blockCtx kbfsblock.Context, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) (err error) {
// Since Acquire can block, it should happen outside of the
// journal lock.
timeout := j.config.diskLimitTimeout()
acquireCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
bufLen := int64(len(buf))
availableBytes, availableFiles, err := j.DiskLimiter.beforeBlockPut(
acquireCtx, bufLen, filesPerBlockMax)
switch errors.Cause(err) {
case nil:
// Continue.
case context.DeadlineExceeded:
return errors.WithStack(ErrDiskLimitTimeout{
timeout, bufLen, filesPerBlockMax,
availableBytes, availableFiles, err,
})
default:
return err
}
var putData bool
defer func() {
j.DiskLimiter.afterBlockPut(
ctx, bufLen, filesPerBlockMax, putData)
}()
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
storedBytesBefore := j.blockJournal.getStoredBytes()
putData, err = j.blockJournal.putData(
ctx, id, blockCtx, buf, serverHalf)
if err != nil {
return err
}
storedBytesAfter := j.blockJournal.getStoredBytes()
if putData && storedBytesAfter != (storedBytesBefore+bufLen) {
panic(fmt.Sprintf(
"storedBytes changed from %d to %d, but %d bytes of data was put",
storedBytesBefore, storedBytesAfter, bufLen))
} else if !putData && storedBytesBefore != storedBytesAfter {
panic(fmt.Sprintf(
"storedBytes changed from %d to %d, but data was not put",
storedBytesBefore, storedBytesAfter))
}
if putData && j.mdJournal.branchID == NullBranchID {
j.unsquashedBytes += uint64(bufLen)
}
j.config.Reporter().NotifySyncStatus(ctx, &keybase1.FSPathSyncStatus{
PublicTopLevelFolder: j.tlfID.IsPublic(),
// Path: TODO,
// TODO: should this be the complete total for the file/directory,
// rather than the diff?
SyncingBytes: bufLen,
// SyncingOps: TODO,
})
j.signalWork()
return nil
}
func (j *tlfJournal) addBlockReference(
ctx context.Context, id kbfsblock.ID, context kbfsblock.Context) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
err := j.blockJournal.addReference(ctx, id, context)
if err != nil {
return err
}
j.signalWork()
return nil
}
func (j *tlfJournal) removeBlockReferences(
ctx context.Context, contexts kbfsblock.ContextMap) (
liveCounts map[kbfsblock.ID]int, err error) {
// Currently the block journal will still serve block data even if
// all journal references to a block have been removed (i.e.,
// because they have all been flushed to the remote server). If
// we ever need to support the `BlockServer.RemoveReferences` call
// in the journal, we might need to change the journal so that it
// marks blocks as flushed-but-still-readable, so that we can
	// distinguish them from blocks that have had all their references
// removed and shouldn't be served anymore. For now, just fail
// this call to make sure no uses of it creep in.
return nil, errors.Errorf(
"Removing block references is currently unsupported in the journal")
}
func (j *tlfJournal) archiveBlockReferences(
ctx context.Context, contexts kbfsblock.ContextMap) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
err := j.blockJournal.archiveReferences(ctx, contexts)
if err != nil {
return err
}
j.signalWork()
return nil
}
func (j *tlfJournal) isBlockUnflushed(id kbfsblock.ID) (bool, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return false, err
}
// Conservatively assume that a block that's on its way to the
// server _has_ been flushed, so that the caller will try to clean
// it up if it's not needed anymore.
if j.flushingBlocks[id] {
return true, nil
}
return j.blockJournal.isUnflushed(id)
}
func (j *tlfJournal) markFlushingBlockIDs(entries blockEntriesToFlush) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
entries.markFlushingBlockIDs(j.flushingBlocks)
return nil
}
func (j *tlfJournal) clearFlushingBlockIDs(entries blockEntriesToFlush) error {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
entries.clearFlushingBlockIDs(j.flushingBlocks)
return nil
}
func (j *tlfJournal) getBranchID() (BranchID, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return NullBranchID, err
}
return j.mdJournal.branchID, nil
}
func (j *tlfJournal) getMDHead(
ctx context.Context, bid BranchID) (ImmutableBareRootMetadata, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return ImmutableBareRootMetadata{}, err
}
return j.mdJournal.getHead(bid)
}
func (j *tlfJournal) getMDRange(
ctx context.Context, bid BranchID, start, stop MetadataRevision) (
[]ImmutableBareRootMetadata, error) {
j.journalLock.RLock()
defer j.journalLock.RUnlock()
if err := j.checkEnabledLocked(); err != nil {
return nil, err
}
return j.mdJournal.getRange(bid, start, stop)
}
func (j *tlfJournal) doPutMD(ctx context.Context, rmd *RootMetadata,
mdInfo unflushedPathMDInfo,
perRevMap unflushedPathsPerRevMap) (
mdID MdID, retryPut bool, err error) {
// Now take the lock and put the MD, merging in the unflushed
// paths while under the lock.
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return MdID{}, false, err
}
if !j.unflushedPaths.appendToCache(mdInfo, perRevMap) {
return MdID{}, true, nil
}
// TODO: remove the revision from the cache on any errors below?
// Tricky when the append is only queued.
mdID, err = j.mdJournal.put(ctx, j.config.Crypto(),
j.config.encryptionKeyGetter(), j.config.BlockSplitter(),
rmd, false)
if err != nil {
return MdID{}, false, err
}
err = j.blockJournal.markMDRevision(ctx, rmd.Revision(), false)
if err != nil {
return MdID{}, false, err
}
j.signalWork()
select {
case j.needBranchCheckCh <- struct{}{}:
default:
}
return mdID, false, nil
}
// prepAndAddRMDWithRetry prepares the paths without holding the lock,
// as `f` might need to take the lock. This is a no-op if the
// unflushed path cache is uninitialized. TODO: avoid doing this if
// we can somehow be sure the cache won't be initialized by the time
// we finish this operation.
func (j *tlfJournal) prepAndAddRMDWithRetry(ctx context.Context,
rmd *RootMetadata,
f func(unflushedPathMDInfo, unflushedPathsPerRevMap) (bool, error)) error {
mdInfo := unflushedPathMDInfo{
revision: rmd.Revision(),
kmd: rmd,
pmd: *rmd.Data(),
// TODO: Plumb through clock? Though the timestamp doesn't
// matter for the unflushed path cache.
localTimestamp: time.Now(),
}
perRevMap, err := j.unflushedPaths.prepUnflushedPaths(
ctx, j.uid, j.key, j.config.Codec(), j.log, mdInfo)
if err != nil {
return err
}
retry, err := f(mdInfo, perRevMap)
if err != nil {
return err
}
if retry {
// The cache was initialized after the last time we tried to
// prepare the unflushed paths.
perRevMap, err = j.unflushedPaths.prepUnflushedPaths(
ctx, j.uid, j.key, j.config.Codec(), j.log, mdInfo)
if err != nil {
return err
}
retry, err := f(mdInfo, perRevMap)
if err != nil {
return err
}
if retry {
return errors.New("Unexpectedly asked to retry " +
"MD put more than once")
}
}
return nil
}
func (j *tlfJournal) putMD(ctx context.Context, rmd *RootMetadata) (
MdID, error) {
var mdID MdID
err := j.prepAndAddRMDWithRetry(ctx, rmd,
func(mdInfo unflushedPathMDInfo, perRevMap unflushedPathsPerRevMap) (
retry bool, err error) {
mdID, retry, err = j.doPutMD(ctx, rmd, mdInfo, perRevMap)
return retry, err
})
if err != nil {
return MdID{}, err
}
return mdID, nil
}
func (j *tlfJournal) clearMDs(ctx context.Context, bid BranchID) error {
if j.onBranchChange != nil {
j.onBranchChange.onTLFBranchChange(j.tlfID, NullBranchID)
}
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return err
}
err := j.mdJournal.clear(ctx, bid)
if err != nil {
return err
}
j.resume(journalPauseConflict)
return nil
}
func (j *tlfJournal) doResolveBranch(ctx context.Context,
bid BranchID, blocksToDelete []kbfsblock.ID, rmd *RootMetadata,
extra ExtraMetadata, mdInfo unflushedPathMDInfo,
perRevMap unflushedPathsPerRevMap) (mdID MdID, retry bool, err error) {
j.journalLock.Lock()
defer j.journalLock.Unlock()
if err := j.checkEnabledLocked(); err != nil {
return MdID{}, false, err
}
// The set of unflushed paths could change as part of the
// resolution, and the revision numbers definitely change.
isPendingLocalSquash := bid == PendingLocalSquashBranchID
if !j.unflushedPaths.reinitializeWithResolution(
mdInfo, perRevMap, isPendingLocalSquash) {
return MdID{}, true, nil
}
// First write the resolution to a new branch, and swap it with
// the existing branch, then clear the existing branch.
mdID, err = j.mdJournal.resolveAndClear(
ctx, j.config.Crypto(), j.config.encryptionKeyGetter(),
j.config.BlockSplitter(), j.config.MDCache(), bid, rmd)
if err != nil {
return MdID{}, false, err
}
// Then go through and mark blocks and md rev markers for ignoring.
err = j.blockJournal.ignoreBlocksAndMDRevMarkers(ctx, blocksToDelete,
rmd.Revision())
if err != nil {
return MdID{}, false, err
}
// Finally, append a new, non-ignored md rev marker for the new revision.
err = j.blockJournal.markMDRevision(
ctx, rmd.Revision(), isPendingLocalSquash)
if err != nil {
return MdID{}, false, err
}
j.resume(journalPauseConflict)
j.signalWork()
// TODO: kick off a background goroutine that deletes ignored
// block data files before the flush gets to them.
return mdID, false, nil
}
func (j *tlfJournal) resolveBranch(ctx context.Context,
bid BranchID, blocksToDelete []kbfsblock.ID, rmd *RootMetadata,
extra ExtraMetadata) (MdID, error) {
var mdID MdID
err := j.prepAndAddRMDWithRetry(ctx, rmd,
func(mdInfo unflushedPathMDInfo, perRevMap unflushedPathsPerRevMap) (
retry bool, err error) {
mdID, retry, err = j.doResolveBranch(
ctx, bid, blocksToDelete, rmd, extra, mdInfo, perRevMap)
return retry, err
})
if err != nil {
return MdID{}, err
}
return mdID, nil
}
func (j *tlfJournal) wait(ctx context.Context) error {
workLeft, err := j.wg.WaitUnlessPaused(ctx)
if err != nil {
return err
}
if workLeft {
j.log.CDebugf(ctx, "Wait completed with work left, "+
"due to paused journal")
}
return nil
}
| 1 | 16,266 | You didn't mean to export this, right? | keybase-kbfs | go |
@@ -3,7 +3,7 @@
# Purpose:
# sns-ruby-example-show-topics.rb demonstrates how to list Amazon Simple Notification Services (SNS) topics using
-# the AWS SDK for JavaScript (v3).
+# the AWS SDK for Ruby.
# Inputs:
# - REGION | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Purpose:
# sns-ruby-example-show-topics.rb demonstrates how to list Amazon Simple Notification Services (SNS) topics using
# the AWS SDK for JavaScript (v3).
# Inputs:
# - REGION
# snippet-start:[sns.Ruby.showTopics]
require 'aws-sdk-sns' # v2: require 'aws-sdk'
def list_topics?(sns_client)
sns_client.topics.each do |topic|
puts topic.arn
rescue StandardError => e
puts "Error while listing the topics: #{e.message}"
end
end
def run_me
region = 'eu-west-1'
sns_client = Aws::SNS::Resource.new(region: region)
puts "Listing the topics."
if list_topics?(sns_client)
else
puts 'The bucket was not created. Stopping program.'
exit 1
end
end
run_me if $PROGRAM_NAME == __FILE__
# snippet-end:[sns.Ruby.showTopics]
| 1 | 20,569 | Simple Notification **Service** (singular) | awsdocs-aws-doc-sdk-examples | rb |
@@ -313,6 +313,7 @@ type client struct {
replayMutex sync.RWMutex
nodeConfig *config.NodeConfig
networkConfig *config.NetworkConfig
+ egressConfig *config.EgressConfig
gatewayOFPort uint32
// ovsDatapathType is the type of the datapath used by the bridge.
ovsDatapathType ovsconfig.OVSDatapathType | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openflow
import (
"encoding/binary"
"fmt"
"math"
"net"
"sort"
"sync"
"time"
"antrea.io/libOpenflow/protocol"
"antrea.io/ofnet/ofctrl"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"antrea.io/antrea/pkg/agent/config"
"antrea.io/antrea/pkg/agent/metrics"
"antrea.io/antrea/pkg/agent/openflow/cookie"
"antrea.io/antrea/pkg/agent/types"
binding "antrea.io/antrea/pkg/ovs/openflow"
"antrea.io/antrea/pkg/ovs/ovsconfig"
"antrea.io/antrea/pkg/ovs/ovsctl"
"antrea.io/antrea/pkg/util/runtime"
"antrea.io/antrea/third_party/proxy"
)
var (
ClassifierTable = binding.NewOFTable(0, "Classification")
UplinkTable = binding.NewOFTable(5, "Uplink")
SpoofGuardTable = binding.NewOFTable(10, "SpoofGuard")
arpResponderTable = binding.NewOFTable(20, "ARPResponder")
IPv6Table = binding.NewOFTable(21, "IPv6")
ServiceHairpinTable = binding.NewOFTable(23, "ServiceHairpin")
ServiceConntrackTable = binding.NewOFTable(24, "ServiceConntrack") // ServiceConntrackTable uses a new ct_zone to transform SNAT'd connections.
ConntrackTable = binding.NewOFTable(30, "ConntrackZone")
ConntrackStateTable = binding.NewOFTable(31, "ConntrackState")
ServiceClassifierTable = binding.NewOFTable(35, "ServiceClassifier")
SessionAffinityTable = binding.NewOFTable(40, "SessionAffinity")
DNATTable = binding.NewOFTable(40, "DNAT(SessionAffinity)")
ServiceLBTable = binding.NewOFTable(41, "ServiceLB")
EndpointDNATTable = binding.NewOFTable(42, "EndpointDNAT")
AntreaPolicyEgressRuleTable = binding.NewOFTable(45, "AntreaPolicyEgressRule")
DefaultTierEgressRuleTable = binding.NewOFTable(49, "DefaultTierEgressRule")
EgressRuleTable = binding.NewOFTable(50, "EgressRule")
EgressDefaultTable = binding.NewOFTable(60, "EgressDefaultRule")
EgressMetricTable = binding.NewOFTable(61, "EgressMetric")
L3ForwardingTable = binding.NewOFTable(70, "L3Forwarding")
SNATTable = binding.NewOFTable(71, "SNAT")
L3DecTTLTable = binding.NewOFTable(72, "IPTTLDec")
L2ForwardingCalcTable = binding.NewOFTable(80, "L2Forwarding")
AntreaPolicyIngressRuleTable = binding.NewOFTable(85, "AntreaPolicyIngressRule")
DefaultTierIngressRuleTable = binding.NewOFTable(89, "DefaultTierIngressRule")
IngressRuleTable = binding.NewOFTable(90, "IngressRule")
IngressDefaultTable = binding.NewOFTable(100, "IngressDefaultRule")
IngressMetricTable = binding.NewOFTable(101, "IngressMetric")
ConntrackCommitTable = binding.NewOFTable(105, "ConntrackCommit")
ServiceConntrackCommitTable = binding.NewOFTable(106, "ServiceConntrackCommit")
HairpinSNATTable = binding.NewOFTable(108, "HairpinSNAT")
L2ForwardingOutTable = binding.NewOFTable(110, "Output")
// Flow priority level
priorityHigh = uint16(210)
priorityNormal = uint16(200)
priorityLow = uint16(190)
priorityMiss = uint16(0)
priorityTopAntreaPolicy = uint16(64990)
priorityDNSIntercept = uint16(64991)
priorityDNSBypass = uint16(64992)
// Index for priority cache
priorityIndex = "priority"
// IPv6 multicast prefix
ipv6MulticastAddr = "FF00::/8"
// IPv6 link-local prefix
ipv6LinkLocalAddr = "FE80::/10"
// Operation field values in ARP packets
arpOpRequest = uint16(1)
arpOpReply = uint16(2)
tableNameIndex = "tableNameIndex"
)
type ofAction int32
const (
add ofAction = iota
mod
del
)
func (a ofAction) String() string {
switch a {
case add:
return "add"
case mod:
return "modify"
case del:
return "delete"
default:
return "unknown"
}
}
var (
// egressTables map records all IDs of tables related to
// egress rules.
egressTables = map[uint8]struct{}{
AntreaPolicyEgressRuleTable.GetID(): {},
EgressRuleTable.GetID(): {},
EgressDefaultTable.GetID(): {},
}
// ofTableCache caches the OpenFlow tables used in the pipeline, and it supports using the table ID and name as the index to query the OpenFlow table.
ofTableCache = cache.NewIndexer(tableIDKeyFunc, cache.Indexers{tableNameIndex: tableNameIndexFunc})
)
func tableNameIndexFunc(obj interface{}) ([]string, error) {
ofTable := obj.(binding.Table)
return []string{ofTable.GetName()}, nil
}
func tableIDKeyFunc(obj interface{}) (string, error) {
ofTable := obj.(binding.Table)
return fmt.Sprintf("%d", ofTable.GetID()), nil
}
func getTableByID(id uint8) binding.Table {
obj, exists, _ := ofTableCache.GetByKey(fmt.Sprintf("%d", id))
if !exists {
return nil
}
return obj.(binding.Table)
}
// GetFlowTableName returns the flow table name given the table ID. An empty
// string is returned if the table cannot be found.
func GetFlowTableName(tableID uint8) string {
table := getTableByID(tableID)
if table == nil {
return ""
}
return table.GetName()
}
// GetFlowTableID looks up the table by name and returns the flow table number
// if the table is found. Otherwise, binding.TableIDAll is returned.
func GetFlowTableID(tableName string) uint8 {
objs, _ := ofTableCache.ByIndex(tableNameIndex, tableName)
if len(objs) == 0 {
return binding.TableIDAll
}
return objs[0].(binding.Table).GetID()
}
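// Illustrative usage sketch (added for clarity, not part of the original pipeline code);
// the names and IDs below follow the table definitions at the top of this file:
//   id := GetFlowTableID("IngressRule")      // 90
//   name := GetFlowTableName(id)             // "IngressRule"
//   missing := GetFlowTableID("NoSuchTable") // binding.TableIDAll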
func GetTableList() []binding.Table {
tables := make([]binding.Table, 0)
for _, obj := range ofTableCache.List() {
t := obj.(binding.Table)
tables = append(tables, t)
}
return tables
}
func GetAntreaPolicyEgressTables() []binding.Table {
return []binding.Table{
AntreaPolicyEgressRuleTable,
EgressDefaultTable,
}
}
func GetAntreaPolicyIngressTables() []binding.Table {
return []binding.Table{
AntreaPolicyIngressRuleTable,
IngressDefaultTable,
}
}
func GetAntreaPolicyBaselineTierTables() []binding.Table {
return []binding.Table{
EgressDefaultTable,
IngressDefaultTable,
}
}
func GetAntreaPolicyMultiTierTables() []binding.Table {
return []binding.Table{
AntreaPolicyEgressRuleTable,
AntreaPolicyIngressRuleTable,
}
}
const (
CtZone = 0xfff0
CtZoneV6 = 0xffe6
SNATCtZone = 0xfff1
SNATCtZoneV6 = 0xffe7
// disposition values used in AP
DispositionAllow = 0b00
DispositionDrop = 0b01
DispositionRej = 0b10
// CustomReasonLogging is used when sending packet-in to the controller to indicate
// that this packet needs logging.
CustomReasonLogging = 0b01
// CustomReasonReject is not only used when sending packet-in to the controller to
// indicate that this packet should be rejected, but also when the controller sends
// the reject packet as packet-out, so that the reject response bypasses conntrack
// and avoids an unexpected drop.
CustomReasonReject = 0b10
// CustomReasonDeny is used when sending packet-in message to controller indicating
// that the corresponding connection has been dropped or rejected. It can be consumed
// by the Flow Exporter to export flow records for connections denied by network
// policy rules.
CustomReasonDeny = 0b100
CustomReasonDNS = 0b1000
)
var DispositionToString = map[uint32]string{
DispositionAllow: "Allow",
DispositionDrop: "Drop",
DispositionRej: "Reject",
}
var (
// traceflowTagToSRange maps the Traceflow dataplane tag to the DSCP bits of
// the IP header ToS field.
traceflowTagToSRange = binding.IPDSCPToSRange
// snatPktMarkRange takes an 8-bit range of pkt_mark to store the ID of
// a SNAT IP. The bit range must match SNATIPMarkMask.
snatPktMarkRange = &binding.Range{0, 7}
GlobalVirtualMAC, _ = net.ParseMAC("aa:bb:cc:dd:ee:ff")
hairpinIP = net.ParseIP("169.254.169.252").To4()
hairpinIPv6 = net.ParseIP("fc00::aabb:ccdd:eeff").To16()
)
type OFEntryOperations interface {
Add(flow binding.Flow) error
Modify(flow binding.Flow) error
Delete(flow binding.Flow) error
AddAll(flows []binding.Flow) error
ModifyAll(flows []binding.Flow) error
BundleOps(adds []binding.Flow, mods []binding.Flow, dels []binding.Flow) error
DeleteAll(flows []binding.Flow) error
AddOFEntries(ofEntries []binding.OFEntry) error
DeleteOFEntries(ofEntries []binding.OFEntry) error
}
type flowCache map[string]binding.Flow
type flowCategoryCache struct {
sync.Map
}
func portToUint16(port int) uint16 {
if port > 0 && port <= math.MaxUint16 {
return uint16(port) // lgtm[go/incorrect-integer-conversion]
}
klog.Errorf("Port value %d out-of-bounds", port)
return 0
}
type client struct {
enableProxy bool
proxyAll bool
enableAntreaPolicy bool
enableDenyTracking bool
enableEgress bool
enableWireGuard bool
roundInfo types.RoundInfo
cookieAllocator cookie.Allocator
bridge binding.Bridge
egressEntryTable uint8
ingressEntryTable uint8
// Flow caches for corresponding deletions.
nodeFlowCache, podFlowCache, serviceFlowCache, snatFlowCache, tfFlowCache *flowCategoryCache
// "fixed" flows installed by the agent after initialization and which do not change during
// the lifetime of the client.
gatewayFlows, defaultServiceFlows, defaultTunnelFlows, hostNetworkingFlows []binding.Flow
// ofEntryOperations is a wrapper interface for OpenFlow entry Add / Modify / Delete operations. It
// enables convenient mocking in unit tests.
ofEntryOperations OFEntryOperations
// policyCache is a storage that supports listing policyRuleConjunction with different indexers.
// It's guaranteed that one policyRuleConjunction is processed by at most one goroutine at any given time.
policyCache cache.Indexer
conjMatchFlowLock sync.Mutex // Lock for accessing globalConjMatchFlowCache
groupCache sync.Map
// globalConjMatchFlowCache is a global map for conjMatchFlowContext. The key is a string generated from the
// conjMatchFlowContext.
globalConjMatchFlowCache map[string]*conjMatchFlowContext
// replayMutex provides exclusive access to the OFSwitch to the ReplayFlows method.
replayMutex sync.RWMutex
nodeConfig *config.NodeConfig
networkConfig *config.NetworkConfig
gatewayOFPort uint32
// ovsDatapathType is the type of the datapath used by the bridge.
ovsDatapathType ovsconfig.OVSDatapathType
// ovsMetersAreSupported indicates whether the OVS datapath supports OpenFlow meters.
ovsMetersAreSupported bool
// packetInHandlers stores the handlers that process PacketIn events. Each packet-in reason can have multiple handlers registered.
// When a packet-in arrives, the openflow client sends the packet to the registered handlers in this map.
packetInHandlers map[uint8]map[string]PacketInHandler
// Supported IP Protocols (IP or IPv6) on the current Node.
ipProtocols []binding.Protocol
// ovsctlClient is the interface for executing OVS "ovs-ofctl" and "ovs-appctl" commands.
ovsctlClient ovsctl.OVSCtlClient
// deterministic represents whether to generate flows deterministically.
// For example, if a flow has multiple actions, setting it to true produces a consistent flow.
// Enabling it may carry a performance impact. It's disabled by default and should only be used in testing.
deterministic bool
}
func (c *client) GetTunnelVirtualMAC() net.HardwareAddr {
return GlobalVirtualMAC
}
func (c *client) changeAll(flowsMap map[ofAction][]binding.Flow) error {
if len(flowsMap) == 0 {
return nil
}
startTime := time.Now()
defer func() {
d := time.Since(startTime)
for k, v := range flowsMap {
if len(v) != 0 {
metrics.OVSFlowOpsLatency.WithLabelValues(k.String()).Observe(float64(d.Milliseconds()))
}
}
}()
if err := c.bridge.AddFlowsInBundle(flowsMap[add], flowsMap[mod], flowsMap[del]); err != nil {
for k, v := range flowsMap {
if len(v) != 0 {
metrics.OVSFlowOpsErrorCount.WithLabelValues(k.String()).Inc()
}
}
return err
}
for k, v := range flowsMap {
if len(v) != 0 {
metrics.OVSFlowOpsCount.WithLabelValues(k.String()).Inc()
}
}
return nil
}
func (c *client) Add(flow binding.Flow) error {
return c.AddAll([]binding.Flow{flow})
}
func (c *client) Modify(flow binding.Flow) error {
return c.ModifyAll([]binding.Flow{flow})
}
func (c *client) Delete(flow binding.Flow) error {
return c.DeleteAll([]binding.Flow{flow})
}
func (c *client) AddAll(flows []binding.Flow) error {
return c.changeAll(map[ofAction][]binding.Flow{add: flows})
}
func (c *client) ModifyAll(flows []binding.Flow) error {
return c.changeAll(map[ofAction][]binding.Flow{mod: flows})
}
func (c *client) DeleteAll(flows []binding.Flow) error {
return c.changeAll(map[ofAction][]binding.Flow{del: flows})
}
func (c *client) BundleOps(adds []binding.Flow, mods []binding.Flow, dels []binding.Flow) error {
return c.changeAll(map[ofAction][]binding.Flow{add: adds, mod: mods, del: dels})
}
func (c *client) changeOFEntries(ofEntries []binding.OFEntry, action ofAction) error {
if len(ofEntries) == 0 {
return nil
}
var adds, mods, dels []binding.OFEntry
if action == add {
adds = ofEntries
} else if action == mod {
mods = ofEntries
} else if action == del {
dels = ofEntries
} else {
return fmt.Errorf("unknown OF Entries Action: %s", action)
}
startTime := time.Now()
defer func() {
d := time.Since(startTime)
metrics.OVSFlowOpsLatency.WithLabelValues(action.String()).Observe(float64(d.Milliseconds()))
}()
if err := c.bridge.AddOFEntriesInBundle(adds, mods, dels); err != nil {
metrics.OVSFlowOpsErrorCount.WithLabelValues(action.String()).Inc()
return err
}
metrics.OVSFlowOpsCount.WithLabelValues(action.String()).Inc()
return nil
}
func (c *client) AddOFEntries(ofEntries []binding.OFEntry) error {
return c.changeOFEntries(ofEntries, add)
}
func (c *client) DeleteOFEntries(ofEntries []binding.OFEntry) error {
return c.changeOFEntries(ofEntries, del)
}
// defaultFlows generates the default flows of all tables.
func (c *client) defaultFlows() (flows []binding.Flow) {
for _, obj := range ofTableCache.List() {
table := obj.(binding.Table)
flowBuilder := table.BuildFlow(priorityMiss)
switch table.GetMissAction() {
case binding.TableMissActionNext:
flowBuilder = flowBuilder.Action().GotoTable(table.GetNext())
case binding.TableMissActionNormal:
flowBuilder = flowBuilder.Action().Normal()
case binding.TableMissActionDrop:
flowBuilder = flowBuilder.Action().Drop()
case binding.TableMissActionNone:
fallthrough
default:
continue
}
flows = append(flows, flowBuilder.Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).Done())
}
return flows
}
// tunnelClassifierFlow generates the flow to mark traffic coming from the tunnelOFPort.
func (c *client) tunnelClassifierFlow(tunnelOFPort uint32, category cookie.Category) binding.Flow {
nextTable := ConntrackTable
if c.proxyAll {
nextTable = ServiceConntrackTable
}
return ClassifierTable.BuildFlow(priorityNormal).
MatchInPort(tunnelOFPort).
Action().LoadRegMark(FromTunnelRegMark).
Action().LoadRegMark(RewriteMACRegMark).
Action().GotoTable(nextTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// gatewayClassifierFlow generates the flow to mark traffic coming from the gatewayOFPort.
func (c *client) gatewayClassifierFlow(category cookie.Category) binding.Flow {
return ClassifierTable.BuildFlow(priorityNormal).
MatchInPort(config.HostGatewayOFPort).
Action().LoadRegMark(FromGatewayRegMark).
Action().GotoTable(ClassifierTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// podClassifierFlow generates the flow to mark traffic coming from the podOFPort.
func (c *client) podClassifierFlow(podOFPort uint32, category cookie.Category) binding.Flow {
return ClassifierTable.BuildFlow(priorityLow).
MatchInPort(podOFPort).
Action().LoadRegMark(FromLocalRegMark).
Action().GotoTable(ClassifierTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// connectionTrackFlows generates flows that redirect traffic to ct_zone and handle traffic according to ct_state:
// 1) commit new connections to ct_zone(0xfff0) in the ConntrackCommitTable.
// 2) Add ct_mark on the packet if it is sent to the switch from the host gateway.
// 3) Allow traffic if it hits ct_mark and is sent from the host gateway.
// 4) Drop all invalid traffic.
// 5) Let other traffic go to the SessionAffinityTable first and then the ServiceLBTable.
// The SessionAffinityTable is a side-effect table, which means traffic will not
// be resubmitted to any table from it. The ServiceLBTable does Endpoint selection
// for traffic to a Service.
// 6) Add a flow so that the reject response packet sent by the controller bypasses the checks above.
func (c *client) connectionTrackFlows(category cookie.Category) []binding.Flow {
flows := c.conntrackBasicFlows(category)
if c.enableProxy {
// Replace the default flow with multiple resubmits actions.
if c.proxyAll {
flows = append(flows, ConntrackStateTable.BuildFlow(priorityMiss).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().ResubmitToTable(ServiceClassifierTable.GetID()).
Action().ResubmitToTable(SessionAffinityTable.GetID()).
Action().ResubmitToTable(ServiceLBTable.GetID()).
Done())
} else {
flows = append(flows, ConntrackStateTable.BuildFlow(priorityMiss).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().ResubmitToTable(SessionAffinityTable.GetID()).
Action().ResubmitToTable(ServiceLBTable.GetID()).
Done())
}
for _, proto := range c.ipProtocols {
gatewayIP := c.nodeConfig.GatewayConfig.IPv4
serviceVirtualIP := config.VirtualServiceIPv4
snatZone := SNATCtZone
ctZone := CtZone
if proto == binding.ProtocolIPv6 {
gatewayIP = c.nodeConfig.GatewayConfig.IPv6
serviceVirtualIP = config.VirtualServiceIPv6
snatZone = SNATCtZoneV6
ctZone = CtZoneV6
}
flows = append(flows,
// This flow is used to maintain DNAT conntrack for Service traffic.
ConntrackTable.BuildFlow(priorityNormal).MatchProtocol(proto).
Action().CT(false, ConntrackTable.GetNext(), ctZone).NAT().CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
ConntrackCommitTable.BuildFlow(priorityLow).MatchProtocol(proto).
MatchCTStateTrk(true).
MatchCTMark(ServiceCTMark).
MatchRegMark(EpSelectedRegMark).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().GotoTable(ConntrackCommitTable.GetNext()).
Done(),
)
if c.proxyAll {
flows = append(flows,
// This flow is used to match the Service traffic from the Antrea gateway. The Service traffic from the gateway
// should enter ServiceConntrackCommitTable, otherwise it will be matched by other flows in
// ConntrackCommitTable.
ConntrackCommitTable.BuildFlow(priorityHigh).MatchProtocol(proto).
MatchCTMark(ServiceCTMark).
MatchRegMark(FromGatewayRegMark).
Action().GotoTable(ServiceConntrackCommitTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// This flow is used to maintain SNAT conntrack for Service traffic.
ServiceConntrackTable.BuildFlow(priorityNormal).MatchProtocol(proto).
Action().CT(false, ServiceConntrackTable.GetNext(), snatZone).NAT().CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// This flow is used to match the following cases:
// - The first packet of NodePort/LoadBalancer whose Endpoint is not on local Pod CIDR or any remote
// Pod CIDRs. Note that this flow changes the behavior for packets of a NodePort/LoadBalancer Service
// whose externalTrafficPolicy is Local when the Endpoint is on the host network. According to the definition
// of externalTrafficPolicy Local, the source IP should be retained. If the Endpoint is on the host network,
// there should be only one backend Pod of the Service on a Node (it is impossible to have more than
// one Pod listening on the same port on the host network), so it is not very useful to expose the Pod as
// a NodePort Service, as it makes no difference to access it directly.
// - The first packet of ClusterIP and the Endpoint is not on local Pod CIDR or any remote Pod CIDRs.
// As the packet is from Antrea gateway, and it will pass through Antrea gateway, a virtual IP is used
// to perform SNAT for the packet, rather than Antrea gateway's IP.
ServiceConntrackCommitTable.BuildFlow(priorityHigh).MatchProtocol(proto).
MatchRegMark(ToGatewayRegMark).
Cookie(c.cookieAllocator.Request(category).Raw()).
MatchCTStateNew(true).
MatchCTStateTrk(true).
Action().CT(true, ServiceConntrackCommitTable.GetNext(), snatZone).
SNAT(&binding.IPRange{StartIP: serviceVirtualIP, EndIP: serviceVirtualIP}, nil).
CTDone().
Done(),
// This flow is used to match the first packet of NodePort/LoadBalancer whose output port is not
// Antrea gateway, and externalTrafficPolicy is Cluster. This packet requires SNAT. Antrea gateway
// IP is used to perform SNAT for the packet.
ServiceConntrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(proto).
MatchRegMark(ServiceNeedSNATRegMark).
Cookie(c.cookieAllocator.Request(category).Raw()).
MatchCTStateNew(true).
MatchCTStateTrk(true).
Action().CT(true, ServiceConntrackCommitTable.GetNext(), snatZone).
SNAT(&binding.IPRange{StartIP: gatewayIP, EndIP: gatewayIP}, nil).
CTDone().
Done(),
// This flow is used to match the subsequent request packets of Service traffic whose first request packet has been committed
// and SNAT'd. For example:
/*
* 192.168.77.1 is the IP address of client.
* 192.168.77.100 is the IP address of k8s node.
* 30001 is a NodePort port.
* 10.10.0.1 is the IP address of Antrea gateway.
* 10.10.0.3 is the Endpoint of NodePort Service.
* pkt 1 (request)
* client 192.168.77.1:12345->192.168.77.100:30001
* ct zone SNAT 65521 192.168.77.1:12345->192.168.77.100:30001
* ct zone DNAT 65520 192.168.77.1:12345->192.168.77.100:30001
* ct commit DNAT zone 65520 192.168.77.1:12345->192.168.77.100:30001 => 192.168.77.1:12345->10.10.0.3:80
* ct commit SNAT zone 65521 192.168.77.1:12345->10.10.0.3:80 => 10.10.0.1:12345->10.10.0.3:80
* output
* pkt 2 (response)
* Pod 10.10.0.3:80->10.10.0.1:12345
* ct zone SNAT 65521 10.10.0.3:80->10.10.0.1:12345 => 10.10.0.3:80->192.168.77.1:12345
* ct zone DNAT 65520 10.10.0.3:80->192.168.77.1:12345 => 192.168.77.1:30001->192.168.77.1:12345
* output
* pkt 3 (request)
* client 192.168.77.1:12345->192.168.77.100:30001
* ct zone SNAT 65521 192.168.77.1:12345->192.168.77.100:30001
* ct zone DNAT 65520 192.168.77.1:12345->10.10.0.3:80
* ct zone SNAT 65521 192.168.77.1:12345->10.10.0.3:80 => 10.10.0.1:12345->10.10.0.3:80
* output
* pkt ...
The source IP address of pkt 3 cannot be transformed through zone 65521 as there is no conntrack entry for
192.168.77.1:12345<->192.168.77.100:30001, so the source IP is still 192.168.77.1.
Before output, pkt 3 needs SNAT, but the connection has already been committed. This flow makes pkt 3 perform SNAT.
*/
ServiceConntrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(proto).
Cookie(c.cookieAllocator.Request(category).Raw()).
MatchCTStateNew(false).
MatchCTStateTrk(true).
Action().CT(false, ServiceConntrackCommitTable.GetNext(), snatZone).
NAT().
CTDone().
Done(),
)
}
}
} else {
flows = append(flows, c.kubeProxyFlows(category)...)
}
// TODO: the following flows should be moved to the "kubeProxyFlows" function. Since another PR (#1198) is trying
// to polish the relevant logic, this refactoring should be done after that PR is merged.
for _, proto := range c.ipProtocols {
ctZone := CtZone
if proto == binding.ProtocolIPv6 {
ctZone = CtZoneV6
}
flows = append(flows,
// Connections initiated through the gateway are marked with FromGatewayCTMark.
ConntrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(proto).
MatchRegMark(FromGatewayRegMark).
MatchCTStateNew(true).MatchCTStateTrk(true).
Action().CT(true, ConntrackCommitTable.GetNext(), ctZone).LoadToCtMark(FromGatewayCTMark).CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Add reject response packet bypass flow.
c.conntrackBypassRejectFlow(proto),
)
}
return flows
}
// conntrackBypassRejectFlow generates a flow which lets the reject response packet
// sent by the controller bypass conntrack, to avoid an unexpected packet drop.
func (c *client) conntrackBypassRejectFlow(proto binding.Protocol) binding.Flow {
return ConntrackStateTable.BuildFlow(priorityHigh).
MatchProtocol(proto).
MatchRegMark(CustomReasonRejectRegMark).
Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).
Action().ResubmitToTable(ConntrackStateTable.GetNext()).
Done()
}
// dnsResponseBypassConntrackFlow generates a flow which lets the DNS response
// packet-out bypass conntrack, to avoid an unexpected packet drop.
func (c *client) dnsResponseBypassConntrackFlow() binding.Flow {
table := ConntrackTable
if c.proxyAll {
table = ServiceConntrackTable
}
return table.BuildFlow(priorityHigh).
MatchRegFieldWithValue(CustomReasonField, CustomReasonDNS).
Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).
Action().ResubmitToTable(L2ForwardingCalcTable.GetID()).
Done()
}
// dnsResponseBypassPacketInFlow generates a flow which is used to bypass the
// dns packetIn conjunction flow for dns response packetOut. This packetOut
// should be sent directly to the requesting client without being intercepted
// again.
func (c *client) dnsResponseBypassPacketInFlow() binding.Flow {
// TODO: use a unified register bit to mark packetOuts. The pipeline does not need to be
// aware of why the packetOut is being set by the controller, it just needs to be aware that
// this is a packetOut message and that some pipeline stages (conntrack, policy enforcement)
// should therefore be skipped.
return AntreaPolicyIngressRuleTable.BuildFlow(priorityDNSBypass).
MatchRegFieldWithValue(CustomReasonField, CustomReasonDNS).
Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).
Action().ResubmitToTable(L2ForwardingOutTable.GetID()).
Done()
}
func (c *client) conntrackBasicFlows(category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, proto := range c.ipProtocols {
ctZone := CtZone
if proto == binding.ProtocolIPv6 {
ctZone = CtZoneV6
}
flows = append(flows,
ConntrackStateTable.BuildFlow(priorityLow).MatchProtocol(proto).
MatchCTStateInv(true).MatchCTStateTrk(true).
Action().Drop().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
ConntrackCommitTable.BuildFlow(priorityLow).MatchProtocol(proto).
MatchCTStateNew(true).MatchCTStateTrk(true).
Action().CT(true, ConntrackCommitTable.GetNext(), ctZone).CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
)
}
return flows
}
func (c *client) kubeProxyFlows(category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, proto := range c.ipProtocols {
ctZone := CtZone
if proto == binding.ProtocolIPv6 {
ctZone = CtZoneV6
}
flows = append(flows,
ConntrackTable.BuildFlow(priorityNormal).MatchProtocol(proto).
Action().CT(false, ConntrackTable.GetNext(), ctZone).CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
)
}
return flows
}
// TODO: Use DuplicateToBuilder or integrate this function into the original one to avoid unexpected
// differences.
// traceflowConnectionTrackFlows generates Traceflow specific flows in the
// connectionTrackStateTable or L2ForwardingCalcTable. When packet is not
// provided, the flows bypass the drop flow in connectionTrackFlows to avoid
// unexpected drop of the injected Traceflow packet, and to drop any Traceflow
// packet that has ct_state +rpl, which may happen when the Traceflow request
// destination is the Node's IP.
// When packet is provided, a flow is added to mark - the first packet of the
// first connection that matches the provided packet - as the Traceflow packet.
// The flow is added in connectionTrackStateTable when receiverOnly is false and
// it also matches in_port to be the provided ofPort (the sender Pod); otherwise
// when receiverOnly is true, the flow is added into L2ForwardingCalcTable and
// matches the destination MAC (the receiver Pod MAC).
func (c *client) traceflowConnectionTrackFlows(dataplaneTag uint8, receiverOnly bool, packet *binding.Packet, ofPort uint32, timeout uint16, category cookie.Category) []binding.Flow {
var flows []binding.Flow
if packet == nil {
for _, ipProtocol := range c.ipProtocols {
flowBuilder := ConntrackStateTable.BuildFlow(priorityLow + 1).
MatchProtocol(ipProtocol).
MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
Cookie(c.cookieAllocator.Request(category).Raw())
if c.enableProxy {
flowBuilder = flowBuilder.
Action().ResubmitToTable(SessionAffinityTable.GetID()).
Action().ResubmitToTable(ServiceLBTable.GetID())
} else {
flowBuilder = flowBuilder.
Action().ResubmitToTable(ConntrackStateTable.GetNext())
}
flows = append(flows, flowBuilder.Done())
flows = append(flows, ConntrackStateTable.BuildFlow(priorityLow+2).
MatchProtocol(ipProtocol).
MatchIPDSCP(dataplaneTag).
MatchCTStateTrk(true).MatchCTStateRpl(true).
SetHardTimeout(timeout).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().Drop().
Done())
}
} else {
var flowBuilder binding.FlowBuilder
if !receiverOnly {
flowBuilder = ConntrackStateTable.BuildFlow(priorityLow).
MatchInPort(ofPort).
Action().LoadIPDSCP(dataplaneTag)
if packet.DestinationIP != nil {
flowBuilder = flowBuilder.MatchDstIP(packet.DestinationIP)
}
if c.enableProxy {
flowBuilder = flowBuilder.
Action().ResubmitToTable(SessionAffinityTable.GetID()).
Action().ResubmitToTable(ServiceLBTable.GetID())
} else {
flowBuilder = flowBuilder.
Action().ResubmitToTable(ConntrackStateTable.GetNext())
}
} else {
nextTable := c.ingressEntryTable
flowBuilder = L2ForwardingCalcTable.BuildFlow(priorityHigh).
MatchDstMAC(packet.DestinationMAC).
Action().LoadToRegField(TargetOFPortField, ofPort).
Action().LoadRegMark(OFPortFoundRegMark).
Action().LoadIPDSCP(dataplaneTag).
Action().GotoTable(nextTable)
if packet.SourceIP != nil {
flowBuilder = flowBuilder.MatchSrcIP(packet.SourceIP)
}
}
flowBuilder = flowBuilder.MatchCTStateNew(true).MatchCTStateTrk(true).
SetHardTimeout(timeout).
Cookie(c.cookieAllocator.Request(category).Raw())
// Match transport header
switch packet.IPProto {
case protocol.Type_ICMP:
flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolICMP)
case protocol.Type_IPv6ICMP:
flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolICMPv6)
case protocol.Type_TCP:
if packet.IsIPv6 {
flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolTCPv6)
} else {
flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolTCP)
}
case protocol.Type_UDP:
if packet.IsIPv6 {
flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolUDPv6)
} else {
flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolUDP)
}
default:
flowBuilder = flowBuilder.MatchIPProtocolValue(packet.IsIPv6, packet.IPProto)
}
if packet.IPProto == protocol.Type_TCP || packet.IPProto == protocol.Type_UDP {
if packet.DestinationPort != 0 {
flowBuilder = flowBuilder.MatchDstPort(packet.DestinationPort, nil)
}
if packet.SourcePort != 0 {
flowBuilder = flowBuilder.MatchSrcPort(packet.SourcePort, nil)
}
}
flows = []binding.Flow{flowBuilder.Done()}
}
return flows
}
func (c *client) traceflowNetworkPolicyFlows(dataplaneTag uint8, timeout uint16, category cookie.Category) []binding.Flow {
flows := []binding.Flow{}
c.conjMatchFlowLock.Lock()
defer c.conjMatchFlowLock.Unlock()
// Copy default drop rules.
for _, ctx := range c.globalConjMatchFlowCache {
if ctx.dropFlow != nil {
copyFlowBuilder := ctx.dropFlow.CopyToBuilder(priorityNormal+2, false)
if ctx.dropFlow.FlowProtocol() == "" {
copyFlowBuilderIPv6 := ctx.dropFlow.CopyToBuilder(priorityNormal+2, false)
copyFlowBuilderIPv6 = copyFlowBuilderIPv6.MatchProtocol(binding.ProtocolIPv6)
if c.ovsMetersAreSupported {
copyFlowBuilderIPv6 = copyFlowBuilderIPv6.Action().Meter(PacketInMeterIDTF)
}
flows = append(flows, copyFlowBuilderIPv6.MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().SendToController(uint8(PacketInReasonTF)).
Done())
copyFlowBuilder = copyFlowBuilder.MatchProtocol(binding.ProtocolIP)
}
if c.ovsMetersAreSupported {
copyFlowBuilder = copyFlowBuilder.Action().Meter(PacketInMeterIDTF)
}
flows = append(flows, copyFlowBuilder.MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().SendToController(uint8(PacketInReasonTF)).
Done())
}
}
// Copy Antrea NetworkPolicy drop rules.
for _, conj := range c.policyCache.List() {
for _, flow := range conj.(*policyRuleConjunction).metricFlows {
if flow.IsDropFlow() {
copyFlowBuilder := flow.CopyToBuilder(priorityNormal+2, false)
// Generate both IPv4 and IPv6 flows if the original drop flow doesn't match IP/IPv6.
// DSCP field is in IP/IPv6 headers so IP/IPv6 match is required in a flow.
if flow.FlowProtocol() == "" {
copyFlowBuilderIPv6 := flow.CopyToBuilder(priorityNormal+2, false)
copyFlowBuilderIPv6 = copyFlowBuilderIPv6.MatchProtocol(binding.ProtocolIPv6)
if c.ovsMetersAreSupported {
copyFlowBuilderIPv6 = copyFlowBuilderIPv6.Action().Meter(PacketInMeterIDTF)
}
flows = append(flows, copyFlowBuilderIPv6.MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().SendToController(uint8(PacketInReasonTF)).
Done())
copyFlowBuilder = copyFlowBuilder.MatchProtocol(binding.ProtocolIP)
}
if c.ovsMetersAreSupported {
copyFlowBuilder = copyFlowBuilder.Action().Meter(PacketInMeterIDTF)
}
flows = append(flows, copyFlowBuilder.MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().SendToController(uint8(PacketInReasonTF)).
Done())
}
}
}
return flows
}
// serviceLBBypassFlows makes packets that belong to a tracked connection bypass
// service LB tables and enter egressRuleTable directly.
func (c *client) serviceLBBypassFlows(ipProtocol binding.Protocol) []binding.Flow {
flows := []binding.Flow{
// Tracked connections with the ServiceCTMark (load-balanced by AntreaProxy) receive
// the macRewriteMark and are sent to egressRuleTable.
ConntrackStateTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol).
MatchCTMark(ServiceCTMark).
MatchCTStateNew(false).MatchCTStateTrk(true).
Action().LoadRegMark(RewriteMACRegMark).
Action().GotoTable(EgressRuleTable.GetID()).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done(),
// Tracked connections without the ServiceCTMark are sent to egressRuleTable
// directly. This is meant to match connections which were load-balanced by
// kube-proxy before AntreaProxy got enabled.
ConntrackStateTable.BuildFlow(priorityLow).MatchProtocol(ipProtocol).
MatchCTStateNew(false).MatchCTStateTrk(true).
Action().GotoTable(EgressRuleTable.GetID()).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done(),
}
return flows
}
// l2ForwardCalcFlow generates the flow that matches dst MAC and loads ofPort to reg.
func (c *client) l2ForwardCalcFlow(dstMAC net.HardwareAddr, ofPort uint32, skipIngressRules bool, category cookie.Category) binding.Flow {
nextTable := L2ForwardingCalcTable.GetNext()
if !skipIngressRules {
// Go to ingress NetworkPolicy tables for traffic to local Pods.
nextTable = c.ingressEntryTable
}
return L2ForwardingCalcTable.BuildFlow(priorityNormal).
MatchDstMAC(dstMAC).
Action().LoadToRegField(TargetOFPortField, ofPort).
Action().LoadRegMark(OFPortFoundRegMark).
Action().GotoTable(nextTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
// Broadcast, multicast, and unknown unicast packets will be dropped by
// the default flow of L2ForwardingOutTable.
}
// traceflowL2ForwardOutputFlows generates Traceflow-specific flows that output Traceflow packets
// to the OVS port and the Antrea Agent after L2 forwarding calculation.
func (c *client) traceflowL2ForwardOutputFlows(dataplaneTag uint8, liveTraffic, droppedOnly bool, timeout uint16, category cookie.Category) []binding.Flow {
flows := []binding.Flow{}
for _, ipProtocol := range c.ipProtocols {
if c.networkConfig.TrafficEncapMode.SupportsEncap() {
// SendToController and Output if output port is tunnel port.
fb1 := L2ForwardingOutTable.BuildFlow(priorityNormal+3).
MatchRegFieldWithValue(TargetOFPortField, config.DefaultTunOFPort).
MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
MatchProtocol(ipProtocol).
MatchRegMark(OFPortFoundRegMark).
Action().OutputToRegField(TargetOFPortField).
Cookie(c.cookieAllocator.Request(category).Raw())
// For injected packets, only SendToController if output port is local
// gateway. In encapMode, a Traceflow packet going out of the gateway
// port (i.e. exiting the overlay) essentially means that the Traceflow
// request is complete.
fb2 := L2ForwardingOutTable.BuildFlow(priorityNormal+2).
MatchRegFieldWithValue(TargetOFPortField, config.HostGatewayOFPort).
MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
MatchProtocol(ipProtocol).
MatchRegMark(OFPortFoundRegMark).
Cookie(c.cookieAllocator.Request(category).Raw())
// Do not send to controller if captures only dropped packet.
if !droppedOnly {
if c.ovsMetersAreSupported {
fb1 = fb1.Action().Meter(PacketInMeterIDTF)
fb2 = fb2.Action().Meter(PacketInMeterIDTF)
}
fb1 = fb1.Action().SendToController(uint8(PacketInReasonTF))
fb2 = fb2.Action().SendToController(uint8(PacketInReasonTF))
}
if liveTraffic {
// Clear the loaded DSCP bits before output.
fb2 = fb2.Action().LoadIPDSCP(0).
Action().OutputToRegField(TargetOFPortField)
}
flows = append(flows, fb1.Done(), fb2.Done())
} else {
// SendToController and Output if output port is local gateway. Unlike in
// encapMode, inter-Node Pod-to-Pod traffic is expected to go out of the
// gateway port on the way to its destination.
fb1 := L2ForwardingOutTable.BuildFlow(priorityNormal+2).
MatchRegFieldWithValue(TargetOFPortField, config.HostGatewayOFPort).
MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
MatchProtocol(ipProtocol).
MatchRegMark(OFPortFoundRegMark).
Action().OutputToRegField(TargetOFPortField).
Cookie(c.cookieAllocator.Request(category).Raw())
if !droppedOnly {
if c.ovsMetersAreSupported {
fb1 = fb1.Action().Meter(PacketInMeterIDTF)
}
fb1 = fb1.Action().SendToController(uint8(PacketInReasonTF))
}
flows = append(flows, fb1.Done())
}
// Only SendToController if output port is local gateway and destination IP is gateway.
gatewayIP := c.nodeConfig.GatewayConfig.IPv4
if ipProtocol == binding.ProtocolIPv6 {
gatewayIP = c.nodeConfig.GatewayConfig.IPv6
}
if gatewayIP != nil {
fb := L2ForwardingOutTable.BuildFlow(priorityNormal+3).
MatchRegFieldWithValue(TargetOFPortField, config.HostGatewayOFPort).
MatchDstIP(gatewayIP).
MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
MatchProtocol(ipProtocol).
MatchRegMark(OFPortFoundRegMark).
Cookie(c.cookieAllocator.Request(category).Raw())
if !droppedOnly {
if c.ovsMetersAreSupported {
fb = fb.Action().Meter(PacketInMeterIDTF)
}
fb = fb.Action().SendToController(uint8(PacketInReasonTF))
}
if liveTraffic {
fb = fb.Action().LoadIPDSCP(0).
Action().OutputToRegField(TargetOFPortField)
}
flows = append(flows, fb.Done())
}
// Only SendToController if output port is Pod port.
fb := L2ForwardingOutTable.BuildFlow(priorityNormal + 2).
MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
MatchProtocol(ipProtocol).
MatchRegMark(OFPortFoundRegMark).
Cookie(c.cookieAllocator.Request(category).Raw())
if !droppedOnly {
if c.ovsMetersAreSupported {
fb = fb.Action().Meter(PacketInMeterIDTF)
}
fb = fb.Action().SendToController(uint8(PacketInReasonTF))
}
if liveTraffic {
fb = fb.Action().LoadIPDSCP(0).
Action().OutputToRegField(TargetOFPortField)
}
flows = append(flows, fb.Done())
if c.enableProxy {
// Only SendToController for hairpin traffic.
// This flow must have higher priority than the one installed by l2ForwardOutputServiceHairpinFlow
fbHairpin := L2ForwardingOutTable.BuildFlow(priorityHigh + 2).
MatchIPDSCP(dataplaneTag).
SetHardTimeout(timeout).
MatchProtocol(ipProtocol).
MatchRegMark(HairpinRegMark).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw())
if !droppedOnly {
if c.ovsMetersAreSupported {
fbHairpin = fbHairpin.Action().Meter(PacketInMeterIDTF)
}
fbHairpin = fbHairpin.Action().SendToController(uint8(PacketInReasonTF))
}
if liveTraffic {
fbHairpin = fbHairpin.Action().LoadIPDSCP(0).
Action().OutputInPort()
}
flows = append(flows, fbHairpin.Done())
}
}
return flows
}
// l2ForwardOutputServiceHairpinFlow uses the in_port action for Service
// hairpin packets to prevent them from being dropped by OVS.
func (c *client) l2ForwardOutputServiceHairpinFlow() binding.Flow {
return L2ForwardingOutTable.BuildFlow(priorityHigh).
MatchRegMark(HairpinRegMark).
Action().OutputInPort().
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// l2ForwardOutputFlows generates the flows that output packets to OVS port after L2 forwarding calculation.
func (c *client) l2ForwardOutputFlows(category cookie.Category) []binding.Flow {
var flows []binding.Flow
flows = append(flows,
L2ForwardingOutTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchRegMark(OFPortFoundRegMark).
Action().OutputToRegField(TargetOFPortField).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
L2ForwardingOutTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIPv6).
MatchRegMark(OFPortFoundRegMark).
Action().OutputToRegField(TargetOFPortField).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
)
return flows
}
// l3FwdFlowToPod generates the L3 forward flows for traffic from tunnel to a
// local Pod. It rewrites the destination MAC (should be GlobalVirtualMAC) to
// the Pod interface MAC, and rewrites the source MAC to the gateway interface
// MAC.
func (c *client) l3FwdFlowToPod(localGatewayMAC net.HardwareAddr, podInterfaceIPs []net.IP, podInterfaceMAC net.HardwareAddr, category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, ip := range podInterfaceIPs {
ipProtocol := getIPProtocol(ip)
flows = append(flows, L3ForwardingTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol).
MatchRegMark(RewriteMACRegMark).
MatchDstIP(ip).
Action().SetSrcMAC(localGatewayMAC).
// Rewrite src MAC to local gateway MAC, and rewrite dst MAC to pod MAC
Action().SetDstMAC(podInterfaceMAC).
Action().GotoTable(L3DecTTLTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done())
}
return flows
}
// l3FwdFlowRouteToPod generates the flows to route the traffic to a Pod based on
// the destination IP. It rewrites the destination MAC of the packets to the Pod
// interface MAC. The flow is used in the networkPolicyOnly mode for the traffic
// from the gateway to a local Pod.
func (c *client) l3FwdFlowRouteToPod(podInterfaceIPs []net.IP, podInterfaceMAC net.HardwareAddr, category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, ip := range podInterfaceIPs {
ipProtocol := getIPProtocol(ip)
flows = append(flows, L3ForwardingTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol).
MatchDstIP(ip).
Action().SetDstMAC(podInterfaceMAC).
Action().GotoTable(L3DecTTLTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done())
}
return flows
}
// l3FwdFlowRouteToGW generates the flows to route the traffic to the gateway
// interface. It rewrites the destination MAC of the packets to the gateway
// interface MAC. The flow is used in the networkPolicyOnly mode for the traffic
// from a local Pod to remote Pods, Nodes, or external network.
func (c *client) l3FwdFlowRouteToGW(gwMAC net.HardwareAddr, category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, ipProto := range c.ipProtocols {
flows = append(flows, L3ForwardingTable.BuildFlow(priorityLow).MatchProtocol(ipProto).
Action().SetDstMAC(gwMAC).
Action().GotoTable(L3ForwardingTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
)
}
return flows
}
// l3FwdFlowToGateway generates the L3 forward flows to rewrite the destination MAC of the packets to the gateway interface
// MAC if the destination IP is the gateway IP or the connection was initiated through the gateway interface.
func (c *client) l3FwdFlowToGateway(localGatewayIPs []net.IP, localGatewayMAC net.HardwareAddr, category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, ip := range localGatewayIPs {
ipProtocol := getIPProtocol(ip)
flows = append(flows, L3ForwardingTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol).
MatchRegMark(RewriteMACRegMark).
MatchDstIP(ip).
Action().SetDstMAC(localGatewayMAC).
Action().GotoTable(L3ForwardingTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done())
}
// Rewrite the destination MAC address with the local host gateway MAC if the packet is in the reply direction and
// is marked with FromGatewayCTMark. This is for connections which were initiated through the gateway, to ensure that
// this reply traffic gets forwarded correctly (back to the host network namespace, through the gateway). In
// particular, it is necessary in the following 2 cases:
// 1) reply traffic for connections from a local Pod to a ClusterIP Service (when AntreaProxy is disabled and
// kube-proxy is used). In this case the destination IP address of the reply traffic is the Pod which initiated the
// connection to the Service (no SNAT). We need to make sure that these packets are sent back through the gateway
// so that the source IP can be rewritten (Service backend IP -> Service ClusterIP).
// 2) when hair-pinning is involved, i.e. connections between 2 local Pods, for which NAT is performed. This
// applies regardless of whether AntreaProxy is enabled or not, and thus also applies to Windows Nodes (for which
// AntreaProxy is enabled by default). One example is a Pod accessing a NodePort Service for which
// externalTrafficPolicy is set to Local, using the local Node's IP address.
for _, proto := range c.ipProtocols {
flows = append(flows, L3ForwardingTable.BuildFlow(priorityHigh).MatchProtocol(proto).
MatchCTMark(FromGatewayCTMark).
MatchCTStateRpl(true).MatchCTStateTrk(true).
Action().SetDstMAC(localGatewayMAC).
Action().GotoTable(L3ForwardingTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done())
}
return flows
}
// l3FwdFlowToRemote generates the L3 forward flow for traffic to a remote Node
// (Pods or gateway) through the tunnel.
func (c *client) l3FwdFlowToRemote(
localGatewayMAC net.HardwareAddr,
peerSubnet net.IPNet,
tunnelPeer net.IP,
category cookie.Category) binding.Flow {
ipProto := getIPProtocol(peerSubnet.IP)
return L3ForwardingTable.BuildFlow(priorityNormal).MatchProtocol(ipProto).
MatchDstIPNet(peerSubnet).
// Rewrite src MAC to local gateway MAC and rewrite dst MAC to virtual MAC.
Action().SetSrcMAC(localGatewayMAC).
Action().SetDstMAC(GlobalVirtualMAC).
// Flow based tunnel. Set tunnel destination.
Action().SetTunnelDst(tunnelPeer).
Action().GotoTable(L3DecTTLTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l3FwdFlowToRemoteViaGW generates the L3 forward flow to support traffic to a
// remote Node via the gateway. It is used when the cross-Node traffic does not require
// encapsulation (in noEncap, networkPolicyOnly, or hybrid mode).
func (c *client) l3FwdFlowToRemoteViaGW(
localGatewayMAC net.HardwareAddr,
peerSubnet net.IPNet,
category cookie.Category) binding.Flow {
ipProto := getIPProtocol(peerSubnet.IP)
return L3ForwardingTable.BuildFlow(priorityNormal).MatchProtocol(ipProto).
MatchDstIPNet(peerSubnet).
Action().SetDstMAC(localGatewayMAC).
Action().GotoTable(L3ForwardingTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l3FwdServiceDefaultFlowsViaGW generates the default L3 forward flows to let Service traffic pass through the Antrea gateway.
func (c *client) l3FwdServiceDefaultFlowsViaGW(ipProto binding.Protocol, category cookie.Category) []binding.Flow {
gatewayMAC := c.nodeConfig.GatewayConfig.MAC
flows := []binding.Flow{
// This flow is used to match the packets of Service traffic:
// - NodePort/LoadBalancer request packets which pass through Antrea gateway and the Service Endpoint is not on
// local Pod CIDR or any remote Pod CIDRs.
// - ClusterIP request packets which are from Antrea gateway and the Service Endpoint is not on local Pod CIDR
// or any remote Pod CIDRs.
// - NodePort/LoadBalancer/ClusterIP response packets.
// The matched packets should leave through Antrea gateway, however, they also enter through Antrea gateway. This
// is hairpin traffic.
L3ForwardingTable.BuildFlow(priorityLow).MatchProtocol(ipProto).
MatchCTMark(ServiceCTMark).
MatchCTStateTrk(true).
MatchRegMark(RewriteMACRegMark).
Action().SetDstMAC(gatewayMAC).
Action().GotoTable(L3DecTTLTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
}
return flows
}
// arpResponderFlow generates the ARP responder flow entry that replies to ARP requests from the local gateway for the peer
// gateway MAC.
func (c *client) arpResponderFlow(peerGatewayIP net.IP, category cookie.Category) binding.Flow {
return arpResponderTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP).
MatchARPOp(arpOpRequest).
MatchARPTpa(peerGatewayIP).
Action().Move(binding.NxmFieldSrcMAC, binding.NxmFieldDstMAC).
Action().SetSrcMAC(GlobalVirtualMAC).
Action().LoadARPOperation(arpOpReply).
Action().Move(binding.NxmFieldARPSha, binding.NxmFieldARPTha).
Action().SetARPSha(GlobalVirtualMAC).
Action().Move(binding.NxmFieldARPSpa, binding.NxmFieldARPTpa).
Action().SetARPSpa(peerGatewayIP).
Action().OutputInPort().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// arpResponderStaticFlow generates ARP reply for any ARP request with the same global virtual MAC.
// This flow is used in policy-only mode, where traffic is routed via IP, not MAC.
func (c *client) arpResponderStaticFlow(category cookie.Category) binding.Flow {
return arpResponderTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP).
MatchARPOp(arpOpRequest).
Action().Move(binding.NxmFieldSrcMAC, binding.NxmFieldDstMAC).
Action().SetSrcMAC(GlobalVirtualMAC).
Action().LoadARPOperation(arpOpReply).
Action().Move(binding.NxmFieldARPSha, binding.NxmFieldARPTha).
Action().SetARPSha(GlobalVirtualMAC).
Action().Move(binding.NxmFieldARPTpa, SwapField.GetNXFieldName()).
Action().Move(binding.NxmFieldARPSpa, binding.NxmFieldARPTpa).
Action().Move(SwapField.GetNXFieldName(), binding.NxmFieldARPSpa).
Action().OutputInPort().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// podIPSpoofGuardFlow generates the flow to check IP traffic sent out from a local Pod. Traffic from the host gateway interface
// is not checked, since it might be Pod-to-Service traffic or host namespace traffic.
func (c *client) podIPSpoofGuardFlow(ifIPs []net.IP, ifMAC net.HardwareAddr, ifOFPort uint32, category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, ifIP := range ifIPs {
ipProtocol := getIPProtocol(ifIP)
if ipProtocol == binding.ProtocolIP {
flows = append(flows, SpoofGuardTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol).
MatchInPort(ifOFPort).
MatchSrcMAC(ifMAC).
MatchSrcIP(ifIP).
Action().GotoTable(SpoofGuardTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done())
} else if ipProtocol == binding.ProtocolIPv6 {
flows = append(flows, SpoofGuardTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol).
MatchInPort(ifOFPort).
MatchSrcMAC(ifMAC).
MatchSrcIP(ifIP).
Action().GotoTable(IPv6Table.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done())
}
}
return flows
}
func getIPProtocol(ip net.IP) binding.Protocol {
var ipProtocol binding.Protocol
if ip.To4() != nil {
ipProtocol = binding.ProtocolIP
} else {
ipProtocol = binding.ProtocolIPv6
}
return ipProtocol
}
// serviceHairpinResponseDNATFlow generates the flow which transforms destination
// IP of the hairpin packet to the source IP.
func (c *client) serviceHairpinResponseDNATFlow(ipProtocol binding.Protocol) binding.Flow {
hpIP := hairpinIP
from := binding.NxmFieldSrcIPv4
to := binding.NxmFieldDstIPv4
if ipProtocol == binding.ProtocolIPv6 {
hpIP = hairpinIPv6
from = binding.NxmFieldSrcIPv6
to = binding.NxmFieldDstIPv6
}
return ServiceHairpinTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol).
MatchDstIP(hpIP).
Action().Move(from, to).
Action().LoadRegMark(HairpinRegMark).
Action().GotoTable(ServiceHairpinTable.GetNext()).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
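// Illustrative example (added for clarity, not part of the original code): for IPv4, a hairpin
// reply with src 10.10.0.5 (a hypothetical Pod IP) and dst 169.254.169.252 (hairpinIP) has its
// destination rewritten to 10.10.0.5, i.e. back to its own source, and HairpinRegMark is set so
// that L2ForwardingOutTable later outputs the packet with the IN_PORT action.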
// serviceHairpinRegSetFlows generates the flow to set the hairpin mark for a packet which comes from the Antrea gateway and
// whose output interface is also the Antrea gateway. In L2ForwardingOutTable (table 110), a packet with the hairpin mark is
// sent out with the IN_PORT action; otherwise, sending it out with the regular output action would cause it to be dropped.
func (c *client) serviceHairpinRegSetFlows(ipProtocol binding.Protocol) binding.Flow {
return HairpinSNATTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol).
MatchRegMark(FromGatewayRegMark).
MatchRegMark(ToGatewayRegMark).
Action().LoadRegMark(HairpinRegMark).
Action().GotoTable(L2ForwardingOutTable.GetID()).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// gatewayARPSpoofGuardFlow generates the flow to check ARP traffic sent out from the local gateway interface.
func (c *client) gatewayARPSpoofGuardFlow(gatewayIP net.IP, gatewayMAC net.HardwareAddr, category cookie.Category) binding.Flow {
return SpoofGuardTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP).
MatchInPort(config.HostGatewayOFPort).
MatchARPSha(gatewayMAC).
MatchARPSpa(gatewayIP).
Action().GotoTable(arpResponderTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// arpSpoofGuardFlow generates the flow to check ARP traffic sent out from local Pod interfaces.
func (c *client) arpSpoofGuardFlow(ifIP net.IP, ifMAC net.HardwareAddr, ifOFPort uint32, category cookie.Category) binding.Flow {
return SpoofGuardTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP).
MatchInPort(ifOFPort).
MatchARPSha(ifMAC).
MatchARPSpa(ifIP).
Action().GotoTable(arpResponderTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// sessionAffinityReselectFlow generates the flow which resubmits a Service-accessing
// packet back to ServiceLBTable if no endpointDNAT flow is matched. This
// case occurs when an Endpoint is removed while it is still the learned Endpoint
// selection of the Service.
func (c *client) sessionAffinityReselectFlow() binding.Flow {
return EndpointDNATTable.BuildFlow(priorityLow).
MatchRegMark(EpSelectedRegMark).
Action().LoadRegMark(EpToSelectRegMark).
Action().ResubmitToTable(ServiceLBTable.GetID()).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// gatewayIPSpoofGuardFlows generates the flows to skip spoof guard checking for traffic sent from the gateway interface.
func (c *client) gatewayIPSpoofGuardFlows(category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, proto := range c.ipProtocols {
nextTable := SpoofGuardTable.GetNext()
if proto == binding.ProtocolIPv6 {
nextTable = IPv6Table.GetID()
}
flows = append(flows,
SpoofGuardTable.BuildFlow(priorityNormal).MatchProtocol(proto).
MatchInPort(config.HostGatewayOFPort).
Action().GotoTable(nextTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
)
}
return flows
}
// serviceCIDRDNATFlows generates the flows to match the dst IP in the Service CIDR and output to the host gateway interface directly.
func (c *client) serviceCIDRDNATFlows(serviceCIDRs []*net.IPNet) []binding.Flow {
var flows []binding.Flow
for _, serviceCIDR := range serviceCIDRs {
if serviceCIDR != nil {
ipProto := getIPProtocol(serviceCIDR.IP)
flows = append(flows, DNATTable.BuildFlow(priorityNormal).MatchProtocol(ipProto).
MatchDstIPNet(*serviceCIDR).
Action().LoadToRegField(TargetOFPortField, config.HostGatewayOFPort).
Action().LoadRegMark(OFPortFoundRegMark).
Action().GotoTable(ConntrackCommitTable.GetID()).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done())
}
}
return flows
}
// serviceNeedLBFlow generates the flow to mark packets as needing load balancing.
func (c *client) serviceNeedLBFlow() binding.Flow {
return SessionAffinityTable.BuildFlow(priorityMiss).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Action().LoadRegMark(EpToSelectRegMark).
Done()
}
// arpNormalFlow generates the flow to respond to ARP in the normal way if no flow in arpResponderTable is matched.
func (c *client) arpNormalFlow(category cookie.Category) binding.Flow {
return arpResponderTable.BuildFlow(priorityLow).MatchProtocol(binding.ProtocolARP).
Action().Normal().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
func (c *client) allowRulesMetricFlows(conjunctionID uint32, ingress bool) []binding.Flow {
metricTable := IngressMetricTable
offset := 0
// We use the 0..31 bits of the ct_label to store the ingress rule ID and use the 32..63 bits to store the
// egress rule ID.
field := IngressRuleCTLabel
if !ingress {
metricTable = EgressMetricTable
offset = 32
field = EgressRuleCTLabel
}
metricFlow := func(isCTNew bool, protocol binding.Protocol) binding.Flow {
return metricTable.BuildFlow(priorityNormal).
MatchProtocol(protocol).
MatchCTStateNew(isCTNew).
MatchCTLabelField(0, uint64(conjunctionID)<<offset, field).
Action().GotoTable(metricTable.GetNext()).
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
var flows []binding.Flow
// These two flows track the number of sessions in addition to the packet and byte counts.
// The flow matching 'ct_state=+new' tracks the number of sessions and byte count of the first packet for each
// session.
// The flow matching 'ct_state=-new' tracks the byte/packet count of an established connection (both directions).
for _, proto := range c.ipProtocols {
flows = append(flows, metricFlow(true, proto), metricFlow(false, proto))
}
return flows
}
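// Worked example (added for illustration, not in the original source): for an egress
// rule with conjunctionID = 5, offset is 32, so the ct_label match value is
// uint64(5) << 32 = 0x0000000500000000; the egress rule ID occupies the upper 32 bits
// of the conntrack label, while an ingress rule ID (offset 0) occupies the lower 32 bits.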
func (c *client) denyRuleMetricFlow(conjunctionID uint32, ingress bool) binding.Flow {
metricTable := IngressMetricTable
if !ingress {
metricTable = EgressMetricTable
}
return metricTable.BuildFlow(priorityNormal).
MatchRegMark(CnpDenyRegMark).
MatchRegFieldWithValue(CNPDenyConjIDField, conjunctionID).
Action().Drop().
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
// ipv6Flows generates the flows to allow IPv6 packets from link-local addresses and
// handle multicast packets, Neighbor Solicitation and ND Advertisement packets properly.
func (c *client) ipv6Flows(category cookie.Category) []binding.Flow {
var flows []binding.Flow
_, ipv6LinkLocalIpnet, _ := net.ParseCIDR(ipv6LinkLocalAddr)
_, ipv6MulticastIpnet, _ := net.ParseCIDR(ipv6MulticastAddr)
flows = append(flows,
// Allow IPv6 packets (e.g. Multicast Listener Report Message V2) which are sent from link-local addresses in SpoofGuardTable,
// so that these packets will not be dropped.
SpoofGuardTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIPv6).
MatchSrcIPNet(*ipv6LinkLocalIpnet).
Action().GotoTable(IPv6Table.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Handle IPv6 Neighbor Solicitation and Neighbor Advertisement as a regular L2 learning Switch by using normal.
IPv6Table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolICMPv6).
MatchICMPv6Type(135).
MatchICMPv6Code(0).
Action().Normal().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
IPv6Table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolICMPv6).
MatchICMPv6Type(136).
MatchICMPv6Code(0).
Action().Normal().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Handle IPv6 multicast packets as a regular L2 learning Switch by using normal.
// It is used to ensure that all kinds of IPv6 multicast packets are properly handled (e.g. Multicast Listener Report Message V2).
IPv6Table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIPv6).
MatchDstIPNet(*ipv6MulticastIpnet).
Action().Normal().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
)
return flows
}
// conjunctionActionFlow generates the flow to jump to a specific table if policyRuleConjunction ID is matched. The priority of
// a conjunctionActionFlow is priorityLow for K8s NetworkPolicies, and the *priority assigned by PriorityAssigner for AntreaPolicy.
func (c *client) conjunctionActionFlow(conjunctionID uint32, table binding.Table, nextTable uint8, priority *uint16, enableLogging bool) []binding.Flow {
var ofPriority uint16
if priority == nil {
ofPriority = priorityLow
} else {
ofPriority = *priority
}
conjReg := TFIngressConjIDField
labelField := IngressRuleCTLabel
tableID := table.GetID()
if _, ok := egressTables[tableID]; ok {
conjReg = TFEgressConjIDField
labelField = EgressRuleCTLabel
}
conjActionFlow := func(proto binding.Protocol) binding.Flow {
ctZone := CtZone
if proto == binding.ProtocolIPv6 {
ctZone = CtZoneV6
}
if enableLogging {
fb := table.BuildFlow(ofPriority).MatchProtocol(proto).
MatchConjID(conjunctionID)
if c.ovsMetersAreSupported {
fb = fb.Action().Meter(PacketInMeterIDNP)
}
return fb.
Action().LoadToRegField(conjReg, conjunctionID). // Traceflow.
Action().LoadRegMark(DispositionAllowRegMark). // AntreaPolicy.
Action().LoadRegMark(CustomReasonLoggingRegMark). // Enable logging.
Action().SendToController(uint8(PacketInReasonNP)).
Action().CT(true, nextTable, ctZone). // CT action requires commit flag if actions other than NAT without arguments are specified.
LoadToLabelField(uint64(conjunctionID), labelField).
CTDone().
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
return table.BuildFlow(ofPriority).MatchProtocol(proto).
MatchConjID(conjunctionID).
Action().LoadToRegField(conjReg, conjunctionID). // Traceflow.
Action().CT(true, nextTable, ctZone). // CT action requires commit flag if actions other than NAT without arguments are specified.
LoadToLabelField(uint64(conjunctionID), labelField).
CTDone().
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
var flows []binding.Flow
for _, proto := range c.ipProtocols {
flows = append(flows, conjActionFlow(proto))
}
return flows
}
// conjunctionActionDenyFlow generates the flow to mark the packet to be denied
// (dropped or rejected) if policyRuleConjunction ID is matched.
// Any matched packet will be dropped in the corresponding metric table.
func (c *client) conjunctionActionDenyFlow(conjunctionID uint32, table binding.Table, priority *uint16, disposition uint32, enableLogging bool) binding.Flow {
ofPriority := *priority
metricTable := IngressMetricTable
tableID := table.GetID()
if _, ok := egressTables[tableID]; ok {
metricTable = EgressMetricTable
}
flowBuilder := table.BuildFlow(ofPriority).
MatchConjID(conjunctionID).
Action().LoadToRegField(CNPDenyConjIDField, conjunctionID).
Action().LoadRegMark(CnpDenyRegMark)
var customReason int
if c.enableDenyTracking {
customReason += CustomReasonDeny
flowBuilder = flowBuilder.
Action().LoadToRegField(APDispositionField, disposition)
}
if enableLogging {
customReason += CustomReasonLogging
flowBuilder = flowBuilder.
Action().LoadToRegField(APDispositionField, disposition)
}
if disposition == DispositionRej {
customReason += CustomReasonReject
}
if enableLogging || c.enableDenyTracking || disposition == DispositionRej {
if c.ovsMetersAreSupported {
flowBuilder = flowBuilder.Action().Meter(PacketInMeterIDNP)
}
flowBuilder = flowBuilder.
Action().LoadToRegField(CustomReasonField, uint32(customReason)).
Action().SendToController(uint8(PacketInReasonNP))
}
// We do not drop the packet immediately but send the packet to the metric table to update the rule metrics.
return flowBuilder.Action().GotoTable(metricTable.GetID()).
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
func (c *client) Disconnect() error {
return c.bridge.Disconnect()
}
func newFlowCategoryCache() *flowCategoryCache {
return &flowCategoryCache{}
}
// establishedConnectionFlows generates flows to ensure established connections skip the NetworkPolicy rules.
func (c *client) establishedConnectionFlows(category cookie.Category) (flows []binding.Flow) {
// egressDropTable checks the source address of packets, and drops packets sent from the AppliedToGroup but not
// matching the NetworkPolicy rules. Packets in established connections need not be checked with the
// egressRuleTable or the egressDropTable.
egressDropTable := EgressDefaultTable
// ingressDropTable checks the destination address of packets, and drops packets sent to the AppliedToGroup but not
// matching the NetworkPolicy rules. Packets in established connections need not be checked with the
// ingressRuleTable or ingressDropTable.
ingressDropTable := IngressDefaultTable
var allEstFlows []binding.Flow
for _, ipProto := range c.ipProtocols {
egressEstFlow := EgressRuleTable.BuildFlow(priorityHigh).MatchProtocol(ipProto).
MatchCTStateNew(false).MatchCTStateEst(true).
Action().GotoTable(egressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
ingressEstFlow := IngressRuleTable.BuildFlow(priorityHigh).MatchProtocol(ipProto).
MatchCTStateNew(false).MatchCTStateEst(true).
Action().GotoTable(ingressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
allEstFlows = append(allEstFlows, egressEstFlow, ingressEstFlow)
}
if !c.enableAntreaPolicy {
return allEstFlows
}
apFlows := make([]binding.Flow, 0)
for _, table := range GetAntreaPolicyEgressTables() {
for _, ipProto := range c.ipProtocols {
apEgressEstFlow := table.BuildFlow(priorityTopAntreaPolicy).MatchProtocol(ipProto).
MatchCTStateNew(false).MatchCTStateEst(true).
Action().GotoTable(egressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
apFlows = append(apFlows, apEgressEstFlow)
}
}
for _, table := range GetAntreaPolicyIngressTables() {
for _, ipProto := range c.ipProtocols {
apIngressEstFlow := table.BuildFlow(priorityTopAntreaPolicy).MatchProtocol(ipProto).
MatchCTStateNew(false).MatchCTStateEst(true).
Action().GotoTable(ingressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
apFlows = append(apFlows, apIngressEstFlow)
}
}
allEstFlows = append(allEstFlows, apFlows...)
return allEstFlows
}
// relatedConnectionFlows generates flows to ensure related connections skip the NetworkPolicy rules.
func (c *client) relatedConnectionFlows(category cookie.Category) (flows []binding.Flow) {
// egressDropTable checks the source address of packets, and drops packets sent from the AppliedToGroup but not
// matching the NetworkPolicy rules. Packets in related connections need not be checked with the
// egressRuleTable or the egressDropTable.
egressDropTable := EgressDefaultTable
// ingressDropTable checks the destination address of packets, and drops packets sent to the AppliedToGroup but not
// matching the NetworkPolicy rules. Packets in related connections need not be checked with the
// ingressRuleTable or ingressDropTable.
ingressDropTable := IngressDefaultTable
var allRelFlows []binding.Flow
for _, ipProto := range c.ipProtocols {
egressRelFlow := EgressRuleTable.BuildFlow(priorityHigh).MatchProtocol(ipProto).
MatchCTStateNew(false).MatchCTStateRel(true).
Action().GotoTable(egressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
ingressRelFlow := IngressRuleTable.BuildFlow(priorityHigh).MatchProtocol(ipProto).
MatchCTStateNew(false).MatchCTStateRel(true).
Action().GotoTable(ingressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
allRelFlows = append(allRelFlows, egressRelFlow, ingressRelFlow)
}
if !c.enableAntreaPolicy {
return allRelFlows
}
apFlows := make([]binding.Flow, 0)
for _, table := range GetAntreaPolicyEgressTables() {
for _, ipProto := range c.ipProtocols {
apEgressRelFlow := table.BuildFlow(priorityTopAntreaPolicy).MatchProtocol(ipProto).
MatchCTStateNew(false).MatchCTStateRel(true).
Action().GotoTable(egressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
apFlows = append(apFlows, apEgressRelFlow)
}
}
for _, table := range GetAntreaPolicyIngressTables() {
for _, ipProto := range c.ipProtocols {
apIngressRelFlow := table.BuildFlow(priorityTopAntreaPolicy).MatchProtocol(ipProto).
MatchCTStateNew(false).MatchCTStateRel(true).
Action().GotoTable(ingressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
apFlows = append(apFlows, apIngressRelFlow)
}
}
allRelFlows = append(allRelFlows, apFlows...)
return allRelFlows
}
// rejectBypassNetworkpolicyFlows generates flows to ensure reject responses generated
// by the controller skip the NetworkPolicy rules.
func (c *client) rejectBypassNetworkpolicyFlows(category cookie.Category) (flows []binding.Flow) {
// egressDropTable checks the source address of packets, and drops packets sent from the AppliedToGroup but not
// matching the NetworkPolicy rules. Generated reject responses need not be checked with the
// egressRuleTable or the egressDropTable.
egressDropTable := EgressDefaultTable
// ingressDropTable checks the destination address of packets, and drops packets sent to the AppliedToGroup but not
// matching the NetworkPolicy rules. Generated reject responses need not be checked with the
// ingressRuleTable or ingressDropTable.
ingressDropTable := IngressDefaultTable
var allRejFlows []binding.Flow
for _, ipProto := range c.ipProtocols {
egressRejFlow := EgressRuleTable.BuildFlow(priorityHigh).MatchProtocol(ipProto).
MatchRegFieldWithValue(CustomReasonField, CustomReasonReject).
Action().GotoTable(egressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
ingressRejFlow := IngressRuleTable.BuildFlow(priorityHigh).MatchProtocol(ipProto).
MatchRegFieldWithValue(CustomReasonField, CustomReasonReject).
Action().GotoTable(ingressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
allRejFlows = append(allRejFlows, egressRejFlow, ingressRejFlow)
}
if !c.enableAntreaPolicy {
return allRejFlows
}
apFlows := make([]binding.Flow, 0)
for _, table := range GetAntreaPolicyEgressTables() {
for _, ipProto := range c.ipProtocols {
apEgressRejFlow := table.BuildFlow(priorityTopAntreaPolicy).MatchProtocol(ipProto).
MatchRegFieldWithValue(CustomReasonField, CustomReasonReject).
Action().GotoTable(egressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
apFlows = append(apFlows, apEgressRejFlow)
}
}
for _, table := range GetAntreaPolicyIngressTables() {
for _, ipProto := range c.ipProtocols {
apIngressRejFlow := table.BuildFlow(priorityTopAntreaPolicy).MatchProtocol(ipProto).
MatchRegFieldWithValue(CustomReasonField, CustomReasonReject).
Action().GotoTable(ingressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
apFlows = append(apFlows, apIngressRejFlow)
}
}
allRejFlows = append(allRejFlows, apFlows...)
return allRejFlows
}
func (c *client) addFlowMatch(fb binding.FlowBuilder, matchKey *types.MatchKey, matchValue interface{}) binding.FlowBuilder {
switch matchKey {
case MatchDstOFPort:
// ofport number in NXM_NX_REG1 is used in ingress rule to match packets sent to local Pod.
fb = fb.MatchRegFieldWithValue(TargetOFPortField, uint32(matchValue.(int32)))
case MatchSrcOFPort:
fb = fb.MatchInPort(uint32(matchValue.(int32)))
case MatchDstIP:
fallthrough
case MatchDstIPv6:
fb = fb.MatchProtocol(matchKey.GetOFProtocol()).MatchDstIP(matchValue.(net.IP))
case MatchDstIPNet:
fallthrough
case MatchDstIPNetv6:
fb = fb.MatchProtocol(matchKey.GetOFProtocol()).MatchDstIPNet(matchValue.(net.IPNet))
case MatchSrcIP:
fallthrough
case MatchSrcIPv6:
fb = fb.MatchProtocol(matchKey.GetOFProtocol()).MatchSrcIP(matchValue.(net.IP))
case MatchSrcIPNet:
fb = fb.MatchProtocol(matchKey.GetOFProtocol()).MatchSrcIPNet(matchValue.(net.IPNet))
case MatchSrcIPNetv6:
fb = fb.MatchProtocol(matchKey.GetOFProtocol()).MatchSrcIPNet(matchValue.(net.IPNet))
case MatchTCPDstPort:
fallthrough
case MatchTCPv6DstPort:
fallthrough
case MatchUDPDstPort:
fallthrough
case MatchUDPv6DstPort:
fallthrough
case MatchSCTPDstPort:
fallthrough
case MatchSCTPv6DstPort:
fb = fb.MatchProtocol(matchKey.GetOFProtocol())
portValue := matchValue.(types.BitRange)
if portValue.Value > 0 {
fb = fb.MatchDstPort(portValue.Value, portValue.Mask)
}
case MatchTCPSrcPort:
fallthrough
case MatchTCPv6SrcPort:
fallthrough
case MatchUDPSrcPort:
fallthrough
case MatchUDPv6SrcPort:
fb = fb.MatchProtocol(matchKey.GetOFProtocol())
portValue := matchValue.(types.BitRange)
if portValue.Value > 0 {
fb = fb.MatchSrcPort(portValue.Value, portValue.Mask)
}
case MatchServiceGroupID:
fb = fb.MatchRegFieldWithValue(ServiceGroupIDField, matchValue.(uint32))
}
return fb
}
// conjunctionExceptionFlow generates the flow to jump to a specific table if both policyRuleConjunction ID and except address are matched.
// Keeping this for reference to generic exception flow.
func (c *client) conjunctionExceptionFlow(conjunctionID uint32, tableID uint8, nextTable uint8, matchKey *types.MatchKey, matchValue interface{}) binding.Flow {
conjReg := TFIngressConjIDField
if tableID == EgressRuleTable.GetID() {
conjReg = TFEgressConjIDField
}
fb := getTableByID(tableID).BuildFlow(priorityNormal).MatchConjID(conjunctionID)
return c.addFlowMatch(fb, matchKey, matchValue).
Action().LoadToRegField(conjReg, conjunctionID). // Traceflow.
Action().GotoTable(nextTable).
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
// conjunctiveMatchFlow generates the flow to set conjunctive actions if the match condition is matched.
func (c *client) conjunctiveMatchFlow(tableID uint8, matchKey *types.MatchKey, matchValue interface{}, priority *uint16, actions []*conjunctiveAction) binding.Flow {
var ofPriority uint16
if priority != nil {
ofPriority = *priority
} else {
ofPriority = priorityNormal
}
fb := getTableByID(tableID).BuildFlow(ofPriority)
fb = c.addFlowMatch(fb, matchKey, matchValue)
if c.deterministic {
sort.Sort(conjunctiveActionsInOrder(actions))
}
for _, act := range actions {
fb.Action().Conjunction(act.conjID, act.clauseID, act.nClause)
}
return fb.Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).Done()
}
// defaultDropFlow generates the flow to drop packets if the match condition is matched.
func (c *client) defaultDropFlow(table binding.Table, matchKey *types.MatchKey, matchValue interface{}) binding.Flow {
fb := table.BuildFlow(priorityNormal)
if c.enableDenyTracking {
return c.addFlowMatch(fb, matchKey, matchValue).
Action().Drop().
Action().LoadRegMark(DispositionDropRegMark).
Action().LoadRegMark(CustomReasonDenyRegMark).
Action().SendToController(uint8(PacketInReasonNP)).
Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).
Done()
}
return c.addFlowMatch(fb, matchKey, matchValue).
Action().Drop().
Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).
Done()
}
// dnsPacketInFlow generates the flow to send DNS response packets of Pods selected by FQDN policies
// to the fqdnController for processing.
func (c *client) dnsPacketInFlow(conjunctionID uint32) binding.Flow {
return AntreaPolicyIngressRuleTable.BuildFlow(priorityDNSIntercept).
MatchConjID(conjunctionID).
Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).
Action().LoadToRegField(CustomReasonField, CustomReasonDNS).
Action().SendToController(uint8(PacketInReasonNP)).
Done()
}
// localProbeFlow generates the flow to forward locally generated packets to ConntrackCommitTable, bypassing ingress
// rules of Network Policies. The packets are sent by kubelet to probe the liveness/readiness of local Pods.
// On Linux and when OVS kernel datapath is used, it identifies locally generated packets by matching the
// HostLocalSourceMark; otherwise it matches the source IP. The difference exists because:
// 1. On Windows, kube-proxy userspace mode is used, and currently there is no way to distinguish kubelet generated
// traffic from kube-proxy proxied traffic.
// 2. pkt_mark field is not properly supported for OVS userspace (netdev) datapath.
// Note that the latter approach has a defect: NodePort Service access by external clients will be masqueraded as
// a local gateway IP and will therefore bypass Network Policies. See https://github.com/antrea-io/antrea/issues/280.
// TODO: Fix it after replacing kube-proxy with AntreaProxy.
func (c *client) localProbeFlow(localGatewayIPs []net.IP, category cookie.Category) []binding.Flow {
var flows []binding.Flow
if runtime.IsWindowsPlatform() || c.ovsDatapathType == ovsconfig.OVSDatapathNetdev {
for _, ip := range localGatewayIPs {
ipProtocol := getIPProtocol(ip)
flows = append(flows, IngressRuleTable.BuildFlow(priorityHigh).
MatchProtocol(ipProtocol).
MatchSrcIP(ip).
Action().GotoTable(ConntrackCommitTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done())
}
} else {
flows = append(flows, IngressRuleTable.BuildFlow(priorityHigh).
MatchPktMark(types.HostLocalSourceMark, &types.HostLocalSourceMark).
Action().GotoTable(ConntrackCommitTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done())
}
return flows
}
// snatSkipNodeFlow installs a flow to skip SNAT for traffic to the transport IP of a remote Node.
func (c *client) snatSkipNodeFlow(nodeIP net.IP, category cookie.Category) binding.Flow {
ipProto := getIPProtocol(nodeIP)
// This flow is for the traffic to the remote Node IP.
return L3ForwardingTable.BuildFlow(priorityNormal).
MatchProtocol(ipProto).
MatchRegMark(FromLocalRegMark).
MatchDstIP(nodeIP).
Action().GotoTable(L3ForwardingTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// snatCommonFlows installs the default flows for performing SNAT for traffic to
// the external network. The flows identify packets destined for the external network and send
// them to SNATTable, where SNAT IPs are looked up for the packets.
func (c *client) snatCommonFlows(nodeIP net.IP, localSubnet net.IPNet, localGatewayMAC net.HardwareAddr, category cookie.Category) []binding.Flow {
nextTable := L3ForwardingTable.GetNext()
ipProto := getIPProtocol(localSubnet.IP)
flows := []binding.Flow{
// First install flows for traffic that should bypass SNAT.
// This flow is for traffic to the local Pod subnet that doesn't need MAC rewriting (L2 forwarding case). Other
// traffic to the local Pod subnet will be handled by L3 forwarding rules.
L3ForwardingTable.BuildFlow(priorityNormal).
MatchProtocol(ipProto).
MatchRegFieldWithValue(RewriteMACRegMark.GetField(), 0).
MatchDstIPNet(localSubnet).
Action().GotoTable(nextTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// This flow is for the traffic to the local Node IP.
L3ForwardingTable.BuildFlow(priorityNormal).
MatchProtocol(ipProto).
MatchRegMark(FromLocalRegMark).
MatchDstIP(nodeIP).
Action().GotoTable(nextTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// The return traffic of connections to a local Pod through the gateway interface (so FromGatewayCTMark is set)
// should bypass SNAT too. But it has been covered by the gatewayCT related flow generated in l3FwdFlowToGateway
// which forwards all reply traffic for such connections back to the gateway interface with a high priority.
// Send the traffic to external to SNATTable.
L3ForwardingTable.BuildFlow(priorityLow).
MatchProtocol(ipProto).
MatchRegMark(FromLocalRegMark).
Action().GotoTable(SNATTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// For the traffic tunneled from remote Nodes, rewrite the
// destination MAC to the gateway interface MAC.
L3ForwardingTable.BuildFlow(priorityLow).
MatchProtocol(ipProto).
MatchRegMark(FromTunnelRegMark).
Action().SetDstMAC(localGatewayMAC).
Action().GotoTable(SNATTable.GetID()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Drop the traffic from remote Nodes if no matched SNAT policy.
SNATTable.BuildFlow(priorityLow).
MatchProtocol(ipProto).
MatchCTStateNew(true).MatchCTStateTrk(true).
MatchRegMark(FromTunnelRegMark).
Action().Drop().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
}
return flows
}
// snatIPFromTunnelFlow generates a flow that marks SNAT packets tunnelled from
// remote Nodes. The SNAT IP matches the packet's tunnel destination IP.
func (c *client) snatIPFromTunnelFlow(snatIP net.IP, mark uint32) binding.Flow {
ipProto := getIPProtocol(snatIP)
return SNATTable.BuildFlow(priorityNormal).
MatchProtocol(ipProto).
MatchCTStateNew(true).MatchCTStateTrk(true).
MatchTunnelDst(snatIP).
Action().LoadPktMarkRange(mark, snatPktMarkRange).
Action().GotoTable(L3DecTTLTable.GetID()).
Cookie(c.cookieAllocator.Request(cookie.SNAT).Raw()).
Done()
}
// snatRuleFlow generates a flow that applies the SNAT rule for a local Pod. If
// the SNAT IP exists on the local Node, it sets the packet mark with the ID of
// the SNAT IP, for the traffic from the ofPort to external; if the SNAT IP is
// on a remote Node, it tunnels the packets to the SNAT IP.
func (c *client) snatRuleFlow(ofPort uint32, snatIP net.IP, snatMark uint32, localGatewayMAC net.HardwareAddr) binding.Flow {
ipProto := getIPProtocol(snatIP)
if snatMark != 0 {
// Local SNAT IP.
return SNATTable.BuildFlow(priorityNormal).
MatchProtocol(ipProto).
MatchCTStateNew(true).MatchCTStateTrk(true).
MatchInPort(ofPort).
Action().LoadPktMarkRange(snatMark, snatPktMarkRange).
Action().GotoTable(SNATTable.GetNext()).
Cookie(c.cookieAllocator.Request(cookie.SNAT).Raw()).
Done()
}
// SNAT IP should be on a remote Node.
return SNATTable.BuildFlow(priorityNormal).
MatchProtocol(ipProto).
MatchInPort(ofPort).
Action().SetSrcMAC(localGatewayMAC).
Action().SetDstMAC(GlobalVirtualMAC).
// Set tunnel destination to the SNAT IP.
Action().SetTunnelDst(snatIP).
Action().GotoTable(L3DecTTLTable.GetID()).
Cookie(c.cookieAllocator.Request(cookie.SNAT).Raw()).
Done()
}
// loadBalancerServiceFromOutsideFlow generates the flow to forward LoadBalancer Service traffic coming from outside the Node
// to the gateway. kube-proxy will then handle the traffic.
// This flow is for Windows Node only.
func (c *client) loadBalancerServiceFromOutsideFlow(svcIP net.IP, svcPort uint16, protocol binding.Protocol) binding.Flow {
return UplinkTable.BuildFlow(priorityHigh).
MatchProtocol(protocol).
MatchDstPort(svcPort, nil).
MatchRegMark(FromUplinkRegMark).
MatchDstIP(svcIP).
Action().Output(config.HostGatewayOFPort).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// serviceClassifierFlows generates the flows to match the first packet of NodePort Service traffic and set a bit of a register
// to mark the Service type as NodePort.
func (c *client) serviceClassifierFlows(nodePortAddresses []net.IP, ipProtocol binding.Protocol) []binding.Flow {
virtualServiceIP := config.VirtualServiceIPv4
if ipProtocol == binding.ProtocolIPv6 {
virtualServiceIP = config.VirtualServiceIPv6
}
// Generate flows for every NodePort IP address. The flows are used to match the first packet of NodePort Service traffic from
// Pods.
var flows []binding.Flow
for i := range nodePortAddresses {
flows = append(flows,
ServiceClassifierTable.BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
MatchProtocol(ipProtocol).
MatchDstIP(nodePortAddresses[i]).
Action().LoadRegMark(ToNodePortAddressRegMark).
Done())
}
// Generate a flow for the virtual IP. The flow is used to match the first packet of NodePort Service traffic from the Antrea gateway,
// because the destination IP of the packet has already been DNATed to the virtual IP on the host.
flows = append(flows,
ServiceClassifierTable.BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
MatchProtocol(ipProtocol).
MatchDstIP(virtualServiceIP).
Action().LoadRegMark(ToNodePortAddressRegMark).
Done())
return flows
}
// serviceLearnFlow generates the flow with learn action which adds new flows in
// sessionAffinityTable according to the Endpoint selection decision.
func (c *client) serviceLearnFlow(groupID binding.GroupIDType, svcIP net.IP, svcPort uint16, protocol binding.Protocol, affinityTimeout uint16, nodeLocalExternal bool, svcType v1.ServiceType) binding.Flow {
// Using unique cookie ID here to avoid learned flow cascade deletion.
cookieID := c.cookieAllocator.RequestWithObjectID(cookie.Service, uint32(groupID)).Raw()
var flowBuilder binding.FlowBuilder
if svcType == v1.ServiceTypeNodePort {
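// Pack the NodePort classification mark and the Endpoint selection state into one register value so that a
// single MatchRegFieldWithValue match covers both conditions.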
unionVal := (ToNodePortAddressRegMark.GetValue() << ServiceEPStateField.GetRange().Length()) + EpToLearnRegMark.GetValue()
flowBuilder = ServiceLBTable.BuildFlow(priorityLow).
Cookie(cookieID).
MatchRegFieldWithValue(NodePortUnionField, unionVal).
MatchProtocol(protocol).
MatchDstPort(svcPort, nil)
} else {
flowBuilder = ServiceLBTable.BuildFlow(priorityLow).
Cookie(cookieID).
MatchRegMark(EpToLearnRegMark).
MatchDstIP(svcIP).
MatchProtocol(protocol).
MatchDstPort(svcPort, nil)
}
// affinityTimeout is used as the OpenFlow "hard timeout": learned flow will be removed from
// OVS after that time regardless of whether traffic is still hitting the flow. This is the
// desired behavior based on the K8s spec. Note that existing connections will keep going to
// the same endpoint because of connection tracking; and that is also the desired behavior.
learnFlowBuilderLearnAction := flowBuilder.
Action().Learn(SessionAffinityTable.GetID(), priorityNormal, 0, affinityTimeout, cookieID).
DeleteLearned()
ipProtocol := binding.ProtocolIP
switch protocol {
case binding.ProtocolTCP:
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedTCPDstPort()
case binding.ProtocolUDP:
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedUDPDstPort()
case binding.ProtocolSCTP:
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedSCTPDstPort()
case binding.ProtocolTCPv6:
ipProtocol = binding.ProtocolIPv6
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedTCPv6DstPort()
case binding.ProtocolUDPv6:
ipProtocol = binding.ProtocolIPv6
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedUDPv6DstPort()
case binding.ProtocolSCTPv6:
ipProtocol = binding.ProtocolIPv6
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedSCTPv6DstPort()
}
// If externalTrafficPolicy of NodePort/LoadBalancer is Cluster, the learned flow which
// is used to match the first packet of NodePort/LoadBalancer also requires SNAT.
if (svcType == v1.ServiceTypeNodePort || svcType == v1.ServiceTypeLoadBalancer) && !nodeLocalExternal {
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.LoadRegMark(ServiceNeedSNATRegMark)
}
if ipProtocol == binding.ProtocolIP {
return learnFlowBuilderLearnAction.
MatchLearnedDstIP().
MatchLearnedSrcIP().
LoadFieldToField(EndpointIPField, EndpointIPField).
LoadFieldToField(EndpointPortField, EndpointPortField).
LoadRegMark(EpSelectedRegMark).
LoadRegMark(RewriteMACRegMark).
Done().
Action().LoadRegMark(EpSelectedRegMark).
Action().GotoTable(EndpointDNATTable.GetID()).
Done()
} else if ipProtocol == binding.ProtocolIPv6 {
return learnFlowBuilderLearnAction.
MatchLearnedDstIPv6().
MatchLearnedSrcIPv6().
LoadXXRegToXXReg(EndpointIP6Field, EndpointIP6Field).
LoadFieldToField(EndpointPortField, EndpointPortField).
LoadRegMark(EpSelectedRegMark).
LoadRegMark(RewriteMACRegMark).
Done().
Action().LoadRegMark(EpSelectedRegMark).
Action().GotoTable(EndpointDNATTable.GetID()).
Done()
}
return nil
}
// serviceLBFlow generates the flow which uses the specific group to do Endpoint
// selection.
func (c *client) serviceLBFlow(groupID binding.GroupIDType, svcIP net.IP, svcPort uint16, protocol binding.Protocol, withSessionAffinity, nodeLocalExternal bool, svcType v1.ServiceType) binding.Flow {
var lbResultMark *binding.RegMark
if withSessionAffinity {
lbResultMark = EpToLearnRegMark
} else {
lbResultMark = EpSelectedRegMark
}
var flowBuilder binding.FlowBuilder
if svcType == v1.ServiceTypeNodePort {
// If externalTrafficPolicy of NodePort is Cluster, the first packet of NodePort requires SNAT, so nodeLocalExternal
// will be false, and ServiceNeedSNATRegMark will be set. If externalTrafficPolicy of NodePort is Local, the first
// packet of NodePort doesn't require SNAT, so ServiceNeedSNATRegMark won't be set.
unionVal := (ToNodePortAddressRegMark.GetValue() << ServiceEPStateField.GetRange().Length()) + EpToSelectRegMark.GetValue()
flowBuilder = ServiceLBTable.BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
MatchProtocol(protocol).
MatchRegFieldWithValue(NodePortUnionField, unionVal).
MatchDstPort(svcPort, nil).
Action().LoadRegMark(lbResultMark).
Action().LoadRegMark(RewriteMACRegMark)
if !nodeLocalExternal {
flowBuilder = flowBuilder.Action().LoadRegMark(ServiceNeedSNATRegMark)
}
} else {
// If the Service type is LoadBalancer, the same logic as for NodePort above applies.
flowBuilder = ServiceLBTable.BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
MatchProtocol(protocol).
MatchDstPort(svcPort, nil).
MatchDstIP(svcIP).
MatchRegMark(EpToSelectRegMark).
Action().LoadRegMark(lbResultMark).
Action().LoadRegMark(RewriteMACRegMark)
if svcType == v1.ServiceTypeLoadBalancer && !nodeLocalExternal {
flowBuilder = flowBuilder.Action().LoadRegMark(ServiceNeedSNATRegMark)
}
}
return flowBuilder.
Action().LoadToRegField(ServiceGroupIDField, uint32(groupID)).
Action().Group(groupID).Done()
}
// endpointDNATFlow generates the flow which transforms the Service Cluster IP
// to the Endpoint IP according to the Endpoint selection decision which is stored
// in regs.
func (c *client) endpointDNATFlow(endpointIP net.IP, endpointPort uint16, protocol binding.Protocol) binding.Flow {
ipProtocol := getIPProtocol(endpointIP)
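// Pack the Endpoint selection state and the Endpoint port into one register value so they can be matched together.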
unionVal := (EpSelectedRegMark.GetValue() << EndpointPortField.GetRange().Length()) + uint32(endpointPort)
flowBuilder := EndpointDNATTable.BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
MatchRegFieldWithValue(EpUnionField, unionVal).
MatchProtocol(protocol)
ctZone := CtZone
if ipProtocol == binding.ProtocolIP {
ipVal := binary.BigEndian.Uint32(endpointIP.To4())
flowBuilder = flowBuilder.MatchRegFieldWithValue(EndpointIPField, ipVal)
} else {
ctZone = CtZoneV6
ipVal := []byte(endpointIP)
flowBuilder = flowBuilder.MatchXXReg(EndpointIP6Field.GetRegID(), ipVal)
}
return flowBuilder.Action().CT(true, EndpointDNATTable.GetNext(), ctZone).
DNAT(
&binding.IPRange{StartIP: endpointIP, EndIP: endpointIP},
&binding.PortRange{StartPort: endpointPort, EndPort: endpointPort},
).
LoadToCtMark(ServiceCTMark).
CTDone().
Done()
}
// hairpinSNATFlow generates the flow which does SNAT for Service
// hairpin packets and loads the hairpin mark to markReg.
func (c *client) hairpinSNATFlow(endpointIP net.IP) binding.Flow {
ipProtocol := getIPProtocol(endpointIP)
hpIP := hairpinIP
if ipProtocol == binding.ProtocolIPv6 {
hpIP = hairpinIPv6
}
return HairpinSNATTable.BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
MatchProtocol(ipProtocol).
MatchDstIP(endpointIP).
MatchSrcIP(endpointIP).
Action().SetSrcIP(hpIP).
Action().LoadRegMark(HairpinRegMark).
Action().GotoTable(L2ForwardingOutTable.GetID()).
Done()
}
// serviceEndpointGroup creates/modifies the group/buckets of Endpoints. If
// withSessionAffinity is true, the buckets will resubmit packets back to
// ServiceLBTable to trigger the learn flow, which will then send packets
// to EndpointDNATTable. Otherwise, buckets will resubmit packets to
// EndpointDNATTable directly.
func (c *client) serviceEndpointGroup(groupID binding.GroupIDType, withSessionAffinity bool, endpoints ...proxy.Endpoint) binding.Group {
group := c.bridge.CreateGroup(groupID).ResetBuckets()
var resubmitTableID uint8
if withSessionAffinity {
resubmitTableID = ServiceLBTable.GetID()
} else {
resubmitTableID = EndpointDNATTable.GetID()
}
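// Create one bucket per Endpoint; all buckets are given the same weight, so no Endpoint is preferred during selection.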
for _, endpoint := range endpoints {
endpointPort, _ := endpoint.Port()
endpointIP := net.ParseIP(endpoint.IP())
portVal := portToUint16(endpointPort)
ipProtocol := getIPProtocol(endpointIP)
if ipProtocol == binding.ProtocolIP {
ipVal := binary.BigEndian.Uint32(endpointIP.To4())
group = group.Bucket().Weight(100).
LoadToRegField(EndpointIPField, ipVal).
LoadToRegField(EndpointPortField, uint32(portVal)).
ResubmitToTable(resubmitTableID).
Done()
} else if ipProtocol == binding.ProtocolIPv6 {
ipVal := []byte(endpointIP)
group = group.Bucket().Weight(100).
LoadXXReg(EndpointIP6Field.GetRegID(), ipVal).
LoadToRegField(EndpointPortField, uint32(portVal)).
ResubmitToTable(resubmitTableID).
Done()
}
}
return group
}
// decTTLFlows decrements TTL by one for the packets forwarded across Nodes.
// The TTL decrement should be skipped for the packets which enter OVS pipeline
// from the gateway interface, as the host IP stack should have decremented the
// TTL already for such packets.
func (c *client) decTTLFlows(category cookie.Category) []binding.Flow {
var flows []binding.Flow
for _, proto := range c.ipProtocols {
flows = append(flows,
// Skip packets from the gateway interface.
L3DecTTLTable.BuildFlow(priorityHigh).
Cookie(c.cookieAllocator.Request(category).Raw()).
MatchProtocol(proto).
MatchRegMark(FromGatewayRegMark).
Action().GotoTable(L3DecTTLTable.GetNext()).
Done(),
L3DecTTLTable.BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(category).Raw()).
MatchProtocol(proto).
Action().DecTTL().
Action().GotoTable(L3DecTTLTable.GetNext()).
Done(),
)
}
return flows
}
// externalFlows returns the flows needed to enable SNAT for external traffic.
func (c *client) externalFlows(nodeIP net.IP, localSubnet net.IPNet, localGatewayMAC net.HardwareAddr) []binding.Flow {
if !c.enableEgress {
return nil
}
return c.snatCommonFlows(nodeIP, localSubnet, localGatewayMAC, cookie.SNAT)
}
// policyConjKeyFunc knows how to get the key of a *policyRuleConjunction.
func policyConjKeyFunc(obj interface{}) (string, error) {
conj := obj.(*policyRuleConjunction)
return fmt.Sprint(conj.id), nil
}
// priorityIndexFunc knows how to get priority of actionFlows in a *policyRuleConjunction.
// It's provided to cache.Indexer to build an index of policyRuleConjunction.
func priorityIndexFunc(obj interface{}) ([]string, error) {
conj := obj.(*policyRuleConjunction)
return conj.ActionFlowPriorities(), nil
}
// genPacketInMeter generates a meter entry with specific meterID and rate.
// `rate` is represented as number of packets per second.
// Packets which exceed the rate will be dropped.
func (c *client) genPacketInMeter(meterID binding.MeterIDType, rate uint32) binding.Meter {
meter := c.bridge.CreateMeter(meterID, ofctrl.MeterBurst|ofctrl.MeterPktps).ResetMeterBands()
meter = meter.MeterBand().
MeterType(ofctrl.MeterDrop).
Rate(rate).
Burst(2 * rate).
Done()
return meter
}
func (c *client) generatePipeline() {
c.createOFTable(ClassifierTable, SpoofGuardTable.GetID(), binding.TableMissActionDrop)
c.createOFTable(arpResponderTable, binding.LastTableID, binding.TableMissActionDrop)
c.createOFTable(ConntrackTable, ConntrackStateTable.GetID(), binding.TableMissActionNone)
c.createOFTable(EgressRuleTable, EgressDefaultTable.GetID(), binding.TableMissActionNext)
c.createOFTable(EgressDefaultTable, EgressMetricTable.GetID(), binding.TableMissActionNext)
c.createOFTable(EgressMetricTable, L3ForwardingTable.GetID(), binding.TableMissActionNext)
c.createOFTable(L3ForwardingTable, L2ForwardingCalcTable.GetID(), binding.TableMissActionNext)
c.createOFTable(L3DecTTLTable, L2ForwardingCalcTable.GetID(), binding.TableMissActionNext)
// Packets from L2ForwardingCalcTable should be forwarded to IngressMetricTable by default to collect ingress stats.
c.createOFTable(L2ForwardingCalcTable, IngressMetricTable.GetID(), binding.TableMissActionNext)
c.createOFTable(IngressRuleTable, IngressDefaultTable.GetID(), binding.TableMissActionNext)
c.createOFTable(IngressDefaultTable, IngressMetricTable.GetID(), binding.TableMissActionNext)
c.createOFTable(IngressMetricTable, ConntrackCommitTable.GetID(), binding.TableMissActionNext)
c.createOFTable(L2ForwardingOutTable, binding.LastTableID, binding.TableMissActionDrop)
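// When AntreaProxy is enabled, the Service tables (ServiceHairpin, ServiceLB, EndpointDNAT, etc.) are wired
// into the pipeline; otherwise the legacy DNATTable path is used.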
if c.enableProxy {
SpoofGuardTable = c.createOFTable(SpoofGuardTable, ServiceHairpinTable.GetID(), binding.TableMissActionDrop)
IPv6Table = c.createOFTable(IPv6Table, ServiceHairpinTable.GetID(), binding.TableMissActionNext)
if c.proxyAll {
ServiceHairpinTable = c.createOFTable(ServiceHairpinTable, ServiceConntrackTable.GetID(), binding.TableMissActionNext)
ServiceConntrackTable = c.createOFTable(ServiceConntrackTable, ConntrackTable.GetID(), binding.TableMissActionNext)
ServiceClassifierTable = c.createOFTable(ServiceClassifierTable, binding.LastTableID, binding.TableMissActionNone)
ServiceConntrackCommitTable = c.createOFTable(ServiceConntrackCommitTable, HairpinSNATTable.GetID(), binding.TableMissActionNext)
} else {
ServiceHairpinTable = c.createOFTable(ServiceHairpinTable, ConntrackTable.GetID(), binding.TableMissActionNext)
}
ConntrackStateTable = c.createOFTable(ConntrackStateTable, EndpointDNATTable.GetID(), binding.TableMissActionNext)
SessionAffinityTable = c.createOFTable(SessionAffinityTable, binding.LastTableID, binding.TableMissActionNone)
ServiceLBTable = c.createOFTable(ServiceLBTable, EndpointDNATTable.GetID(), binding.TableMissActionNext)
EndpointDNATTable = c.createOFTable(EndpointDNATTable, c.egressEntryTable, binding.TableMissActionNext)
ConntrackCommitTable = c.createOFTable(ConntrackCommitTable, HairpinSNATTable.GetID(), binding.TableMissActionNext)
HairpinSNATTable = c.createOFTable(HairpinSNATTable, L2ForwardingOutTable.GetID(), binding.TableMissActionNext)
} else {
c.createOFTable(SpoofGuardTable, ConntrackTable.GetID(), binding.TableMissActionDrop)
c.createOFTable(IPv6Table, ConntrackTable.GetID(), binding.TableMissActionNext)
c.createOFTable(ConntrackStateTable, DNATTable.GetID(), binding.TableMissActionNext)
c.createOFTable(DNATTable, c.egressEntryTable, binding.TableMissActionNext)
c.createOFTable(ConntrackCommitTable, L2ForwardingOutTable.GetID(), binding.TableMissActionNext)
}
// The default SNAT is implemented with OVS on Windows.
if c.enableEgress || runtime.IsWindowsPlatform() {
c.createOFTable(SNATTable, L2ForwardingCalcTable.GetID(), binding.TableMissActionNext)
}
if runtime.IsWindowsPlatform() {
c.createOFTable(UplinkTable, SpoofGuardTable.GetID(), binding.TableMissActionNone)
}
if c.enableAntreaPolicy {
c.createOFTable(AntreaPolicyEgressRuleTable, EgressRuleTable.GetID(), binding.TableMissActionNext)
c.createOFTable(AntreaPolicyIngressRuleTable, IngressRuleTable.GetID(), binding.TableMissActionNext)
}
}
// createOFTable sets the missAction and the next table ID of the given table according to the pipeline. It then creates the table on the bridge and finally adds the table to the ofTableCache.
func (c *client) createOFTable(table binding.Table, nextID uint8, missAction binding.MissActionType) binding.Table {
c.bridge.CreateTable(table, nextID, missAction)
ofTableCache.Add(table)
return table
}
// NewClient is the constructor of the Client interface.
func NewClient(bridgeName string,
mgmtAddr string,
ovsDatapathType ovsconfig.OVSDatapathType,
enableProxy bool,
enableAntreaPolicy bool,
enableEgress bool,
enableDenyTracking bool,
proxyAll bool) Client {
bridge := binding.NewOFBridge(bridgeName, mgmtAddr)
policyCache := cache.NewIndexer(
policyConjKeyFunc,
cache.Indexers{priorityIndex: priorityIndexFunc},
)
c := &client{
bridge: bridge,
enableProxy: enableProxy,
proxyAll: proxyAll,
enableAntreaPolicy: enableAntreaPolicy,
enableDenyTracking: enableDenyTracking,
enableEgress: enableEgress,
nodeFlowCache: newFlowCategoryCache(),
podFlowCache: newFlowCategoryCache(),
serviceFlowCache: newFlowCategoryCache(),
tfFlowCache: newFlowCategoryCache(),
policyCache: policyCache,
groupCache: sync.Map{},
globalConjMatchFlowCache: map[string]*conjMatchFlowContext{},
packetInHandlers: map[uint8]map[string]PacketInHandler{},
ovsctlClient: ovsctl.NewClient(bridgeName),
ovsDatapathType: ovsDatapathType,
ovsMetersAreSupported: ovsMetersAreSupported(ovsDatapathType),
}
c.ofEntryOperations = c
if enableAntreaPolicy {
c.egressEntryTable, c.ingressEntryTable = AntreaPolicyEgressRuleTable.GetID(), AntreaPolicyIngressRuleTable.GetID()
} else {
c.egressEntryTable, c.ingressEntryTable = EgressRuleTable.GetID(), IngressRuleTable.GetID()
}
if enableEgress {
c.snatFlowCache = newFlowCategoryCache()
}
c.generatePipeline()
return c
}
type conjunctiveActionsInOrder []*conjunctiveAction
func (sl conjunctiveActionsInOrder) Len() int { return len(sl) }
func (sl conjunctiveActionsInOrder) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl conjunctiveActionsInOrder) Less(i, j int) bool {
if sl[i].conjID != sl[j].conjID {
return sl[i].conjID < sl[j].conjID
}
if sl[i].clauseID != sl[j].clauseID {
return sl[i].clauseID < sl[j].clauseID
}
return sl[i].nClause < sl[j].nClause
}
| 1 | 45,021 | Do we still need this? | antrea-io-antrea | go |
@@ -108,9 +108,6 @@ public abstract class AbstractEeaSendRawTransaction implements JsonRpcMethod {
final Transaction privateMarkerTransaction =
createPrivateMarkerTransaction(privateTransaction, user);
- LOG.error("CHEESE");
- LOG.error(privateMarkerTransaction.getHash());
-
return transactionPool
.addLocalTransaction(privateMarkerTransaction)
.either( | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.eea;
import static org.apache.logging.log4j.LogManager.getLogger;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcEnclaveErrorConverter.convertEnclaveInvalidReason;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcErrorConverter.convertTransactionInvalidReason;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.DECODE_ERROR;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.JsonRpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.PrivacyIdProvider;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcErrorResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.privacy.PrivateTransaction;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.rlp.RLPException;
import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason;
import org.hyperledger.besu.ethereum.util.NonceProvider;
import org.hyperledger.besu.plugin.data.TransactionType;
import org.hyperledger.besu.plugin.services.privacy.PrivateMarkerTransactionFactory;
import java.util.Optional;
import java.util.concurrent.locks.Lock;
import com.google.common.util.concurrent.Striped;
import io.vertx.ext.auth.User;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.bytes.Bytes;
public abstract class AbstractEeaSendRawTransaction implements JsonRpcMethod {
private static final Logger LOG = getLogger();
private final TransactionPool transactionPool;
private final PrivacyIdProvider privacyIdProvider;
private final PrivateMarkerTransactionFactory privateMarkerTransactionFactory;
private final NonceProvider publicNonceProvider;
private static final int MAX_CONCURRENT_PMT_SIGNATURE_REQUESTS = 10;
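// Locks are striped by sender address (see response()) to serialize PMT creation and submission for a given
// sender while bounding the number of lock instances.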
private final Striped<Lock> stripedLock =
Striped.lazyWeakLock(MAX_CONCURRENT_PMT_SIGNATURE_REQUESTS);
protected AbstractEeaSendRawTransaction(
final TransactionPool transactionPool,
final PrivacyIdProvider privacyIdProvider,
final PrivateMarkerTransactionFactory privateMarkerTransactionFactory,
final NonceProvider publicNonceProvider) {
this.transactionPool = transactionPool;
this.privacyIdProvider = privacyIdProvider;
this.privateMarkerTransactionFactory = privateMarkerTransactionFactory;
this.publicNonceProvider = publicNonceProvider;
}
@Override
public String getName() {
return RpcMethod.EEA_SEND_RAW_TRANSACTION.getMethodName();
}
@Override
public JsonRpcResponse response(final JsonRpcRequestContext requestContext) {
final Object id = requestContext.getRequest().getId();
final Optional<User> user = requestContext.getUser();
final String rawPrivateTransaction = requestContext.getRequiredParameter(0, String.class);
try {
final PrivateTransaction privateTransaction =
PrivateTransaction.readFrom(RLP.input(Bytes.fromHexString(rawPrivateTransaction)));
final ValidationResult<TransactionInvalidReason> validationResult =
validatePrivateTransaction(privateTransaction, user);
if (!validationResult.isValid()) {
return new JsonRpcErrorResponse(
id, convertTransactionInvalidReason(validationResult.getInvalidReason()));
}
final org.hyperledger.besu.plugin.data.Address sender =
privateMarkerTransactionFactory.getSender(
privateTransaction, privacyIdProvider.getPrivacyUserId(user));
// We lock by sender address so that we can send multiple requests from the same address.
// Each request is blocked until the nonce for that sender has been calculated and the
// transaction has completed submission to the transaction pool.
final Lock lock = stripedLock.get(sender.toShortHexString());
lock.lock();
try {
final Transaction privateMarkerTransaction =
createPrivateMarkerTransaction(privateTransaction, user);
LOG.error("CHEESE");
LOG.error(privateMarkerTransaction.getHash());
return transactionPool
.addLocalTransaction(privateMarkerTransaction)
.either(
() -> new JsonRpcSuccessResponse(id, privateMarkerTransaction.getHash().toString()),
errorReason -> getJsonRpcErrorResponse(id, errorReason));
} finally {
lock.unlock();
}
} catch (final JsonRpcErrorResponseException e) {
return new JsonRpcErrorResponse(id, e.getJsonRpcError());
} catch (final IllegalArgumentException | RLPException e) {
LOG.error(e);
return new JsonRpcErrorResponse(id, DECODE_ERROR);
} catch (final Exception e) {
return new JsonRpcErrorResponse(id, convertEnclaveInvalidReason(e.getMessage()));
}
}
JsonRpcErrorResponse getJsonRpcErrorResponse(
final Object id, final TransactionInvalidReason errorReason) {
if (errorReason.equals(TransactionInvalidReason.INTRINSIC_GAS_EXCEEDS_GAS_LIMIT)) {
return new JsonRpcErrorResponse(id, JsonRpcError.PMT_FAILED_INTRINSIC_GAS_EXCEEDS_LIMIT);
}
return new JsonRpcErrorResponse(id, convertTransactionInvalidReason(errorReason));
}
protected abstract ValidationResult<TransactionInvalidReason> validatePrivateTransaction(
final PrivateTransaction privateTransaction, final Optional<User> user);
protected abstract Transaction createPrivateMarkerTransaction(
final PrivateTransaction privateTransaction, final Optional<User> user);
protected Transaction createPrivateMarkerTransaction(
final Address privacyPrecompileAddress,
final String pmtPayload,
final PrivateTransaction privateTransaction,
final String privacyUserId) {
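// Build an unsigned PMT with the sender's next public nonce, have the plugin-provided factory sign it,
// and decode the returned RLP back into a Transaction.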
final Address sender =
Address.fromPlugin(
privateMarkerTransactionFactory.getSender(privateTransaction, privacyUserId));
final long nonce = publicNonceProvider.getNonce(sender);
final Transaction unsignedPrivateMarkerTransaction =
new Transaction.Builder()
.type(TransactionType.FRONTIER)
.nonce(nonce)
.gasPrice(privateTransaction.getGasPrice())
.gasLimit(getGasLimit(privateTransaction, pmtPayload))
.to(privacyPrecompileAddress)
.value(Wei.ZERO)
.payload(Bytes.fromBase64String(pmtPayload))
.build();
final Bytes rlpBytes =
privateMarkerTransactionFactory.create(
unsignedPrivateMarkerTransaction, privateTransaction, privacyUserId);
return Transaction.readFrom(rlpBytes);
}
protected abstract long getGasLimit(PrivateTransaction privateTransaction, String pmtPayload);
}
| 1 | 25,540 | We do have a disconnect between the privateMarkerTransactionFactory.getSender() call and the createPrivateMarkerTransaction() call. When the plugin receives the call to create the PMT it does not know what the sender was returned by the other call. At a minimum we need to pass in the sender to the second call. I would prefer that the plugin does the locking if that is necessary. If you are using random keys you don't have to lock! Besu could signal to the plugin when a PMT was un- or successfully added to the pool using a callback. Callback could be null if this is not needed. The plugin would have to be able to get the nonce for a certain key from besu. What do you think? | hyperledger-besu | java |
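To make the suggestion above concrete, a rough sketch of a factory interface that receives the resolved sender and an optional submission-result callback might look like the following. The interface name, method shape, and parameter types are illustrative assumptions only, not Besu's actual plugin API.

// Hypothetical sketch only -- not Besu's actual plugin API.
import java.util.function.Consumer;

import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.plugin.data.Address;

public interface SenderAwarePrivateMarkerTransactionFactory {

  // The sender previously resolved by getSender() is passed back in, so the plugin knows which
  // address Besu used when fetching the public nonce for the PMT.
  Bytes create(
      Bytes unsignedPrivateMarkerTransactionRlp,
      Bytes privateTransactionRlp,
      String privacyUserId,
      Address sender,
      Consumer<Boolean> submissionResult); // callback may be null if the plugin does not need it
}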
@@ -217,7 +217,7 @@ import 'css!./imageeditor';
reload(context, null, focusContext);
}, function () {
import('alert').then(({default: alert}) => {
- alert(globalize.translate('DefaultErrorMessage'));
+ alert(globalize.translate('MessageDefaultError'));
});
});
} | 1 | import dialogHelper from 'dialogHelper';
import connectionManager from 'connectionManager';
import loading from 'loading';
import dom from 'dom';
import layoutManager from 'layoutManager';
import focusManager from 'focusManager';
import globalize from 'globalize';
import scrollHelper from 'scrollHelper';
import imageLoader from 'imageLoader';
import browser from 'browser';
import appHost from 'apphost';
import 'cardStyle';
import 'formDialogStyle';
import 'emby-button';
import 'paper-icon-button-light';
import 'css!./imageeditor';
/* eslint-disable indent */
const enableFocusTransform = !browser.slow && !browser.edge;
let currentItem;
let hasChanges = false;
function getBaseRemoteOptions() {
const options = {};
options.itemId = currentItem.Id;
return options;
}
function reload(page, item, focusContext) {
loading.show();
let apiClient;
if (item) {
apiClient = connectionManager.getApiClient(item.ServerId);
reloadItem(page, item, apiClient, focusContext);
} else {
apiClient = connectionManager.getApiClient(currentItem.ServerId);
apiClient.getItem(apiClient.getCurrentUserId(), currentItem.Id).then(function (item) {
reloadItem(page, item, apiClient, focusContext);
});
}
}
function addListeners(container, className, eventName, fn) {
container.addEventListener(eventName, function (e) {
const elem = dom.parentWithClass(e.target, className);
if (elem) {
fn.call(elem, e);
}
});
}
function reloadItem(page, item, apiClient, focusContext) {
currentItem = item;
apiClient.getRemoteImageProviders(getBaseRemoteOptions()).then(function (providers) {
const btnBrowseAllImages = page.querySelectorAll('.btnBrowseAllImages');
for (let i = 0, length = btnBrowseAllImages.length; i < length; i++) {
if (providers.length) {
btnBrowseAllImages[i].classList.remove('hide');
} else {
btnBrowseAllImages[i].classList.add('hide');
}
}
apiClient.getItemImageInfos(currentItem.Id).then(function (imageInfos) {
renderStandardImages(page, apiClient, item, imageInfos, providers);
renderBackdrops(page, apiClient, item, imageInfos, providers);
renderScreenshots(page, apiClient, item, imageInfos, providers);
loading.hide();
if (layoutManager.tv) {
focusManager.autoFocus((focusContext || page));
}
});
});
}
function getImageUrl(item, apiClient, type, index, options) {
options = options || {};
options.type = type;
options.index = index;
if (type === 'Backdrop') {
options.tag = item.BackdropImageTags[index];
} else if (type === 'Screenshot') {
options.tag = item.ScreenshotImageTags[index];
} else if (type === 'Primary') {
options.tag = item.PrimaryImageTag || item.ImageTags[type];
} else {
options.tag = item.ImageTags[type];
}
// For search hints
return apiClient.getScaledImageUrl(item.Id || item.ItemId, options);
}
function getCardHtml(image, index, numImages, apiClient, imageProviders, imageSize, tagName, enableFooterButtons) {
// TODO move card creation code to Card component
let html = '';
let cssClass = 'card scalableCard imageEditorCard';
const cardBoxCssClass = 'cardBox visualCardBox';
cssClass += ' backdropCard backdropCard-scalable';
if (tagName === 'button') {
cssClass += ' btnImageCard';
if (layoutManager.tv) {
cssClass += ' show-focus';
if (enableFocusTransform) {
cssClass += ' show-animation';
}
}
html += '<button type="button" class="' + cssClass + '"';
} else {
html += '<div class="' + cssClass + '"';
}
html += ' data-id="' + currentItem.Id + '" data-serverid="' + apiClient.serverId() + '" data-index="' + index + '" data-numimages="' + numImages + '" data-imagetype="' + image.ImageType + '" data-providers="' + imageProviders.length + '"';
html += '>';
html += '<div class="' + cardBoxCssClass + '">';
html += '<div class="cardScalable visualCardBox-cardScalable" style="background-color:transparent;">';
html += '<div class="cardPadder-backdrop"></div>';
html += '<div class="cardContent">';
const imageUrl = getImageUrl(currentItem, apiClient, image.ImageType, image.ImageIndex, { maxWidth: imageSize });
html += '<div class="cardImageContainer" style="background-image:url(\'' + imageUrl + '\');background-position:center center;background-size:contain;"></div>';
html += '</div>';
html += '</div>';
html += '<div class="cardFooter visualCardBox-cardFooter">';
html += '<h3 class="cardText cardTextCentered" style="margin:0;">' + globalize.translate('' + image.ImageType) + '</h3>';
html += '<div class="cardText cardText-secondary cardTextCentered">';
if (image.Width && image.Height) {
html += image.Width + ' X ' + image.Height;
} else {
html += ' ';
}
html += '</div>';
if (enableFooterButtons) {
html += '<div class="cardText cardTextCentered">';
if (image.ImageType === 'Backdrop' || image.ImageType === 'Screenshot') {
if (index > 0) {
html += '<button type="button" is="paper-icon-button-light" class="btnMoveImage autoSize" data-imagetype="' + image.ImageType + '" data-index="' + image.ImageIndex + '" data-newindex="' + (image.ImageIndex - 1) + '" title="' + globalize.translate('MoveLeft') + '"><span class="material-icons chevron_left"></span></button>';
} else {
html += '<button type="button" is="paper-icon-button-light" class="autoSize" disabled title="' + globalize.translate('MoveLeft') + '"><span class="material-icons chevron_left"></span></button>';
}
if (index < numImages - 1) {
html += '<button type="button" is="paper-icon-button-light" class="btnMoveImage autoSize" data-imagetype="' + image.ImageType + '" data-index="' + image.ImageIndex + '" data-newindex="' + (image.ImageIndex + 1) + '" title="' + globalize.translate('MoveRight') + '"><span class="material-icons chevron_right"></span></button>';
} else {
html += '<button type="button" is="paper-icon-button-light" class="autoSize" disabled title="' + globalize.translate('MoveRight') + '"><span class="material-icons chevron_right"></span></button>';
}
} else {
if (imageProviders.length) {
html += '<button type="button" is="paper-icon-button-light" data-imagetype="' + image.ImageType + '" class="btnSearchImages autoSize" title="' + globalize.translate('Search') + '"><span class="material-icons search"></span></button>';
}
}
html += '<button type="button" is="paper-icon-button-light" data-imagetype="' + image.ImageType + '" data-index="' + (image.ImageIndex != null ? image.ImageIndex : 'null') + '" class="btnDeleteImage autoSize" title="' + globalize.translate('Delete') + '"><span class="material-icons delete"></span></button>';
html += '</div>';
}
html += '</div>';
html += '</div>';
html += '</' + tagName + '>';
return html;
}
function deleteImage(context, itemId, type, index, apiClient, enableConfirmation) {
const afterConfirm = function () {
apiClient.deleteItemImage(itemId, type, index).then(function () {
hasChanges = true;
reload(context);
});
};
if (!enableConfirmation) {
afterConfirm();
return;
}
import('confirm').then(({default: confirm}) => {
confirm({
text: globalize.translate('ConfirmDeleteImage'),
confirmText: globalize.translate('Delete'),
primary: 'delete'
}).then(afterConfirm);
});
}
function moveImage(context, apiClient, itemId, type, index, newIndex, focusContext) {
apiClient.updateItemImageIndex(itemId, type, index, newIndex).then(function () {
hasChanges = true;
reload(context, null, focusContext);
}, function () {
import('alert').then(({default: alert}) => {
alert(globalize.translate('DefaultErrorMessage'));
});
});
}
function renderImages(page, item, apiClient, images, imageProviders, elem) {
let html = '';
let imageSize = 300;
const windowSize = dom.getWindowSize();
if (windowSize.innerWidth >= 1280) {
imageSize = Math.round(windowSize.innerWidth / 4);
}
const tagName = layoutManager.tv ? 'button' : 'div';
const enableFooterButtons = !layoutManager.tv;
for (let i = 0, length = images.length; i < length; i++) {
const image = images[i];
html += getCardHtml(image, i, length, apiClient, imageProviders, imageSize, tagName, enableFooterButtons);
}
elem.innerHTML = html;
imageLoader.lazyChildren(elem);
}
function renderStandardImages(page, apiClient, item, imageInfos, imageProviders) {
const images = imageInfos.filter(function (i) {
return i.ImageType !== 'Screenshot' && i.ImageType !== 'Backdrop' && i.ImageType !== 'Chapter';
});
renderImages(page, item, apiClient, images, imageProviders, page.querySelector('#images'));
}
function renderBackdrops(page, apiClient, item, imageInfos, imageProviders) {
const images = imageInfos.filter(function (i) {
return i.ImageType === 'Backdrop';
}).sort(function (a, b) {
return a.ImageIndex - b.ImageIndex;
});
if (images.length) {
page.querySelector('#backdropsContainer', page).classList.remove('hide');
renderImages(page, item, apiClient, images, imageProviders, page.querySelector('#backdrops'));
} else {
page.querySelector('#backdropsContainer', page).classList.add('hide');
}
}
function renderScreenshots(page, apiClient, item, imageInfos, imageProviders) {
const images = imageInfos.filter(function (i) {
return i.ImageType === 'Screenshot';
}).sort(function (a, b) {
return a.ImageIndex - b.ImageIndex;
});
if (images.length) {
page.querySelector('#screenshotsContainer', page).classList.remove('hide');
renderImages(page, item, apiClient, images, imageProviders, page.querySelector('#screenshots'));
} else {
page.querySelector('#screenshotsContainer', page).classList.add('hide');
}
}
function showImageDownloader(page, imageType) {
import('imageDownloader').then(({default: ImageDownloader}) => {
ImageDownloader.show(currentItem.Id, currentItem.ServerId, currentItem.Type, imageType).then(function () {
hasChanges = true;
reload(page);
});
});
}
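// Shows an action sheet with the operations available for the selected image card:
// delete always, move left/right for backdrops and screenshots, and search when image providers are available.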
function showActionSheet(context, imageCard) {
const itemId = imageCard.getAttribute('data-id');
const serverId = imageCard.getAttribute('data-serverid');
const apiClient = connectionManager.getApiClient(serverId);
const type = imageCard.getAttribute('data-imagetype');
const index = parseInt(imageCard.getAttribute('data-index'));
const providerCount = parseInt(imageCard.getAttribute('data-providers'));
const numImages = parseInt(imageCard.getAttribute('data-numimages'));
import('actionsheet').then(({default: actionSheet}) => {
const commands = [];
commands.push({
name: globalize.translate('Delete'),
id: 'delete'
});
if (type === 'Backdrop' || type === 'Screenshot') {
if (index > 0) {
commands.push({
name: globalize.translate('MoveLeft'),
id: 'moveleft'
});
}
if (index < numImages - 1) {
commands.push({
name: globalize.translate('MoveRight'),
id: 'moveright'
});
}
}
if (providerCount) {
commands.push({
name: globalize.translate('Search'),
id: 'search'
});
}
actionSheet.show({
items: commands,
positionTo: imageCard
}).then(function (id) {
switch (id) {
case 'delete':
deleteImage(context, itemId, type, index, apiClient, false);
break;
case 'search':
showImageDownloader(context, type);
break;
case 'moveleft':
moveImage(context, apiClient, itemId, type, index, index - 1, dom.parentWithClass(imageCard, 'itemsContainer'));
break;
case 'moveright':
moveImage(context, apiClient, itemId, type, index, index + 1, dom.parentWithClass(imageCard, 'itemsContainer'));
break;
default:
break;
}
});
});
}
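// Wires up the editor dialog: shows or hides the upload buttons based on file input support and registers
// click handlers for the upload, search, browse, image card, delete and move actions.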
function initEditor(context, options) {
const uploadButtons = context.querySelectorAll('.btnOpenUploadMenu');
const isFileInputSupported = appHost.supports('fileinput');
for (let i = 0, length = uploadButtons.length; i < length; i++) {
if (isFileInputSupported) {
uploadButtons[i].classList.remove('hide');
} else {
uploadButtons[i].classList.add('hide');
}
}
addListeners(context, 'btnOpenUploadMenu', 'click', function () {
const imageType = this.getAttribute('data-imagetype');
import('imageUploader').then(({default: imageUploader}) => {
imageUploader.show({
theme: options.theme,
imageType: imageType,
itemId: currentItem.Id,
serverId: currentItem.ServerId
}).then(function (hasChanged) {
if (hasChanged) {
hasChanges = true;
reload(context);
}
});
});
});
addListeners(context, 'btnSearchImages', 'click', function () {
showImageDownloader(context, this.getAttribute('data-imagetype'));
});
addListeners(context, 'btnBrowseAllImages', 'click', function () {
showImageDownloader(context, this.getAttribute('data-imagetype') || 'Primary');
});
addListeners(context, 'btnImageCard', 'click', function () {
showActionSheet(context, this);
});
addListeners(context, 'btnDeleteImage', 'click', function () {
const type = this.getAttribute('data-imagetype');
let index = this.getAttribute('data-index');
index = index === 'null' ? null : parseInt(index);
const apiClient = connectionManager.getApiClient(currentItem.ServerId);
deleteImage(context, currentItem.Id, type, index, apiClient, true);
});
addListeners(context, 'btnMoveImage', 'click', function () {
const type = this.getAttribute('data-imagetype');
const index = this.getAttribute('data-index');
const newIndex = this.getAttribute('data-newindex');
const apiClient = connectionManager.getApiClient(currentItem.ServerId);
moveImage(context, apiClient, currentItem.Id, type, index, newIndex, dom.parentWithClass(this, 'itemsContainer'));
});
}
function showEditor(options, resolve, reject) {
const itemId = options.itemId;
const serverId = options.serverId;
loading.show();
import('text!./imageeditor.template.html').then(({default: template}) => {
const apiClient = connectionManager.getApiClient(serverId);
apiClient.getItem(apiClient.getCurrentUserId(), itemId).then(function (item) {
const dialogOptions = {
removeOnClose: true
};
if (layoutManager.tv) {
dialogOptions.size = 'fullscreen';
} else {
dialogOptions.size = 'small';
}
const dlg = dialogHelper.createDialog(dialogOptions);
dlg.classList.add('formDialog');
dlg.innerHTML = globalize.translateHtml(template, 'core');
if (layoutManager.tv) {
scrollHelper.centerFocus.on(dlg, false);
}
initEditor(dlg, options);
// Has to be assigned a z-index after the call to .open()
dlg.addEventListener('close', function () {
if (layoutManager.tv) {
scrollHelper.centerFocus.off(dlg, false);
}
loading.hide();
if (hasChanges) {
resolve();
} else {
reject();
}
});
dialogHelper.open(dlg);
reload(dlg, item);
dlg.querySelector('.btnCancel').addEventListener('click', function () {
dialogHelper.close(dlg);
});
});
});
}
export function show (options) {
return new Promise(function (resolve, reject) {
hasChanges = false;
showEditor(options, resolve, reject);
});
}
export default {
show
};
/* eslint-enable indent */
| 1 | 17,051 | This is a bit inconsistent with the `ErrorStartHourGreaterThanEnd` in the previous file. Wouldn't `ErrorDefault``fit better here? | jellyfin-jellyfin-web | js |
@@ -35,12 +35,13 @@ import (
"antrea.io/antrea/pkg/features"
)
-func skipIfNetworkPolicyStatsDisabled(tb testing.TB, data *TestData) {
- skipIfFeatureDisabled(tb, data, features.NetworkPolicyStats, true, true)
+func skipIfNetworkPolicyStatsDisabled(tb testing.TB) {
+ skipIfFeatureDisabled(tb, features.NetworkPolicyStats, true, true)
}
-func TestNetworkPolicyStats(t *testing.T) {
- skipIfNotIPv4Cluster(t)
+// TestNetworkPolicy is the top-level test which contains all subtests for
+// NetworkPolicy related test cases so they can share setup, teardown.
+func TestNetworkPolicy(t *testing.T) {
skipIfHasWindowsNodes(t)
data, err := setupTest(t) | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"context"
"encoding/json"
"fmt"
"net"
"sync"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"antrea.io/antrea/pkg/agent/apiserver/handlers/agentinfo"
"antrea.io/antrea/pkg/apis/crd/v1beta1"
"antrea.io/antrea/pkg/apis/stats/v1alpha1"
"antrea.io/antrea/pkg/features"
)
func skipIfNetworkPolicyStatsDisabled(tb testing.TB, data *TestData) {
skipIfFeatureDisabled(tb, data, features.NetworkPolicyStats, true, true)
}
func TestNetworkPolicyStats(t *testing.T) {
skipIfNotIPv4Cluster(t)
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
skipIfNetworkPolicyStatsDisabled(t, data)
serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", testNamespace)
defer cleanupFunc()
clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace)
defer cleanupFunc()
// When using the userspace OVS datapath and tunneling,
// the first IP packet sent on a tunnel is always dropped because of a missing ARP entry.
// So we need to "warm-up" the tunnel.
if clusterInfo.podV4NetworkCIDR != "" {
cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())}
data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd)
}
if clusterInfo.podV6NetworkCIDR != "" {
cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())}
data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd)
}
np1, err := data.createNetworkPolicy("test-networkpolicy-ingress", &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": clientName,
},
}},
},
}},
})
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(np1); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
}()
np2, err := data.createNetworkPolicy("test-networkpolicy-egress", &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
Egress: []networkingv1.NetworkPolicyEgressRule{{
To: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": serverName,
},
}},
},
}},
})
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(np2); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
}()
// Wait for a few seconds in case that connections are established before policies are enforced.
time.Sleep(2 * time.Second)
sessionsPerAddressFamily := 10
var wg sync.WaitGroup
for i := 0; i < sessionsPerAddressFamily; i++ {
wg.Add(1)
go func() {
if clusterInfo.podV4NetworkCIDR != "" {
cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())}
data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd)
}
if clusterInfo.podV6NetworkCIDR != "" {
cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())}
data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd)
}
wg.Done()
}()
}
wg.Wait()
totalSessions := 0
if clusterInfo.podV4NetworkCIDR != "" {
totalSessions += sessionsPerAddressFamily
}
if clusterInfo.podV6NetworkCIDR != "" {
totalSessions += sessionsPerAddressFamily
}
if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) {
var ingressStats *v1alpha1.NetworkPolicyStats
for _, np := range []string{"test-networkpolicy-ingress", "test-networkpolicy-egress"} {
stats, err := data.crdClient.StatsV1alpha1().NetworkPolicyStats(testNamespace).Get(context.TODO(), np, metav1.GetOptions{})
if err != nil {
return false, err
}
t.Logf("Got NetworkPolicy stats: %v", stats)
if ingressStats != nil {
if stats.TrafficStats.Packets != ingressStats.TrafficStats.Packets {
return false, nil
}
if stats.TrafficStats.Bytes != ingressStats.TrafficStats.Bytes {
return false, nil
}
} else {
ingressStats = stats
}
if stats.TrafficStats.Sessions != int64(totalSessions) {
return false, nil
}
if stats.TrafficStats.Packets < stats.TrafficStats.Sessions || stats.TrafficStats.Bytes < stats.TrafficStats.Sessions {
return false, fmt.Errorf("Neither 'Packets' nor 'Bytes' should be smaller than 'Sessions'")
}
}
return true, nil
}); err != nil {
t.Fatalf("Error when waiting for NetworkPolicy stats: %v", err)
}
}
func TestDifferentNamedPorts(t *testing.T) {
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
checkFn, cleanupFn := data.setupDifferentNamedPorts(t)
defer cleanupFn()
checkFn()
}
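// setupDifferentNamedPorts creates two server Pods whose "http" named port maps to different port numbers,
// two client Pods, and a NetworkPolicy that only allows the first client to reach the named port. It returns
// a function that verifies the expected connectivity and a cleanup function.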
func (data *TestData) setupDifferentNamedPorts(t *testing.T) (checkFn func(), cleanupFn func()) {
var success bool
var cleanupFuncs []func()
cleanupFn = func() {
for i := len(cleanupFuncs) - 1; i >= 0; i-- {
cleanupFuncs[i]()
}
}
// Call cleanupFn only if the function fails. In case of success, we will call cleanupFn in callers.
defer func() {
if !success {
cleanupFn()
}
}()
server0Port := int32(80)
server0Name, server0IPs, cleanupFunc := createAndWaitForPod(t, data, func(name string, ns string, nodeName string) error {
return data.createServerPod(name, testNamespace, "http", server0Port, false, false)
}, "test-server-", "", testNamespace)
cleanupFuncs = append(cleanupFuncs, cleanupFunc)
server1Port := int32(8080)
server1Name, server1IPs, cleanupFunc := createAndWaitForPod(t, data, func(name string, ns string, nodeName string) error {
return data.createServerPod(name, testNamespace, "http", server1Port, false, false)
}, "test-server-", "", testNamespace)
cleanupFuncs = append(cleanupFuncs, cleanupFunc)
client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace)
cleanupFuncs = append(cleanupFuncs, cleanupFunc)
client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace)
cleanupFuncs = append(cleanupFuncs, cleanupFunc)
preCheckFunc := func(server0IP, server1IP string) {
// Both clients can connect to both servers.
for _, clientName := range []string{client0Name, client1Name} {
if err := data.runNetcatCommandFromTestPod(clientName, testNamespace, server0IP, server0Port); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(server0IP, fmt.Sprint(server0Port)))
}
if err := data.runNetcatCommandFromTestPod(clientName, testNamespace, server1IP, server1Port); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(server1IP, fmt.Sprint(server1Port)))
}
}
}
// Precondition check: client is able to access server with the given IP address.
if clusterInfo.podV4NetworkCIDR != "" {
preCheckFunc(server0IPs.ipv4.String(), server1IPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
preCheckFunc(server0IPs.ipv6.String(), server1IPs.ipv6.String())
}
if testOptions.providerName == "kind" {
// Due to netdev datapath bug, sometimes datapath flows are not flushed after new openflows that change the
// actions are installed, causing client1 to still be able to connect to the servers after creating a policy
// that disallows it. The test waits for 10 seconds so that the datapath flows will expire.
// See https://github.com/antrea-io/antrea/issues/1608 for more details.
time.Sleep(10 * time.Second)
}
// Create NetworkPolicy rule.
spec := &networkingv1.NetworkPolicySpec{
// Apply to two server Pods.
PodSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "antrea-e2e",
Operator: metav1.LabelSelectorOpIn,
Values: []string{server0Name, server1Name},
},
}},
// Allow client0 to access named port: "http".
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: "http"},
}},
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": client0Name,
},
}},
},
}},
}
np, err := data.createNetworkPolicy(randName("test-networkpolicy-allow-client0-to-http"), spec)
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
cleanupFuncs = append(cleanupFuncs, func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
})
npCheck := func(server0IP, server1IP string) {
server0Address := net.JoinHostPort(server0IP, fmt.Sprint(server0Port))
server1Address := net.JoinHostPort(server1IP, fmt.Sprint(server1Port))
// client0 can connect to both servers.
if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, server0IP, server0Port); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client0Name, server0Address)
}
if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, server1IP, server1Port); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client0Name, server1Address)
}
// client1 cannot connect to both servers.
if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, server0IP, server0Port); err == nil {
t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client1Name, server0Address)
}
if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, server1IP, server1Port); err == nil {
t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client1Name, server1Address)
}
}
checkFn = func() {
// NetworkPolicy check.
if clusterInfo.podV4NetworkCIDR != "" {
npCheck(server0IPs.ipv4.String(), server1IPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
npCheck(server0IPs.ipv6.String(), server1IPs.ipv6.String())
}
}
success = true
return
}
// TestDefaultDenyIngressPolicy performs additional validation to the upstream test for deny-all policy:
// 1. The traffic initiated from the host network namespace cannot be dropped.
// 2. The traffic initiated externally that accesses the Pod via a NodePort service can be dropped (skipped if the provider is kind).
func TestDefaultDenyIngressPolicy(t *testing.T) {
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
serverNode := workerNodeName(1)
serverNodeIP := workerNodeIP(1)
serverPort := int32(80)
_, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, testNamespace)
defer cleanupFunc()
service, err := data.createService("nginx", serverPort, serverPort, map[string]string{"app": "nginx"}, false, corev1.ServiceTypeNodePort, nil)
if err != nil {
t.Fatalf("Error when creating nginx NodePort service: %v", err)
}
defer data.deleteService(service.Name)
// client1 is a host network Pod and is on the same node as the server Pod, simulating kubelet probe traffic.
client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createHostNetworkBusyboxPodOnNode, "test-hostnetwork-client-", serverNode, testNamespace)
defer cleanupFunc()
// client2 is a host network Pod and is on a different node from the server Pod, accessing the server Pod via the NodePort service.
client2Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createHostNetworkBusyboxPodOnNode, "test-hostnetwork-client-", controlPlaneNodeName(), testNamespace)
defer cleanupFunc()
spec := &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
Ingress: []networkingv1.NetworkPolicyIngressRule{},
}
np, err := data.createNetworkPolicy("test-networkpolicy-deny-all-ingress", spec)
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
}()
npCheck := func(clientName, serverIP string, serverPort int32, wantErr bool) {
if err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); wantErr && err == nil {
t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))
} else if !wantErr && err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))
}
}
// Locally generated traffic can always access the Pods regardless of NetworkPolicy configuration.
if clusterInfo.podV4NetworkCIDR != "" {
npCheck(client1Name, serverIPs.ipv4.String(), serverPort, false)
}
if clusterInfo.podV6NetworkCIDR != "" {
npCheck(client1Name, serverIPs.ipv6.String(), serverPort, false)
}
if testOptions.providerName == "kind" {
t.Logf("Skipped testing NodePort traffic for TestDefaultDenyIngressPolicy because pkt_mark is not properly supported on OVS netdev datapath")
} else {
if clusterInfo.podV4NetworkCIDR != "" {
npCheck(client2Name, serverIPs.ipv4.String(), serverPort, true)
}
if clusterInfo.podV6NetworkCIDR != "" {
npCheck(client2Name, serverIPs.ipv6.String(), serverPort, true)
}
npCheck(client2Name, serverNodeIP, service.Spec.Ports[0].NodePort, true)
}
}
func TestDefaultDenyEgressPolicy(t *testing.T) {
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
serverPort := int32(80)
_, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", testNamespace)
defer cleanupFunc()
clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace)
defer cleanupFunc()
preCheckFunc := func(serverIP string) {
if err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))
}
}
if clusterInfo.podV4NetworkCIDR != "" {
preCheckFunc(serverIPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
preCheckFunc(serverIPs.ipv6.String())
}
spec := &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
Egress: []networkingv1.NetworkPolicyEgressRule{},
}
np, err := data.createNetworkPolicy("test-networkpolicy-deny-all-egress", spec)
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
}()
npCheck := func(serverIP string) {
if err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); err == nil {
t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))
}
}
if clusterInfo.podV4NetworkCIDR != "" {
npCheck(serverIPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
npCheck(serverIPs.ipv6.String())
}
}
// TestEgressToServerInCIDRBlock is a duplicate of upstream test case "should allow egress access to server in CIDR block
// [Feature:NetworkPolicy]", which is currently buggy in v1.19 release for clusters which use IPv6.
// This should be deleted when upstream is updated.
// https://github.com/kubernetes/kubernetes/blob/v1.20.0-alpha.0/test/e2e/network/network_policy.go#L1365
// https://github.com/kubernetes/kubernetes/pull/93583
func TestEgressToServerInCIDRBlock(t *testing.T) {
skipIfNotIPv6Cluster(t)
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
workerNode := workerNodeName(1)
serverAName, serverAIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace)
defer cleanupFunc()
serverBName, serverBIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace)
defer cleanupFunc()
clientA, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, testNamespace)
defer cleanupFunc()
var serverCIDR string
var serverAIP, serverBIP string
if serverAIPs.ipv6 == nil {
t.Fatal("server IPv6 address is empty")
}
serverCIDR = fmt.Sprintf("%s/128", serverAIPs.ipv6.String())
serverAIP = serverAIPs.ipv6.String()
serverBIP = serverBIPs.ipv6.String()
if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err != nil {
t.Fatalf("%s should be able to netcat %s", clientA, serverAName)
}
if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverBIP, 80); err != nil {
t.Fatalf("%s should be able to netcat %s", clientA, serverBName)
}
np, err := data.createNetworkPolicy("allow-client-a-via-cidr-egress-rule", &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": clientA,
},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &networkingv1.IPBlock{
CIDR: serverCIDR,
},
},
},
},
},
})
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
cleanupNP := func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Errorf("Error when deleting network policy: %v", err)
}
}
defer cleanupNP()
if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err != nil {
t.Fatalf("%s should be able to netcat %s", clientA, serverAName)
}
if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverBIP, 80); err == nil {
t.Fatalf("%s should not be able to netcat %s", clientA, serverBName)
}
}
// TestEgressToServerInCIDRBlockWithException is a duplicate of upstream test case "should allow egress access to server
// in CIDR block [Feature:NetworkPolicy]", which is currently buggy in v1.19 release for clusters which use IPv6.
// This should be deleted when upstream is updated.
// https://github.com/kubernetes/kubernetes/blob/v1.20.0-alpha.0/test/e2e/network/network_policy.go#L1444
// https://github.com/kubernetes/kubernetes/pull/93583
func TestEgressToServerInCIDRBlockWithException(t *testing.T) {
skipIfNotIPv6Cluster(t)
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
workerNode := workerNodeName(1)
serverAName, serverAIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace)
defer cleanupFunc()
clientA, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, testNamespace)
defer cleanupFunc()
var serverAAllowCIDR string
var serverAExceptList []string
var serverAIP string
if serverAIPs.ipv6 == nil {
t.Fatal("server IPv6 address is empty")
}
_, serverAAllowSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/%d", serverAIPs.ipv6.String(), 64))
if err != nil {
t.Fatalf("could not parse allow subnet")
}
serverAAllowCIDR = serverAAllowSubnet.String()
serverAExceptList = []string{fmt.Sprintf("%s/%d", serverAIPs.ipv6.String(), 128)}
serverAIP = serverAIPs.ipv6.String()
if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err != nil {
t.Fatalf("%s should be able to netcat %s", clientA, serverAName)
}
np, err := data.createNetworkPolicy("deny-client-a-via-except-cidr-egress-rule", &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": clientA,
},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &networkingv1.IPBlock{
CIDR: serverAAllowCIDR,
Except: serverAExceptList,
},
},
},
},
},
})
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
cleanupNP := func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Errorf("Error when deleting network policy: %v", err)
}
}
defer cleanupNP()
if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err == nil {
t.Fatalf("%s should not be able to netcat %s", clientA, serverAName)
}
}
func TestNetworkPolicyResyncAfterRestart(t *testing.T) {
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
workerNode := workerNodeName(1)
antreaPod, err := data.getAntreaPodOnNode(workerNode)
if err != nil {
t.Fatalf("Error when getting antrea-agent pod name: %v", err)
}
server0Name, server0IPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace)
defer cleanupFunc()
server1Name, server1IPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace)
defer cleanupFunc()
client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, testNamespace)
defer cleanupFunc()
client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, testNamespace)
defer cleanupFunc()
netpol0, err := data.createNetworkPolicy("test-isolate-server0", &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": server0Name,
},
},
})
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
cleanupNetpol0 := func() {
if netpol0 == nil {
return
}
if err = data.deleteNetworkpolicy(netpol0); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
netpol0 = nil
}
defer cleanupNetpol0()
preCheckFunc := func(server0IP, server1IP string) {
if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, server0IP, 80); err == nil {
t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client0Name, server0Name)
}
if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, server1IP, 80); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client1Name, server1Name)
}
}
if clusterInfo.podV4NetworkCIDR != "" {
preCheckFunc(server0IPs.ipv4.String(), server1IPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
preCheckFunc(server0IPs.ipv6.String(), server1IPs.ipv6.String())
}
scaleFunc := func(replicas int32) {
scale, err := data.clientset.AppsV1().Deployments(antreaNamespace).GetScale(context.TODO(), antreaDeployment, metav1.GetOptions{})
if err != nil {
t.Fatalf("error when getting scale of Antrea Deployment: %v", err)
}
scale.Spec.Replicas = replicas
if _, err := data.clientset.AppsV1().Deployments(antreaNamespace).UpdateScale(context.TODO(), antreaDeployment, scale, metav1.UpdateOptions{}); err != nil {
t.Fatalf("error when scaling Antrea Deployment to %d: %v", replicas, err)
}
}
// Scale antrea-controller to 0 so antrea-agent will lose connection with antrea-controller.
scaleFunc(0)
defer scaleFunc(1)
// Make sure antrea-agent disconnects from antrea-controller.
waitForAgentCondition(t, data, antreaPod, v1beta1.ControllerConnectionUp, corev1.ConditionFalse)
// Remove netpol0, we expect client0 can connect server0 after antrea-controller is up.
cleanupNetpol0()
// Create netpol1, we expect client1 cannot connect server1 after antrea-controller is up.
netpol1, err := data.createNetworkPolicy("test-isolate-server1", &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": server1Name,
},
},
})
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(netpol1); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
}()
// Scale antrea-controller to 1 so antrea-agent will connect to antrea-controller.
scaleFunc(1)
// Make sure antrea-agent connects to antrea-controller.
waitForAgentCondition(t, data, antreaPod, v1beta1.ControllerConnectionUp, corev1.ConditionTrue)
npCheck := func(server0IP, server1IP string) {
if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, server0IP, 80); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client0Name, server0Name)
}
if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, server1IP, 80); err == nil {
t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client1Name, server1Name)
}
}
if clusterInfo.podV4NetworkCIDR != "" {
npCheck(server0IPs.ipv4.String(), server1IPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
npCheck(server0IPs.ipv6.String(), server1IPs.ipv6.String())
}
}
func TestIngressPolicyWithoutPortNumber(t *testing.T) {
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
serverPort := int32(80)
_, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", testNamespace)
defer cleanupFunc()
client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace)
defer cleanupFunc()
client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace)
defer cleanupFunc()
preCheckFunc := func(serverIP string) {
// Both clients can connect to server.
for _, clientName := range []string{client0Name, client1Name} {
if err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))
}
}
}
if clusterInfo.podV4NetworkCIDR != "" {
preCheckFunc(serverIPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
preCheckFunc(serverIPs.ipv6.String())
}
protocol := corev1.ProtocolTCP
spec := &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Protocol: &protocol,
},
},
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": client0Name,
},
}},
},
},
},
}
np, err := data.createNetworkPolicy("test-networkpolicy-ingress-no-portnumber", spec)
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
}()
npCheck := func(serverIP string) {
serverAddress := net.JoinHostPort(serverIP, fmt.Sprint(serverPort))
// Client0 can access server.
if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, serverIP, serverPort); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client0Name, serverAddress)
}
// Client1 can't access server.
if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, serverIP, serverPort); err == nil {
t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client1Name, serverAddress)
}
}
if clusterInfo.podV4NetworkCIDR != "" {
npCheck(serverIPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
npCheck(serverIPs.ipv6.String())
}
}
func TestIngressPolicyWithEndPort(t *testing.T) {
skipIfHasWindowsNodes(t)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
serverPort := int32(80)
serverEndPort := int32(84)
policyPort := int32(81)
policyEndPort := int32(83)
var serverPorts []int32
for i := serverPort; i <= serverEndPort; i++ {
serverPorts = append(serverPorts, i)
}
// makeContainerSpec creates a Container listening on a specific port.
makeContainerSpec := func(port int32) corev1.Container {
return corev1.Container{
Name: fmt.Sprintf("c%d", port),
ImagePullPolicy: corev1.PullIfNotPresent,
Image: agnhostImage,
Command: []string{"/bin/bash", "-c"},
Args: []string{fmt.Sprintf("/agnhost serve-hostname --tcp --http=false --port=%d", port)},
Ports: []corev1.ContainerPort{
{
ContainerPort: port,
Name: fmt.Sprintf("serve-%d", port),
Protocol: corev1.ProtocolTCP,
},
},
}
}
// createAgnhostPodOnNodeWithMultiPort creates a Pod in the test namespace with
// multiple agnhost containers listening on multiple ports.
// The Pod will be scheduled on the specified Node (if nodeName is not empty).
createAgnhostPodOnNodeWithMultiPort := func(name string, ns string, nodeName string) error {
var containers []corev1.Container
for _, port := range serverPorts {
containers = append(containers, makeContainerSpec(port))
}
podSpec := corev1.PodSpec{
Containers: containers,
RestartPolicy: corev1.RestartPolicyNever,
HostNetwork: false,
}
if nodeName != "" {
podSpec.NodeSelector = map[string]string{
"kubernetes.io/hostname": nodeName,
}
}
if nodeName == controlPlaneNodeName() {
// tolerate NoSchedule taint if we want Pod to run on control-plane Node
noScheduleToleration := controlPlaneNoScheduleToleration()
podSpec.Tolerations = []corev1.Toleration{noScheduleToleration}
}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"antrea-e2e": name,
"app": getImageName(agnhostImage),
},
},
Spec: podSpec,
}
if _, err := data.clientset.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
return err
}
return nil
}
serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, createAgnhostPodOnNodeWithMultiPort, "test-server-", "", testNamespace)
defer cleanupFunc()
clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace)
defer cleanupFunc()
preCheck := func(serverIP string) {
// The client can connect to server on all ports.
for _, port := range serverPorts {
if err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, port); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
}
}
}
if clusterInfo.podV4NetworkCIDR != "" {
preCheck(serverIPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
preCheck(serverIPs.ipv6.String())
}
protocol := corev1.ProtocolTCP
spec := &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": serverName,
},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Protocol: &protocol,
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: policyPort},
EndPort: &policyEndPort,
},
},
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": clientName,
},
}},
},
},
},
}
np, err := data.createNetworkPolicy("test-networkpolicy-ingress-with-endport", spec)
if err != nil {
t.Fatalf("Error when creating NetworkPolicy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Errorf("Error when deleting NetworkPolicy: %v", err)
}
}()
if np.Spec.Ingress[0].Ports[0].EndPort == nil {
t.Skipf("Skipping test as the kube-apiserver doesn't support `endPort` " +
"or `NetworkPolicyEndPort` feature-gate is not enabled.")
}
npCheck := func(serverIP string) {
for _, port := range serverPorts {
err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, port)
if port >= policyPort && port <= policyEndPort {
if err != nil {
t.Errorf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
}
} else if err == nil {
t.Errorf("Pod %s should be not able to connect %s, but was able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
}
}
}
if clusterInfo.podV4NetworkCIDR != "" {
npCheck(serverIPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
npCheck(serverIPs.ipv6.String())
}
}
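// createAndWaitForPod creates a Pod with a random name generated from namePrefix using the provided createFunc,
// waits for the Pod to be assigned IPs, and returns the Pod name, its IPs and a cleanup function.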
func createAndWaitForPod(t *testing.T, data *TestData, createFunc func(name string, ns string, nodeName string) error, namePrefix string, nodeName string, ns string) (string, *PodIPs, func()) {
name := randName(namePrefix)
if err := createFunc(name, ns, nodeName); err != nil {
t.Fatalf("Error when creating busybox test Pod: %v", err)
}
cleanupFunc := func() {
deletePodWrapper(t, data, name)
}
podIP, err := data.podWaitForIPs(defaultTimeout, name, ns)
if err != nil {
cleanupFunc()
t.Fatalf("Error when waiting for IP for Pod '%s': %v", name, err)
}
return name, podIP, cleanupFunc
}
func createAndWaitForPodWithLabels(t *testing.T, data *TestData, createFunc func(name, ns string, portNum int32, labels map[string]string) error, name, ns string, portNum int32, labels map[string]string) (string, *PodIPs, func() error) {
if err := createFunc(name, ns, portNum, labels); err != nil {
t.Fatalf("Error when creating busybox test Pod: %v", err)
}
cleanupFunc := func() error {
if err := data.deletePod(ns, name); err != nil {
return fmt.Errorf("error when deleting Pod: %v", err)
}
return nil
}
podIP, err := data.podWaitForIPs(defaultTimeout, name, ns)
if err != nil {
cleanupFunc()
t.Fatalf("Error when waiting for IP for Pod '%s': %v", name, err)
}
return name, podIP, cleanupFunc
}
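// waitForAgentCondition polls the agentinfo of the given antrea-agent Pod via antctl until the specified
// condition reaches the expected status, and fails the test on error or timeout.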
func waitForAgentCondition(t *testing.T, data *TestData, podName string, conditionType v1beta1.AgentConditionType, expectedStatus corev1.ConditionStatus) {
if err := wait.Poll(defaultInterval, defaultTimeout, func() (bool, error) {
cmds := []string{"antctl", "get", "agentinfo", "-o", "json"}
t.Logf("cmds: %s", cmds)
stdout, _, err := runAntctl(podName, cmds, data)
if err != nil {
return true, err
}
var agentInfo agentinfo.AntreaAgentInfoResponse
err = json.Unmarshal([]byte(stdout), &agentInfo)
if err != nil {
return true, err
}
for _, condition := range agentInfo.AgentConditions {
if condition.Type == conditionType && condition.Status == expectedStatus {
return true, nil
}
}
return false, nil
}); err != nil {
t.Fatalf("Error when waiting for condition '%s'=='%s': %v", conditionType, expectedStatus, err)
}
}
| 1 | 39,097 | I think it will revert the change @antoninbas did on purpose 05eee251c9c53400277def576f92d614ca234898 | antrea-io-antrea | go |
@@ -614,6 +614,8 @@ func (m *bpfEndpointManager) attachDataIfaceProgram(ifaceName string, polDirecti
epType := tc.EpTypeHost
if ifaceName == "tunl0" {
epType = tc.EpTypeTunnel
+ } else if ifaceName == "wireguard.cali" {
+ epType = tc.EpTypeWireguard
}
ap := m.calculateTCAttachPoint(epType, polDirection, ifaceName)
ap.HostIP = m.hostIP | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intdataplane
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"net"
"os/exec"
"regexp"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/polprog"
"github.com/projectcalico/felix/bpf/tc"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/felix/ifacemonitor"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/set"
"github.com/projectcalico/felix/proto"
)
type epIface struct {
ifacemonitor.State
jumpMapFD map[PolDirection]bpf.MapFD
}
type bpfEndpointManager struct {
// Caches. Updated immediately for now.
wlEps map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint
policies map[proto.PolicyID]*proto.Policy
profiles map[proto.ProfileID]*proto.Profile
ifaces map[string]epIface
// Indexes
policiesToWorkloads map[proto.PolicyID]set.Set /*proto.WorkloadEndpointID*/
profilesToWorkloads map[proto.ProfileID]set.Set /*proto.WorkloadEndpointID*/
dirtyWorkloads set.Set
dirtyIfaces set.Set
bpfLogLevel string
hostname string
hostIP net.IP
fibLookupEnabled bool
dataIfaceRegex *regexp.Regexp
ipSetIDAlloc *idalloc.IDAllocator
epToHostDrop bool
vxlanMTU int
dsrEnabled bool
ipSetMap bpf.Map
stateMap bpf.Map
}
func newBPFEndpointManager(
bpfLogLevel string,
hostname string,
fibLookupEnabled bool,
epToHostDrop bool,
dataIfaceRegex *regexp.Regexp,
ipSetIDAlloc *idalloc.IDAllocator,
vxlanMTU int,
dsrEnabled bool,
ipSetMap bpf.Map,
stateMap bpf.Map,
) *bpfEndpointManager {
return &bpfEndpointManager{
wlEps: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{},
policies: map[proto.PolicyID]*proto.Policy{},
profiles: map[proto.ProfileID]*proto.Profile{},
ifaces: map[string]epIface{},
policiesToWorkloads: map[proto.PolicyID]set.Set{},
profilesToWorkloads: map[proto.ProfileID]set.Set{},
dirtyWorkloads: set.New(),
dirtyIfaces: set.New(),
bpfLogLevel: bpfLogLevel,
hostname: hostname,
fibLookupEnabled: fibLookupEnabled,
dataIfaceRegex: dataIfaceRegex,
ipSetIDAlloc: ipSetIDAlloc,
epToHostDrop: epToHostDrop,
vxlanMTU: vxlanMTU,
dsrEnabled: dsrEnabled,
ipSetMap: ipSetMap,
stateMap: stateMap,
}
}
func (m *bpfEndpointManager) OnUpdate(msg interface{}) {
switch msg := msg.(type) {
// Updates from the dataplane:
// Interface updates.
case *ifaceUpdate:
m.onInterfaceUpdate(msg)
// Updates from the datamodel:
// Workloads.
case *proto.WorkloadEndpointUpdate:
m.onWorkloadEndpointUpdate(msg)
case *proto.WorkloadEndpointRemove:
		m.onWorkloadEndpointRemove(msg)
// Policies.
case *proto.ActivePolicyUpdate:
m.onPolicyUpdate(msg)
case *proto.ActivePolicyRemove:
m.onPolicyRemove(msg)
// Profiles.
case *proto.ActiveProfileUpdate:
m.onProfileUpdate(msg)
case *proto.ActiveProfileRemove:
m.onProfileRemove(msg)
case *proto.HostMetadataUpdate:
if msg.Hostname == m.hostname {
log.WithField("HostMetadataUpdate", msg).Info("Host IP changed")
ip := net.ParseIP(msg.Ipv4Addr)
if ip != nil {
m.hostIP = ip
for iface := range m.ifaces {
m.dirtyIfaces.Add(iface)
}
} else {
log.WithField("HostMetadataUpdate", msg).Warn("Cannot parse IP, no change applied")
}
}
}
}
func (m *bpfEndpointManager) onInterfaceUpdate(update *ifaceUpdate) {
if update.State == ifacemonitor.StateUnknown {
log.WithField("iface", update.Name).Debug("Interface no longer present.")
if iface, ok := m.ifaces[update.Name]; ok {
for _, fd := range iface.jumpMapFD {
_ = fd.Close()
}
delete(m.ifaces, update.Name)
m.dirtyIfaces.Add(update.Name)
}
} else {
log.WithFields(log.Fields{
"name": update.Name,
"state": update.State,
}).Debug("Interface state updated.")
iface := m.ifaces[update.Name]
if iface.State != update.State {
iface.State = update.State
m.ifaces[update.Name] = iface
m.dirtyIfaces.Add(update.Name)
}
}
}
// onWorkloadEndpointUpdate adds/updates the workload in the cache along with the index from active policy to
// workloads using that policy.
func (m *bpfEndpointManager) onWorkloadEndpointUpdate(msg *proto.WorkloadEndpointUpdate) {
log.WithField("wep", msg.Endpoint).Debug("Workload endpoint update")
wlID := *msg.Id
oldWL := m.wlEps[wlID]
wl := msg.Endpoint
if oldWL != nil {
for _, t := range oldWL.Tiers {
for _, pol := range t.IngressPolicies {
polSet := m.policiesToWorkloads[proto.PolicyID{
Tier: t.Name,
Name: pol,
}]
if polSet == nil {
continue
}
polSet.Discard(wlID)
}
for _, pol := range t.EgressPolicies {
polSet := m.policiesToWorkloads[proto.PolicyID{
Tier: t.Name,
Name: pol,
}]
if polSet == nil {
continue
}
polSet.Discard(wlID)
}
}
for _, profName := range oldWL.ProfileIds {
profID := proto.ProfileID{Name: profName}
profSet := m.profilesToWorkloads[profID]
if profSet == nil {
continue
}
profSet.Discard(wlID)
}
}
m.wlEps[wlID] = msg.Endpoint
for _, t := range wl.Tiers {
for _, pol := range t.IngressPolicies {
polID := proto.PolicyID{
Tier: t.Name,
Name: pol,
}
if m.policiesToWorkloads[polID] == nil {
m.policiesToWorkloads[polID] = set.New()
}
m.policiesToWorkloads[polID].Add(wlID)
}
for _, pol := range t.EgressPolicies {
polID := proto.PolicyID{
Tier: t.Name,
Name: pol,
}
if m.policiesToWorkloads[polID] == nil {
m.policiesToWorkloads[polID] = set.New()
}
m.policiesToWorkloads[polID].Add(wlID)
}
for _, profName := range wl.ProfileIds {
profID := proto.ProfileID{Name: profName}
profSet := m.profilesToWorkloads[profID]
if profSet == nil {
profSet = set.New()
m.profilesToWorkloads[profID] = profSet
}
profSet.Add(wlID)
}
}
m.dirtyWorkloads.Add(wlID)
}
// onWorkloadEndpointRemove removes the workload from the cache and the index, which maps from policy to workload.
func (m *bpfEndpointManager) onWorkloadEndpointRemove(msg *proto.WorkloadEndpointRemove) {
wlID := *msg.Id
log.WithField("id", wlID).Debug("Workload endpoint removed")
wl := m.wlEps[wlID]
for _, t := range wl.Tiers {
for _, pol := range t.IngressPolicies {
polSet := m.policiesToWorkloads[proto.PolicyID{
Tier: t.Name,
Name: pol,
}]
if polSet == nil {
continue
}
polSet.Discard(wlID)
}
for _, pol := range t.EgressPolicies {
polSet := m.policiesToWorkloads[proto.PolicyID{
Tier: t.Name,
Name: pol,
}]
if polSet == nil {
continue
}
polSet.Discard(wlID)
}
}
delete(m.wlEps, wlID)
m.dirtyWorkloads.Add(wlID)
}
// onPolicyUpdate stores the policy in the cache and marks any endpoints using it dirty.
func (m *bpfEndpointManager) onPolicyUpdate(msg *proto.ActivePolicyUpdate) {
polID := *msg.Id
log.WithField("id", polID).Debug("Policy update")
m.policies[polID] = msg.Policy
m.markPolicyUsersDirty(polID)
}
// onPolicyRemove removes the policy from the cache and marks any endpoints using it dirty.
// The latter should be a no-op due to the ordering guarantees of the calc graph.
func (m *bpfEndpointManager) onPolicyRemove(msg *proto.ActivePolicyRemove) {
polID := *msg.Id
log.WithField("id", polID).Debug("Policy removed")
m.markPolicyUsersDirty(polID)
delete(m.policies, polID)
delete(m.policiesToWorkloads, polID)
}
// onProfileUpdate stores the profile in the cache and marks any endpoints that use it as dirty.
func (m *bpfEndpointManager) onProfileUpdate(msg *proto.ActiveProfileUpdate) {
profID := *msg.Id
log.WithField("id", profID).Debug("Profile update")
m.profiles[profID] = msg.Profile
m.markProfileUsersDirty(profID)
}
// onProfileRemove removes the profile from the cache and marks any endpoints that were using it as dirty.
// The latter should be a no-op due to the ordering guarantees of the calc graph.
func (m *bpfEndpointManager) onProfileRemove(msg *proto.ActiveProfileRemove) {
profID := *msg.Id
log.WithField("id", profID).Debug("Profile removed")
m.markProfileUsersDirty(profID)
delete(m.profiles, profID)
delete(m.profilesToWorkloads, profID)
}
func (m *bpfEndpointManager) markPolicyUsersDirty(id proto.PolicyID) {
wls := m.policiesToWorkloads[id]
if wls == nil {
		// We may hear about the policy before the endpoint; nothing to mark dirty yet.
return
}
wls.Iter(func(item interface{}) error {
m.dirtyWorkloads.Add(item)
return nil
})
}
func (m *bpfEndpointManager) markProfileUsersDirty(id proto.ProfileID) {
wls := m.profilesToWorkloads[id]
if wls == nil {
// Hear about the policy before the endpoint.
return
}
wls.Iter(func(item interface{}) error {
m.dirtyWorkloads.Add(item)
return nil
})
}
func (m *bpfEndpointManager) CompleteDeferredWork() error {
m.applyProgramsToDirtyDataInterfaces()
m.applyProgramsToDirtyWorkloadEndpoints()
// TODO: handle cali interfaces with no WEP
return nil
}
func (m *bpfEndpointManager) setAcceptLocal(iface string, val bool) error {
numval := "0"
if val {
numval = "1"
}
path := fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/accept_local", iface)
err := writeProcSys(path, numval)
if err != nil {
log.WithField("err", err).Errorf("Failed to set %s to %s", path, numval)
return err
}
log.Infof("%s set to %s", path, numval)
return nil
}
func (m *bpfEndpointManager) applyProgramsToDirtyDataInterfaces() {
var mutex sync.Mutex
errs := map[string]error{}
var wg sync.WaitGroup
m.dirtyIfaces.Iter(func(item interface{}) error {
iface := item.(string)
if !m.dataIfaceRegex.MatchString(iface) {
log.WithField("iface", iface).Debug(
"Ignoring interface that doesn't match the host data interface regex")
return set.RemoveItem
}
if m.ifaces[iface].State != ifacemonitor.StateUp {
log.WithField("iface", iface).Debug("Ignoring interface that is down")
return set.RemoveItem
}
wg.Add(1)
go func() {
defer wg.Done()
err := m.attachDataIfaceProgram(iface, PolDirnIngress)
if err == nil {
err = m.attachDataIfaceProgram(iface, PolDirnEgress)
}
if err == nil {
// This is required to allow NodePort forwarding with
// encapsulation with the host's IP as the source address
err = m.setAcceptLocal(iface, true)
}
mutex.Lock()
errs[iface] = err
mutex.Unlock()
}()
return nil
})
wg.Wait()
m.dirtyIfaces.Iter(func(item interface{}) error {
iface := item.(string)
err := errs[iface]
if err == nil {
log.WithField("id", iface).Info("Applied program to host interface")
return set.RemoveItem
}
if err == tc.ErrDeviceNotFound {
log.WithField("iface", iface).Debug(
"Tried to apply BPF program to interface but the interface wasn't present. " +
"Will retry if it shows up.")
}
log.WithError(err).Warn("Failed to apply policy to interface")
return nil
})
}
func (m *bpfEndpointManager) applyProgramsToDirtyWorkloadEndpoints() {
var mutex sync.Mutex
errs := map[proto.WorkloadEndpointID]error{}
var wg sync.WaitGroup
m.dirtyWorkloads.Iter(func(item interface{}) error {
wg.Add(1)
go func() {
defer wg.Done()
wlID := item.(proto.WorkloadEndpointID)
err := m.applyPolicy(wlID)
mutex.Lock()
errs[wlID] = err
mutex.Unlock()
}()
return nil
})
wg.Wait()
if m.dirtyWorkloads.Len() > 0 {
// Clean up any left-over jump maps in the background...
go tc.CleanUpJumpMaps()
}
m.dirtyWorkloads.Iter(func(item interface{}) error {
wlID := item.(proto.WorkloadEndpointID)
err := errs[wlID]
if err == nil {
log.WithField("id", wlID).Info("Applied policy to workload")
return set.RemoveItem
}
if err == tc.ErrDeviceNotFound {
log.WithField("wep", wlID).Debug(
"Tried to apply BPF program to interface but the interface wasn't present. " +
"Will retry if it shows up.")
}
log.WithError(err).Warn("Failed to apply policy to endpoint")
return nil
})
}
// applyPolicy actually applies the policy to the given workload.
func (m *bpfEndpointManager) applyPolicy(wlID proto.WorkloadEndpointID) error {
startTime := time.Now()
wep := m.wlEps[wlID]
if wep == nil {
// TODO clean up old workloads
return nil
}
var ingressErr, egressErr error
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
ingressErr = m.attachWorkloadProgram(wep, PolDirnIngress)
}()
go func() {
defer wg.Done()
egressErr = m.attachWorkloadProgram(wep, PolDirnEgress)
}()
wg.Wait()
if ingressErr != nil {
return ingressErr
}
if egressErr != nil {
return egressErr
}
applyTime := time.Since(startTime)
log.WithField("timeTaken", applyTime).Info("Finished applying BPF programs for workload")
return nil
}
var calicoRouterIP = net.IPv4(169, 254, 1, 1).To4()
func (m *bpfEndpointManager) attachWorkloadProgram(endpoint *proto.WorkloadEndpoint, polDirection PolDirection) error {
ap := m.calculateTCAttachPoint(tc.EpTypeWorkload, polDirection, endpoint.Name)
// Host side of the veth is always configured as 169.254.1.1.
ap.HostIP = calicoRouterIP
	// * VXLAN MTU should be the host iface's MTU - 50, in order to allow space for the VXLAN header.
	// * We also expect that to be the MTU used on veths.
	// * We do encap on the veths, and there's a bogus kernel MTU check in the BPF helper
	//   for resizing the packet, so we have to reduce the apparent MTU by another 50 bytes
	//   when we cannot encap the packet - non-GSO & too close to the veth MTU.
ap.TunnelMTU = uint16(m.vxlanMTU - 50)
var tier *proto.TierInfo
if len(endpoint.Tiers) != 0 {
tier = endpoint.Tiers[0]
}
rules := m.extractRules(tier, endpoint.ProfileIds, polDirection)
iface := m.ifaces[endpoint.Name]
if iface.jumpMapFD[polDirection] == 0 {
// We don't have a program attached to this interface yet, attach one now.
err := ap.AttachProgram()
if err != nil {
return err
}
jumpMapFD, err := FindJumpMap(ap)
if err != nil {
return errors.Wrap(err, "failed to look up jump map")
}
if iface.jumpMapFD == nil {
iface.jumpMapFD = map[PolDirection]bpf.MapFD{}
}
iface.jumpMapFD[polDirection] = jumpMapFD
m.ifaces[endpoint.Name] = iface
}
return m.updatePolicyProgram(iface.jumpMapFD[polDirection], rules)
}
func (m *bpfEndpointManager) updatePolicyProgram(jumpMapFD bpf.MapFD, rules [][][]*proto.Rule) error {
pg := polprog.NewBuilder(m.ipSetIDAlloc, m.ipSetMap.MapFD(), m.stateMap.MapFD(), jumpMapFD)
insns, err := pg.Instructions(rules)
if err != nil {
return errors.Wrap(err, "failed to generate policy bytecode")
}
progFD, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0")
if err != nil {
return errors.Wrap(err, "failed to load BPF policy program")
}
k := make([]byte, 4)
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, uint32(progFD))
err = bpf.UpdateMapEntry(jumpMapFD, k, v)
if err != nil {
return errors.Wrap(err, "failed to update jump map")
}
return nil
}
func FindJumpMap(ap tc.AttachPoint) (bpf.MapFD, error) {
tcCmd := exec.Command("tc", "filter", "show", "dev", ap.Iface, string(ap.Hook))
out, err := tcCmd.Output()
if err != nil {
return 0, errors.Wrap(err, "failed to find TC filter for interface "+ap.Iface)
}
progName := ap.ProgramName()
for _, line := range bytes.Split(out, []byte("\n")) {
line := string(line)
if strings.Contains(line, progName) {
re := regexp.MustCompile(`id (\d+)`)
m := re.FindStringSubmatch(line)
if len(m) > 0 {
progIDStr := m[1]
bpftool := exec.Command("bpftool", "prog", "show", "id", progIDStr, "--json")
output, err := bpftool.Output()
if err != nil {
return 0, errors.Wrap(err, "failed to get map metadata")
}
var prog struct {
MapIDs []int `json:"map_ids"`
}
err = json.Unmarshal(output, &prog)
if err != nil {
return 0, errors.Wrap(err, "failed to parse bpftool output")
}
for _, mapID := range prog.MapIDs {
mapFD, err := bpf.GetMapFDByID(mapID)
if err != nil {
return 0, errors.Wrap(err, "failed to get map FD from ID")
}
mapInfo, err := bpf.GetMapInfo(mapFD)
if err != nil {
err = mapFD.Close()
if err != nil {
log.WithError(err).Panic("Failed to close FD.")
}
return 0, errors.Wrap(err, "failed to get map info")
}
if mapInfo.Type == unix.BPF_MAP_TYPE_PROG_ARRAY {
return mapFD, nil
}
}
}
return 0, errors.New("failed to find map")
}
}
return 0, errors.New("failed to find TC program")
}
func (m *bpfEndpointManager) attachDataIfaceProgram(ifaceName string, polDirection PolDirection) error {
epType := tc.EpTypeHost
if ifaceName == "tunl0" {
epType = tc.EpTypeTunnel
}
ap := m.calculateTCAttachPoint(epType, polDirection, ifaceName)
ap.HostIP = m.hostIP
ap.TunnelMTU = uint16(m.vxlanMTU)
return ap.AttachProgram()
}
// PolDirection is the Calico datamodel direction of policy. On a host endpoint, ingress is towards the host.
// On a workload endpoint, ingress is towards the workload.
type PolDirection string
const (
PolDirnIngress PolDirection = "ingress"
PolDirnEgress PolDirection = "egress"
)
func (m *bpfEndpointManager) calculateTCAttachPoint(endpointType tc.EndpointType, policyDirection PolDirection, ifaceName string) tc.AttachPoint {
var ap tc.AttachPoint
if endpointType == tc.EpTypeWorkload {
// Policy direction is relative to the workload so, from the host namespace it's flipped.
if policyDirection == PolDirnIngress {
ap.Hook = tc.HookEgress
} else {
ap.Hook = tc.HookIngress
}
} else {
// Host endpoints have the natural relationship between policy direction and hook.
if policyDirection == PolDirnIngress {
ap.Hook = tc.HookIngress
} else {
ap.Hook = tc.HookEgress
}
}
var toOrFrom tc.ToOrFromEp
if ap.Hook == tc.HookIngress {
toOrFrom = tc.FromEp
} else {
toOrFrom = tc.ToEp
}
ap.Iface = ifaceName
ap.Type = endpointType
ap.ToOrFrom = toOrFrom
ap.ToHostDrop = m.epToHostDrop
ap.FIB = m.fibLookupEnabled
ap.DSR = m.dsrEnabled
ap.LogLevel = m.bpfLogLevel
return ap
}
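// The nested if/else above that picks ap.Hook could also be collapsed into a single
// switch; a rough sketch only (it assumes tc.Hook is the type of ap.Hook, which the
// assignments above imply, and it is not part of the original file):
func hookForAttachPoint(endpointType tc.EndpointType, policyDirection PolDirection) tc.Hook {
	switch {
	case endpointType == tc.EpTypeWorkload && policyDirection == PolDirnIngress:
		// Policy direction is relative to the workload, so it is flipped in the host namespace.
		return tc.HookEgress
	case endpointType == tc.EpTypeWorkload:
		return tc.HookIngress
	case policyDirection == PolDirnIngress:
		return tc.HookIngress
	default:
		return tc.HookEgress
	}
}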
func (m *bpfEndpointManager) extractRules(tier *proto.TierInfo, profileNames []string, direction PolDirection) [][][]*proto.Rule {
var allRules [][][]*proto.Rule
if tier != nil {
var pols [][]*proto.Rule
directionalPols := tier.IngressPolicies
if direction == PolDirnEgress {
directionalPols = tier.EgressPolicies
}
if len(directionalPols) > 0 {
for _, polName := range directionalPols {
pol := m.policies[proto.PolicyID{Tier: tier.Name, Name: polName}]
if direction == PolDirnIngress {
pols = append(pols, pol.InboundRules)
} else {
pols = append(pols, pol.OutboundRules)
}
}
allRules = append(allRules, pols)
}
}
var profs [][]*proto.Rule
for _, profName := range profileNames {
prof := m.profiles[proto.ProfileID{Name: profName}]
if direction == PolDirnIngress {
profs = append(profs, prof.InboundRules)
} else {
profs = append(profs, prof.OutboundRules)
}
}
allRules = append(allRules, profs)
return allRules
}
| 1 | 18,141 | nit: a switch perhaps? | projectcalico-felix | c |
@@ -14,7 +14,7 @@
<![endif]-->
<title><%= render_page_title %></title>
- <link href="<%= opensearch_catalog_path(:format => 'xml', :only_path => false) %>" title="<%= application_name%>" type="application/opensearchdescription+xml" rel="search"/>
+ <%= opensearch_description_tag application_name, opensearch_catalog_path(:format => 'xml', :only_path => false) %>
<%= favicon_link_tag asset_path('favicon.ico') %>
<%= stylesheet_link_tag "application" %>
<%= javascript_include_tag "application" %> | 1 | <!DOCTYPE html>
<html lang="en" class="no-js">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<!-- Mobile viewport optimization h5bp.com/ad -->
<meta name="HandheldFriendly" content="True">
<meta name="viewport" content="width=device-width,initial-scale=1.0">
<!-- Mobile IE allows us to activate ClearType technology for smoothing fonts for easy reading -->
<!--[if IEMobile]>
<meta http-equiv="cleartype" content="on">
<![endif]-->
<title><%= render_page_title %></title>
<link href="<%= opensearch_catalog_path(:format => 'xml', :only_path => false) %>" title="<%= application_name%>" type="application/opensearchdescription+xml" rel="search"/>
<%= favicon_link_tag asset_path('favicon.ico') %>
<%= stylesheet_link_tag "application" %>
<%= javascript_include_tag "application" %>
<%= csrf_meta_tags %>
<%= content_for(:head) %>
<!-- Le HTML5 shim, for IE6-8 support of HTML5 elements -->
<!--[if lt IE 9]>
<script src="//html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
</head>
<body class="<%= render_body_class %>">
<%= render :partial => 'shared/header_navbar' %>
<div id="ajax-modal" class="modal fade" tabindex="-1" role="dialog" aria-labelledby="modal menu" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
</div>
</div>
</div>
<!-- /container -->
<div id="main-container" class="container">
<div class="row">
<div class="col-md-12">
<div id="main-flashes">
<%= render :partial=>'/flash_msg' %>
</div>
</div>
</div>
<div class="row">
<%= yield %>
</div>
</div>
<%= render :partial => 'shared/footer' %>
</body>
</html>
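# The opensearch_description_tag helper referenced in the diff above could plausibly
# default its title and href arguments so most callers can omit them. Sketch only:
# the module name and the default values are assumptions, not the actual Blacklight API.
module CatalogHelperSketch
  def opensearch_description_tag(title = application_name,
                                 href = opensearch_catalog_path(format: 'xml', only_path: false))
    tag :link, href: href, title: title,
        type: 'application/opensearchdescription+xml', rel: 'search'
  end
end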
| 1 | 5,234 | Would it make sense for `opensearch_description_tag` to have default values for the title and href attributes? | projectblacklight-blacklight | rb |
@@ -281,7 +281,7 @@ func (j journalMDOps) getRange(
[]ImmutableRootMetadata, error) {
// Grab the range from the journal first.
jirmds, err := j.getRangeFromJournal(ctx, id, bid, mStatus, start, stop)
- if err != nil {
+ if err != nil && err != errTLFJournalDisabled {
return nil, err
}
| 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
// journalMDOps is an implementation of MDOps that delegates to a
// TLF's mdJournal, if one exists. Specifically, it intercepts put
// calls to write to the journal instead of the MDServer, where
// something else is presumably flushing the journal to the MDServer.
//
// It then intercepts get calls to provide a combined view of the MDs
// from the journal and the server when the journal is
// non-empty. Specifically, if rev is the earliest revision in the
// journal, and BID is the branch ID of the journal (which can only
// have one), then any requests for revisions >= rev on BID will be
// served from the journal instead of the server. If BID is empty,
// i.e. the journal is holding merged revisions, then this means that
// all merged revisions on the server from rev are hidden.
//
// TODO: This makes server updates meaningless for revisions >=
// rev. Fix this.
type journalMDOps struct {
MDOps
jServer *JournalServer
}
var _ MDOps = journalMDOps{}
// convertImmutableBareRMDToIRMD decrypts the bare MD into a
// full-fledged RMD.
func (j journalMDOps) convertImmutableBareRMDToIRMD(ctx context.Context,
ibrmd ImmutableBareRootMetadata, handle *TlfHandle,
uid keybase1.UID, key kbfscrypto.VerifyingKey) (
ImmutableRootMetadata, error) {
// TODO: Avoid having to do this type assertion.
brmd, ok := ibrmd.BareRootMetadata.(MutableBareRootMetadata)
if !ok {
return ImmutableRootMetadata{}, MutableBareRootMetadataNoImplError{}
}
rmd := makeRootMetadata(brmd, ibrmd.extra, handle)
config := j.jServer.config
pmd, err := decryptMDPrivateData(ctx, config.Codec(), config.Crypto(),
config.BlockCache(), config.BlockOps(), config.KeyManager(),
uid, rmd.GetSerializedPrivateMetadata(), rmd, rmd)
if err != nil {
return ImmutableRootMetadata{}, err
}
rmd.data = pmd
irmd := MakeImmutableRootMetadata(
rmd, key, ibrmd.mdID, ibrmd.localTimestamp)
return irmd, nil
}
// getHeadFromJournal returns the head RootMetadata for the TLF with
// the given ID stored in the journal, assuming it exists and matches
// the given branch ID and merge status. As a special case, if bid is
// NullBranchID and mStatus is Unmerged, the branch ID check is
// skipped.
func (j journalMDOps) getHeadFromJournal(
ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
handle *TlfHandle) (
ImmutableRootMetadata, error) {
tlfJournal, ok := j.jServer.getTLFJournal(id)
if !ok {
return ImmutableRootMetadata{}, nil
}
head, err := tlfJournal.getMDHead(ctx)
if err == errTLFJournalDisabled {
return ImmutableRootMetadata{}, nil
} else if err != nil {
return ImmutableRootMetadata{}, err
}
if head == (ImmutableBareRootMetadata{}) {
return ImmutableRootMetadata{}, nil
}
if head.MergedStatus() != mStatus {
return ImmutableRootMetadata{}, nil
}
if mStatus == Unmerged && bid != NullBranchID && bid != head.BID() {
// The given branch ID doesn't match the one in the
// journal, which can only be an error.
return ImmutableRootMetadata{},
fmt.Errorf("Expected branch ID %s, got %s",
bid, head.BID())
}
headBareHandle, err := head.MakeBareTlfHandleWithExtra()
if err != nil {
return ImmutableRootMetadata{}, err
}
if handle == nil {
handle, err = MakeTlfHandle(
ctx, headBareHandle, j.jServer.config.KBPKI())
if err != nil {
return ImmutableRootMetadata{}, err
}
} else {
// Check for mutual handle resolution.
headHandle, err := MakeTlfHandle(ctx, headBareHandle,
j.jServer.config.KBPKI())
if err != nil {
return ImmutableRootMetadata{}, err
}
if err := headHandle.MutuallyResolvesTo(ctx, j.jServer.config.Codec(),
j.jServer.config.KBPKI(), *handle, head.RevisionNumber(),
head.TlfID(), j.jServer.log); err != nil {
return ImmutableRootMetadata{}, err
}
}
irmd, err := j.convertImmutableBareRMDToIRMD(
ctx, head, handle, tlfJournal.uid, tlfJournal.key)
if err != nil {
return ImmutableRootMetadata{}, err
}
return irmd, nil
}
func (j journalMDOps) getRangeFromJournal(
ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
start, stop MetadataRevision) (
[]ImmutableRootMetadata, error) {
tlfJournal, ok := j.jServer.getTLFJournal(id)
if !ok {
return nil, nil
}
ibrmds, err := tlfJournal.getMDRange(ctx, start, stop)
if err == errTLFJournalDisabled {
return nil, nil
} else if err != nil {
return nil, err
}
if len(ibrmds) == 0 {
return nil, nil
}
head := ibrmds[len(ibrmds)-1]
if head.MergedStatus() != mStatus {
return nil, nil
}
if mStatus == Unmerged && bid != NullBranchID && bid != head.BID() {
// The given branch ID doesn't match the one in the
// journal, which can only be an error.
return nil, fmt.Errorf("Expected branch ID %s, got %s",
bid, head.BID())
}
bareHandle, err := head.MakeBareTlfHandleWithExtra()
if err != nil {
return nil, err
}
handle, err := MakeTlfHandle(ctx, bareHandle, j.jServer.config.KBPKI())
if err != nil {
return nil, err
}
irmds := make([]ImmutableRootMetadata, 0, len(ibrmds))
for _, ibrmd := range ibrmds {
irmd, err := j.convertImmutableBareRMDToIRMD(
ctx, ibrmd, handle, tlfJournal.uid, tlfJournal.key)
if err != nil {
return nil, err
}
irmds = append(irmds, irmd)
}
return irmds, nil
}
func (j journalMDOps) GetForHandle(
ctx context.Context, handle *TlfHandle, mStatus MergeStatus) (
tlf.ID, ImmutableRootMetadata, error) {
// Need to always consult the server to get the tlfID. No need to
// optimize this, since all subsequent lookups will be by
// TLF. Although if we did want to, we could store a handle -> TLF
// ID mapping with the journals. If we are looking for an
// unmerged head, that exists only in the journal, so check the
// remote server only to get the TLF ID.
remoteMStatus := mStatus
if mStatus == Unmerged {
remoteMStatus = Merged
}
tlfID, rmd, err := j.MDOps.GetForHandle(ctx, handle, remoteMStatus)
if err != nil {
return tlf.ID{}, ImmutableRootMetadata{}, err
}
if rmd != (ImmutableRootMetadata{}) && (rmd.TlfID() != tlfID) {
return tlf.ID{}, ImmutableRootMetadata{},
fmt.Errorf("Expected RMD to have TLF ID %s, but got %s",
tlfID, rmd.TlfID())
}
// If the journal has a head, use that.
irmd, err := j.getHeadFromJournal(
ctx, tlfID, NullBranchID, mStatus, handle)
if err != nil {
return tlf.ID{}, ImmutableRootMetadata{}, err
}
if irmd != (ImmutableRootMetadata{}) {
return tlf.ID{}, irmd, nil
}
if remoteMStatus != mStatus {
return tlfID, ImmutableRootMetadata{}, nil
}
// Otherwise, use the server's head.
return tlfID, rmd, nil
}
// TODO: Combine the two GetForTLF functions in MDOps to avoid the
// need for this helper function.
func (j journalMDOps) getForTLF(
ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
delegateFn func(context.Context, tlf.ID) (ImmutableRootMetadata, error)) (
ImmutableRootMetadata, error) {
// If the journal has a head, use that.
irmd, err := j.getHeadFromJournal(ctx, id, bid, mStatus, nil)
if err != nil {
return ImmutableRootMetadata{}, err
}
if irmd != (ImmutableRootMetadata{}) {
return irmd, nil
}
// Otherwise, consult the server instead.
return delegateFn(ctx, id)
}
func (j journalMDOps) GetForTLF(
ctx context.Context, id tlf.ID) (ImmutableRootMetadata, error) {
return j.getForTLF(ctx, id, NullBranchID, Merged, j.MDOps.GetForTLF)
}
func (j journalMDOps) GetUnmergedForTLF(
ctx context.Context, id tlf.ID, bid BranchID) (
ImmutableRootMetadata, error) {
delegateFn := func(ctx context.Context, id tlf.ID) (
ImmutableRootMetadata, error) {
return j.MDOps.GetUnmergedForTLF(ctx, id, bid)
}
return j.getForTLF(ctx, id, bid, Unmerged, delegateFn)
}
// TODO: Combine the two GetRange functions in MDOps to avoid the need
// for this helper function.
func (j journalMDOps) getRange(
ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
start, stop MetadataRevision,
delegateFn func(ctx context.Context, id tlf.ID,
start, stop MetadataRevision) (
[]ImmutableRootMetadata, error)) (
[]ImmutableRootMetadata, error) {
// Grab the range from the journal first.
jirmds, err := j.getRangeFromJournal(ctx, id, bid, mStatus, start, stop)
if err != nil {
return nil, err
}
// If it's empty or disabled, just fall back to the server.
if len(jirmds) == 0 || err == errTLFJournalDisabled {
return delegateFn(ctx, id, start, stop)
}
// If the first revision from the journal is the first
// revision we asked for, then just return the range from the
// journal.
if jirmds[0].Revision() == start {
return jirmds, nil
}
// Otherwise, fetch the rest from the server and prepend them.
serverStop := jirmds[0].Revision() - 1
irmds, err := delegateFn(ctx, id, start, serverStop)
if err != nil {
return nil, err
}
if len(irmds) == 0 {
return jirmds, nil
}
lastRev := irmds[len(irmds)-1].Revision()
if lastRev != serverStop {
return nil, fmt.Errorf(
"Expected last server rev %d, got %d",
serverStop, lastRev)
}
return append(irmds, jirmds...), nil
}
func (j journalMDOps) GetRange(
ctx context.Context, id tlf.ID, start, stop MetadataRevision) (
[]ImmutableRootMetadata, error) {
return j.getRange(ctx, id, NullBranchID, Merged, start, stop,
j.MDOps.GetRange)
}
func (j journalMDOps) GetUnmergedRange(
ctx context.Context, id tlf.ID, bid BranchID,
start, stop MetadataRevision) ([]ImmutableRootMetadata, error) {
delegateFn := func(ctx context.Context, id tlf.ID,
start, stop MetadataRevision) (
[]ImmutableRootMetadata, error) {
return j.MDOps.GetUnmergedRange(ctx, id, bid, start, stop)
}
return j.getRange(ctx, id, bid, Unmerged, start, stop,
delegateFn)
}
func (j journalMDOps) Put(ctx context.Context, rmd *RootMetadata) (
MdID, error) {
if tlfJournal, ok := j.jServer.getTLFJournal(rmd.TlfID()); ok {
// Just route to the journal.
mdID, err := tlfJournal.putMD(ctx, rmd)
if err != errTLFJournalDisabled {
return mdID, err
}
}
return j.MDOps.Put(ctx, rmd)
}
func (j journalMDOps) PutUnmerged(ctx context.Context, rmd *RootMetadata) (
MdID, error) {
if tlfJournal, ok := j.jServer.getTLFJournal(rmd.TlfID()); ok {
rmd.SetUnmerged()
mdID, err := tlfJournal.putMD(ctx, rmd)
if err != errTLFJournalDisabled {
return mdID, err
}
}
return j.MDOps.PutUnmerged(ctx, rmd)
}
func (j journalMDOps) PruneBranch(
ctx context.Context, id tlf.ID, bid BranchID) error {
if tlfJournal, ok := j.jServer.getTLFJournal(id); ok {
// Prune the journal, too.
err := tlfJournal.clearMDs(ctx, bid)
if err != nil && err != errTLFJournalDisabled {
return err
}
}
return j.MDOps.PruneBranch(ctx, id, bid)
}
func (j journalMDOps) ResolveBranch(
ctx context.Context, id tlf.ID, bid BranchID,
blocksToDelete []BlockID, rmd *RootMetadata) (MdID, error) {
if tlfJournal, ok := j.jServer.getTLFJournal(id); ok {
mdID, err := tlfJournal.resolveBranch(
ctx, bid, blocksToDelete, rmd, rmd.extra)
if err != errTLFJournalDisabled {
return mdID, err
}
}
return j.MDOps.ResolveBranch(ctx, id, bid, blocksToDelete, rmd)
}
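// A sketch of the control flow the getRange change above is aiming for: a disabled
// journal should behave like an empty journal and fall back to the server, instead of
// aborting the call. Hypothetical helper, not part of the original file:
func shouldFallBackToServer(jirmds []ImmutableRootMetadata, err error) (bool, error) {
	if err == errTLFJournalDisabled {
		return true, nil // journal disabled: read everything from the server
	}
	if err != nil {
		return false, err // real error: propagate it
	}
	return len(jirmds) == 0, nil // empty journal: also fall back to the server
}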
| 1 | 14,276 | Just noticed this while debugging -- it's unlikely to be causing issues in practice. | keybase-kbfs | go |
@@ -75,7 +75,7 @@ describe( 'setting up the Analytics module with no existing account and no exist
await expect( page ).toClick( '.googlesitekit-analytics__select-country' );
await expect( page ).toClick( '.mdc-menu-surface--open li', { text: /united kingdom/i } );
- await expect( page ).toMatchElement( 'p', { text: /need to give Site Kit permission to create an Analytics account/i } );
+ await expect( page ).toMatchElement( 'p', { text: /need to give Site Kit permission to create an Analytics account/i, timeout: 750 } );
await Promise.all( [
page.waitForNavigation(), // User is sent directly to OAuth. | 1 | /**
* WordPress dependencies
*/
import { activatePlugin, createURL, visitAdminPage } from '@wordpress/e2e-test-utils';
/**
* Internal dependencies
*/
import {
deactivateUtilityPlugins,
resetSiteKit,
useRequestInterception,
setSearchConsoleProperty,
} from '../../../utils';
describe( 'setting up the Analytics module with no existing account and no existing tag via proxy', () => {
beforeAll( async () => {
await page.setRequestInterception( true );
useRequestInterception( ( request ) => {
if ( request.url().startsWith( 'https://sitekit.withgoogle.com/o/oauth2/auth' ) ) {
request.respond( {
status: 302,
headers: {
location: createURL( '/wp-admin/index.php', [
'oauth2callback=1',
'code=valid-test-code',
// This is how the additional scope is granted.
'scope=https://www.googleapis.com/auth/analytics.provision',
].join( '&' ) ),
},
} );
} else if ( request.url().match( 'analytics/data/create-account-ticket' ) ) {
request.respond( { status: 200 } ); // Do nothing for now, return 200 to prevent error.
} else if ( request.url().match( '/wp-json/google-site-kit/v1/data/' ) ) {
request.respond( { status: 200 } );
} else {
request.continue();
}
} );
} );
beforeEach( async () => {
await activatePlugin( 'e2e-tests-proxy-auth-plugin' );
await activatePlugin( 'e2e-tests-site-verification-plugin' );
await activatePlugin( 'e2e-tests-oauth-callback-plugin' );
await activatePlugin( 'e2e-tests-module-setup-analytics-api-mock-no-account' );
await setSearchConsoleProperty();
await visitAdminPage( 'admin.php', 'page=googlesitekit-settings' );
await page.waitForSelector( '.mdc-tab-bar' );
await expect( page ).toClick( '.mdc-tab', { text: /connect more services/i } );
await page.waitForSelector( '.googlesitekit-settings-connect-module--analytics' );
await expect( page ).toClick( '.googlesitekit-cta-link', { text: /set up analytics/i } );
await page.waitForSelector( '.googlesitekit-setup-module--analytics' );
} );
afterEach( async () => {
await deactivateUtilityPlugins();
await resetSiteKit();
} );
it( 'displays account creation form when user has no Analytics account', async () => {
await expect( page ).toMatchElement( '.googlesitekit-heading-4', { text: /Create your Analytics account/i, timeout: 5000 } );
await expect( page ).toMatchElement( '.mdc-button', { text: /create account/i } );
} );
it( 'preserves user-filled values provided and auto-submits after approving permissions', async () => {
await page.waitForSelector( '.googlesitekit-heading-4' );
// Unfortunately, the view does not have a `form`, otherwise we could use `.toFillForm( el, fields )`
await expect( page ).toFill( '#googlesitekit_analytics_account_create_account', 'Test Account Name' );
await expect( page ).toFill( '#googlesitekit_analytics_account_create_property', 'Test Property Name' );
await expect( page ).toFill( '#googlesitekit_analytics_account_create_profile', 'Test View Name' );
await expect( page ).toClick( '.googlesitekit-analytics__select-country' );
await expect( page ).toClick( '.mdc-menu-surface--open li', { text: /united kingdom/i } );
await expect( page ).toMatchElement( 'p', { text: /need to give Site Kit permission to create an Analytics account/i } );
await Promise.all( [
page.waitForNavigation(), // User is sent directly to OAuth.
expect( page ).toClick( '.mdc-button', { text: /create account/i } ),
] );
// When returning from OAuth, the form will resubmit automatically, so we won't be able to see the form to verify the values there.
// Instead, we can ensure that they were passed in the request to `create-account-ticket`
// Everything else is difficult to mock out here.
let reqBody;
await page.waitForRequest( ( req ) => req.url().match( 'analytics/data/create-account-ticket' ) && ( reqBody = req.postData() ) );
expect( JSON.parse( reqBody ) ).toMatchObject( {
data: {
accountName: 'Test Account Name',
propertyName: 'Test Property Name',
profileName: 'Test View Name',
timezone: 'Etc/GMT',
},
} );
} );
} );
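// For context on the timeout added in the diff above: expect-puppeteer matchers accept a
// per-call `timeout` that overrides the globally configured default. Illustrative values only:
const { setDefaultOptions } = require( 'expect-puppeteer' );

setDefaultOptions( { timeout: 3000 } ); // default budget for every matcher

it( 'waits up to 750ms for the permission notice', async () => {
	await expect( page ).toMatchElement( 'p', {
		text: /permission to create an Analytics account/i,
		timeout: 750, // overrides the 3000ms default for just this assertion
	} );
} );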
| 1 | 30,916 | That seems unrelated - did that fix a random test failure you noticed while working on this issue? | google-site-kit-wp | js |
@@ -98,8 +98,11 @@ public abstract class BaseMetastoreTableOperations implements TableOperations {
LOG.info("Nothing to commit.");
return;
}
+ TableMetadata updated = (base != null && base.file() != null) ?
+ metadata.addPreviousMetadata(base.file().location(), base.lastUpdatedMillis()) : metadata;
- doCommit(base, metadata);
+ doCommit(base, updated);
+ deleteRemovedMetadata(updated);
requestRefresh();
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.util.Tasks;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class BaseMetastoreTableOperations implements TableOperations {
private static final Logger LOG = LoggerFactory.getLogger(BaseMetastoreTableOperations.class);
public static final String TABLE_TYPE_PROP = "table_type";
public static final String ICEBERG_TABLE_TYPE_VALUE = "iceberg";
public static final String METADATA_LOCATION_PROP = "metadata_location";
public static final String PREVIOUS_METADATA_LOCATION_PROP = "previous_metadata_location";
private static final String METADATA_FOLDER_NAME = "metadata";
private static final String DATA_FOLDER_NAME = "data";
private TableMetadata currentMetadata = null;
private String currentMetadataLocation = null;
private boolean shouldRefresh = true;
private int version = -1;
protected BaseMetastoreTableOperations() { }
@Override
public TableMetadata current() {
if (shouldRefresh) {
return refresh();
}
return currentMetadata;
}
public String currentMetadataLocation() {
return currentMetadataLocation;
}
public int currentVersion() {
return version;
}
@Override
public TableMetadata refresh() {
try {
doRefresh();
} catch (NoSuchTableException e) {
LOG.warn("Could not find the table during refresh, setting current metadata to null", e);
currentMetadata = null;
currentMetadataLocation = null;
version = -1;
shouldRefresh = false;
throw e;
}
return current();
}
protected void doRefresh() {
throw new UnsupportedOperationException("Not implemented: doRefresh");
}
@Override
public void commit(TableMetadata base, TableMetadata metadata) {
// if the metadata is already out of date, reject it
if (base != current()) {
throw new CommitFailedException("Cannot commit: stale table metadata");
}
// if the metadata is not changed, return early
if (base == metadata) {
LOG.info("Nothing to commit.");
return;
}
doCommit(base, metadata);
requestRefresh();
}
protected void doCommit(TableMetadata base, TableMetadata metadata) {
throw new UnsupportedOperationException("Not implemented: doCommit");
}
protected void requestRefresh() {
this.shouldRefresh = true;
}
protected String writeNewMetadata(TableMetadata metadata, int newVersion) {
String newTableMetadataFilePath = newTableMetadataFilePath(metadata, newVersion);
OutputFile newMetadataLocation = io().newOutputFile(newTableMetadataFilePath);
// write the new metadata
// use overwrite to avoid negative caching in S3. this is safe because the metadata location is
// always unique because it includes a UUID.
TableMetadataParser.overwrite(metadata, newMetadataLocation);
return newMetadataLocation.location();
}
protected void refreshFromMetadataLocation(String newLocation) {
refreshFromMetadataLocation(newLocation, null, 20);
}
protected void refreshFromMetadataLocation(String newLocation, int numRetries) {
refreshFromMetadataLocation(newLocation, null, numRetries);
}
protected void refreshFromMetadataLocation(String newLocation, Predicate<Exception> shouldRetry,
int numRetries) {
// use null-safe equality check because new tables have a null metadata location
if (!Objects.equal(currentMetadataLocation, newLocation)) {
LOG.info("Refreshing table metadata from new version: {}", newLocation);
AtomicReference<TableMetadata> newMetadata = new AtomicReference<>();
Tasks.foreach(newLocation)
.retry(numRetries).exponentialBackoff(100, 5000, 600000, 4.0 /* 100, 400, 1600, ... */)
.throwFailureWhenFinished()
.shouldRetryTest(shouldRetry)
.run(metadataLocation -> newMetadata.set(
TableMetadataParser.read(this, io().newInputFile(metadataLocation))));
String newUUID = newMetadata.get().uuid();
if (currentMetadata != null) {
Preconditions.checkState(newUUID == null || newUUID.equals(currentMetadata.uuid()),
"Table UUID does not match: current=%s != refreshed=%s", currentMetadata.uuid(), newUUID);
}
this.currentMetadata = newMetadata.get();
this.currentMetadataLocation = newLocation;
this.version = parseVersion(newLocation);
}
this.shouldRefresh = false;
}
private String metadataFileLocation(TableMetadata metadata, String filename) {
String metadataLocation = metadata.properties()
.get(TableProperties.WRITE_METADATA_LOCATION);
if (metadataLocation != null) {
return String.format("%s/%s", metadataLocation, filename);
} else {
return String.format("%s/%s/%s", metadata.location(), METADATA_FOLDER_NAME, filename);
}
}
@Override
public String metadataFileLocation(String filename) {
return metadataFileLocation(current(), filename);
}
@Override
public LocationProvider locationProvider() {
return LocationProviders.locationsFor(current().location(), current().properties());
}
@Override
public TableOperations temp(TableMetadata uncommittedMetadata) {
return new TableOperations() {
@Override
public TableMetadata current() {
return uncommittedMetadata;
}
@Override
public TableMetadata refresh() {
throw new UnsupportedOperationException("Cannot call refresh on temporary table operations");
}
@Override
public void commit(TableMetadata base, TableMetadata metadata) {
throw new UnsupportedOperationException("Cannot call commit on temporary table operations");
}
@Override
public String metadataFileLocation(String fileName) {
return BaseMetastoreTableOperations.this.metadataFileLocation(uncommittedMetadata, fileName);
}
@Override
public LocationProvider locationProvider() {
return LocationProviders.locationsFor(uncommittedMetadata.location(), uncommittedMetadata.properties());
}
@Override
public FileIO io() {
return BaseMetastoreTableOperations.this.io();
}
@Override
public EncryptionManager encryption() {
return BaseMetastoreTableOperations.this.encryption();
}
@Override
public long newSnapshotId() {
return BaseMetastoreTableOperations.this.newSnapshotId();
}
};
}
private String newTableMetadataFilePath(TableMetadata meta, int newVersion) {
String codecName = meta.property(
TableProperties.METADATA_COMPRESSION, TableProperties.METADATA_COMPRESSION_DEFAULT);
String fileExtension = TableMetadataParser.getFileExtension(codecName);
return metadataFileLocation(meta, String.format("%05d-%s%s", newVersion, UUID.randomUUID(), fileExtension));
}
private static int parseVersion(String metadataLocation) {
int versionStart = metadataLocation.lastIndexOf('/') + 1; // if '/' isn't found, this will be 0
int versionEnd = metadataLocation.indexOf('-', versionStart);
try {
return Integer.valueOf(metadataLocation.substring(versionStart, versionEnd));
} catch (NumberFormatException e) {
LOG.warn("Unable to parse version from metadata location: {}", metadataLocation, e);
return -1;
}
}
}
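// A rough sketch of the alternative discussed in review, with hypothetical names (the
// real TableMetadata fields and update methods differ): each TableMetadata update method
// records the file the current metadata was loaded from, so commit() never has to.
class TableMetadataSketch {
  private final String currentFileLocation; // null when this metadata has not been written out yet
  private final java.util.List<String> previousFiles;

  TableMetadataSketch(String currentFileLocation, java.util.List<String> previousFiles) {
    this.currentFileLocation = currentFileLocation;
    this.previousFiles = previousFiles;
  }

  // Shared by every update method (addSnapshot, replaceProperties, ...): carry the existing
  // log forward and append the current file, much like rollbackTo already updates history.
  private java.util.List<String> previousFilesForUpdate() {
    java.util.List<String> entries = new java.util.ArrayList<>(previousFiles);
    if (currentFileLocation != null) {
      entries.add(currentFileLocation);
    }
    return entries;
  }

  TableMetadataSketch withUpdatedProperties() {
    return new TableMetadataSketch(null, previousFilesForUpdate());
  }
}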
| 1 | 16,614 | This kind of concern should be handled in the `TableMetadata` update methods. This is similar to `metadata.rollbackTo`, which not only sets the current version, but also updates the table history. I think that all of those methods should add the current object's `file.location()` to the previous metadata location list, if it is non-null. That way, the caller never needs to remember to update it. | apache-iceberg | java |
@@ -82,3 +82,18 @@ def flip_tensor(src_tensor, flip_direction):
else:
out_tensor = torch.flip(src_tensor, [2, 3])
return out_tensor
+
+
+def collect_mlvl_tensor_single(mlvl_tensors, batch_id, detach=True):
+ assert isinstance(mlvl_tensors, (list, tuple))
+ num_levels = len(mlvl_tensors)
+
+ if detach:
+ mlvl_tensor_list = [
+ mlvl_tensors[i][batch_id].detach() for i in range(num_levels)
+ ]
+ else:
+ mlvl_tensor_list = [
+ mlvl_tensors[i][batch_id] for i in range(num_levels)
+ ]
+ return mlvl_tensor_list | 1 | from functools import partial
import numpy as np
import torch
from six.moves import map, zip
from ..mask.structures import BitmapMasks, PolygonMasks
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments.
Note:
This function applies the ``func`` to multiple inputs and
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
tuple(list): A tuple containing multiple list, each list contains \
a kind of returned results by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of size
count)"""
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds.type(torch.bool), :] = data
return ret
def mask2ndarray(mask):
"""Convert Mask to ndarray..
Args:
mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
torch.Tensor or np.ndarray): The mask to be converted.
Returns:
np.ndarray: Ndarray mask of shape (n, h, w) that has been converted
"""
if isinstance(mask, (BitmapMasks, PolygonMasks)):
mask = mask.to_ndarray()
elif isinstance(mask, torch.Tensor):
mask = mask.detach().cpu().numpy()
elif not isinstance(mask, np.ndarray):
raise TypeError(f'Unsupported {type(mask)} data type')
return mask
def flip_tensor(src_tensor, flip_direction):
"""flip tensor base on flip_direction.
Args:
src_tensor (Tensor): input feature map, shape (B, C, H, W).
flip_direction (str): The flipping direction. Options are
'horizontal', 'vertical', 'diagonal'.
Returns:
out_tensor (Tensor): Flipped tensor.
"""
assert src_tensor.ndim == 4
valid_directions = ['horizontal', 'vertical', 'diagonal']
assert flip_direction in valid_directions
if flip_direction == 'horizontal':
out_tensor = torch.flip(src_tensor, [3])
elif flip_direction == 'vertical':
out_tensor = torch.flip(src_tensor, [2])
else:
out_tensor = torch.flip(src_tensor, [2, 3])
return out_tensor
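# Usage sketch for the collect_mlvl_tensor_single helper added in the diff above
# (re-declared here so the snippet is self-contained; shapes are illustrative).
import torch


def collect_mlvl_tensor_single(mlvl_tensors, batch_id, detach=True):
    assert isinstance(mlvl_tensors, (list, tuple))
    num_levels = len(mlvl_tensors)
    if detach:
        return [mlvl_tensors[i][batch_id].detach() for i in range(num_levels)]
    return [mlvl_tensors[i][batch_id] for i in range(num_levels)]


# Three FPN levels, a batch of 2 images, 80 classes.
cls_scores = [torch.randn(2, 80, s, s) for s in (64, 32, 16)]
per_image_scores = collect_mlvl_tensor_single(cls_scores, batch_id=0)
print([tuple(t.shape) for t in per_image_scores])  # [(80, 64, 64), (80, 32, 32), (80, 16, 16)]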
| 1 | 23,924 | We need to know why all the tensors are detached in `get_bboxed`. | open-mmlab-mmdetection | py |
@@ -309,3 +309,13 @@ class Foo:
def method(self):
print(self.__class__.__ham)
+
+
+class TypeSelfCallInMethod:
+ """Regression test for issue 5569"""
+ @classmethod
+ def b(cls) -> None:
+ cls.__a = '' # [unused-private-member]
+
+ def a(self):
+ return type(self).__a | 1 | # pylint: disable=missing-docstring, invalid-name, too-few-public-methods, no-self-use, line-too-long, unused-argument, protected-access
class AnotherClass():
def __test(self): # [unused-private-member]
pass
class HasUnusedInClass():
__my_secret = "I have no secrets" # [unused-private-member]
__my_used_secret = "I have no secrets unused"
@classmethod
def __private_class_method_unused(cls): # [unused-private-member]
print(cls.__my_used_secret)
@classmethod
def __private_class_method_used(cls):
pass
@staticmethod
def __private_static_method_unused(): # [unused-private-member]
pass
@staticmethod
def __private_static_method_used():
pass
def __init__(self): # Will not trigger as it begins with __ and ends with __
self.__instance_secret = "I will never be initialized" # [unused-private-member]
self.__another_secret = "hello world"
def __str__(self): # Will not trigger as it begins with __ and ends with __
return "hello"
def __test(self, x, y, z): # [unused-private-member]
fn = HasUnusedInClass.__private_class_method_used
fn()
fn2 = HasUnusedInClass.__private_static_method_used
fn2()
def __my_print(self, string):
print(self.__another_secret + string)
another_obj = AnotherClass()
another_obj.__test() # this class's test should still be unused
def hey(self): # Will not trigger as it does not begin with __
self.__my_print("!")
def __test_fn_as_var(self):
pass
def assign_fn_to_var(self):
fn = self.__test_fn_as_var
fn()
def __test_recursive(self): # [unused-private-member]
self.__test_recursive()
# False positive: Singleton Pattern
class MyCls:
__class_var = None
@classmethod
def set_class_var(cls, var):
cls.__class_var = var # should not emit a message, used in get_class_var()
@classmethod
def get_class_var(cls):
return cls.__class_var
class Bla:
"""Regression test for issue 4638"""
def __init__(self):
type(self).__a()
self.__b()
Bla.__c()
@classmethod
def __a(cls):
pass
@classmethod
def __b(cls):
pass
@classmethod
def __c(cls):
pass
class Klass:
"""Regression test for 4644"""
__seventyseven = 77
__ninetyone = 91
def __init__(self):
self.twentyone = 21 * (1 / (self.__seventyseven + 33)) % 100
self.ninetyfive = Klass.__ninetyone + 4
k = Klass()
print(k.twentyone)
print(k.ninetyfive)
# https://github.com/PyCQA/pylint/issues/4657
# Mutation of class member with cls should not fire a false-positive
class FalsePositive4657:
"""False positivie tests for 4657"""
__attr_a = None
__attr_b = 'b'
@classmethod
def load_attrs(cls):
"""Load attributes."""
cls.__attr_a = 'a'
@property
def attr_a(self):
"""Get a."""
return self.__attr_a
@property
def attr_b(self):
"""Get b."""
return self.__attr_b
# Test cases where we assign self.attr, but try to
# access cls.attr
def __init__(self):
self.__attr_c = "this is an unused private instance attribute" # [unused-private-member]
@property
def attr_c(self):
"""Get c."""
return cls.__attr_c # [undefined-variable]
# https://github.com/PyCQA/pylint/issues/4668
# Attributes assigned within __new__() has to be processed as part of the class
class FalsePositive4668:
# pylint: disable=protected-access, no-member, unreachable
def __new__(cls, func, *args):
if args:
true_obj = super(FalsePositive4668, cls).__new__(cls)
true_obj.func = func
true_obj.__args = args # Do not emit message here
return true_obj
false_obj = super(FalsePositive4668, cls).__new__(cls)
false_obj.func = func
false_obj.__args = args # Do not emit message here
false_obj.__secret_bool = False
false_obj.__unused = None # [unused-private-member]
return false_obj
# unreachable but non-Name return value
return 3+4
def exec(self):
print(self.__secret_bool)
return self.func(*self.__args)
# https://github.com/PyCQA/pylint/issues/4673
# Nested functions shouldn't cause a false positive if they are properly used
class FalsePositive4673:
""" The testing class """
def __init__(self, in_thing):
self.thing = False
self.do_thing(in_thing)
def do_thing(self, in_thing):
""" Checks the false-positive condition, sets a property. """
def __false_positive(in_thing):
print(in_thing)
def __true_positive(in_thing): # [unused-private-member]
print(in_thing)
__false_positive(in_thing)
self.thing = True
def undo_thing(self):
""" Unsets a property. """
self.thing = False
def complicated_example(self, flag):
def __inner_1():
pass
def __inner_2():
pass
def __inner_3(fn):
return fn
def __inner_4(): # [unused-private-member]
pass
fn_to_return = __inner_1 if flag else __inner_3(__inner_2)
return fn_to_return
# https://github.com/PyCQA/pylint/issues/4755
# Nested attributes shouldn't cause crash
class Crash4755Context:
def __init__(self):
self.__messages = None # [unused-private-member]
class Crash4755Command:
def __init__(self):
self.context = Crash4755Context()
def method(self):
self.context.__messages = []
for message in self.context.__messages:
print(message)
# https://github.com/PyCQA/pylint/issues/4681
# Accessing attributes of the class using the class name should not result in a false positive
# as long as it is used within the class
class FalsePositive4681:
__instance = None
__should_cause_error = None # [unused-private-member]
@staticmethod
def instance():
if FalsePositive4681.__instance is None:
FalsePositive4681()
return FalsePositive4681.__instance
def __init__(self):
try:
FalsePositive4681.__instance = 42 # This should be fine
FalsePositive4681.__should_cause_error = True # [unused-private-member]
except Exception: # pylint: disable=broad-except
print("Error")
FalsePositive4681.__instance = False # This should be fine
FalsePositive4681.__should_cause_error = False # [unused-private-member]
# Accessing attributes of the class using `cls` should not result in a false positive
# as long as it is used within the class
class FalsePositive4681b:
__instance = None
@classmethod # Use class method here
def instance(cls):
if cls.__instance is None:
cls()
return cls.__instance
def __init__(self):
try:
FalsePositive4681b.__instance = 42 # This should be fine
except Exception: # pylint: disable=broad-except
print("Error")
FalsePositive4681b.__instance = False # This should be fine
# https://github.com/PyCQA/pylint/issues/4849
# Accessing private static methods from classmethods via `cls` should not result in a
# false positive
class FalsePositive4849:
@staticmethod
def __private_method():
"""Is private and does nothing."""
# This should already be covered by `HasUnusedInClass`
@staticmethod
def __unused_private_method(): # [unused-private-member]
"""Is not used."""
@classmethod
def use_private_method(cls):
"""Calls private method."""
cls.__private_method() # This should pass
class Pony:
"""https://github.com/PyCQA/pylint/issues/4837"""
__defaults = {}
__defaults_set = False
def __init__(self, value):
self.value = value
def __init_defaults(self): # [unused-private-member]
if not self.__defaults_set:
type(self).__defaults = { "fur": "pink" }
type(self).__defaults_set = True
def __get_fur_color(self): # [unused-private-member]
color = lookup_attribute(self.__defaults, "fur")
return color
def lookup_attribute(mapping, key):
return mapping[key]
# Test for regression on checking __class__ attribute
# See: https://github.com/PyCQA/pylint/issues/5261
class Foo:
__ham = 1
def method(self):
print(self.__class__.__ham)
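# For context on attribute-defined-outside-init, which comes up in review: that message
# fires on instance attributes first assigned outside __init__. A sketch, separate from
# the functional-test cases above:
class OutsideInitExample:
    def configure(self):
        self.mode = "fast"  # would ordinarily trigger attribute-defined-outside-init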
| 1 | 20,271 | Unrelated but shouldn't this raise `attribute-defined-outside-init`? | PyCQA-pylint | py |
@@ -24,7 +24,7 @@ class Service(service.ChromiumService):
"""
def __init__(self, executable_path, port=0, service_args=None,
- log_path=None, env=None):
+ log_path=None, env=None, create_no_window=False):
"""
Creates a new instance of the Service
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.chromium import service
class Service(service.ChromiumService):
"""
Object that manages the starting and stopping of the ChromeDriver
"""
def __init__(self, executable_path, port=0, service_args=None,
log_path=None, env=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to the ChromeDriver
- port : Port the service is running on
- service_args : List of args to pass to the chromedriver service
- log_path : Path for the chromedriver service to log to"""
super(Service, self).__init__(
executable_path,
port,
service_args,
log_path,
env,
"Please see https://chromedriver.chromium.org/home")
| 1 | 17,876 | I would rather no have this as a `kwarg` as it encourages "growth" which lead to an unweildy constructor in other classes. Let's add a method or property to take care of this instead as I think it's usage is going to be quite low. | SeleniumHQ-selenium | js |
@@ -98,8 +98,7 @@ class Phase < ActiveRecord::Base
# TODO: Move this to Plan model as `num_answered_questions(phase=nil)`
# Returns the number of answered question for the phase.
def num_answered_questions(plan)
- return 0 if plan.nil?
- sections.to_a.sum { |s| s.num_answered_questions(plan) }
+ plan&.num_answered_questions.to_i
end
# Returns the number of questions for a phase. Note, this method becomes useful | 1 | # == Schema Information
#
# Table name: phases
#
# id :integer not null, primary key
# description :text
# modifiable :boolean
# number :integer
# title :string
# created_at :datetime
# updated_at :datetime
# template_id :integer
#
# Indexes
#
# index_phases_on_template_id (template_id)
#
# Foreign Keys
#
# fk_rails_... (template_id => templates.id)
#
# [+Project:+] DMPRoadmap
# [+Description:+] This model describes information about the phase of a plan: its title, order of display and which template it belongs to.
#
# [+Created:+] 03/09/2014
# [+Copyright:+] Digital Curation Centre and University of California Curation Center
class Phase < ActiveRecord::Base
include ValidationMessages
include ValidationValues
include ActsAsSortable
##
# Sort order: Number ASC
default_scope { order(number: :asc) }
# ================
# = Associations =
# ================
belongs_to :template, touch: true
belongs_to :plan
has_one :prefix_section, -> (phase) {
modifiable.where("number < ?",
phase.sections.not_modifiable.minimum(:number))
}, class_name: "Section"
has_many :sections, dependent: :destroy
has_many :template_sections, -> {
not_modifiable
}, class_name: "Section"
has_many :suffix_sections, -> (phase) {
modifiable.where(<<~SQL, phase_id: phase.id, modifiable: false)
sections.number > (SELECT MAX(number) FROM sections
WHERE sections.modifiable = :modifiable
AND sections.phase_id = :phase_id)
SQL
}, class_name: "Section"
# ===============
# = Validations =
# ===============
validates :title, presence: { message: PRESENCE_MESSAGE }
validates :number, presence: { message: PRESENCE_MESSAGE },
uniqueness: { message: UNIQUENESS_MESSAGE,
scope: :template_id }
validates :template, presence: { message: PRESENCE_MESSAGE }
validates :modifiable, inclusion: { in: BOOLEAN_VALUES,
message: INCLUSION_MESSAGE }
# ==========
# = Scopes =
# ==========
scope :titles, -> (template_id) {
Phase.where(template_id: template_id).select(:id, :title)
}
def deep_copy(**options)
copy = self.dup
copy.modifiable = options.fetch(:modifiable, self.modifiable)
copy.template_id = options.fetch(:template_id, nil)
copy.save!(validate:false) if options.fetch(:save, false)
options[:phase_id] = copy.id
self.sections.each{ |section| copy.sections << section.deep_copy(options) }
return copy
end
# TODO: Move this to Plan model as `num_answered_questions(phase=nil)`
# Returns the number of answered question for the phase.
def num_answered_questions(plan)
return 0 if plan.nil?
sections.to_a.sum { |s| s.num_answered_questions(plan) }
end
  # Returns the number of questions for a phase. Note: this method becomes useful when
  # sections and their questions are eager loaded, since it avoids extra SQL queries.
def num_questions
n = 0
self.sections.each do |s|
n+= s.questions.size()
end
n
end
end
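# A small illustration of the nil-safety pattern used in the change above: `&.` skips the
# call when the receiver is nil, and NilClass#to_i turns the resulting nil into 0, so the
# explicit `return 0 if plan.nil?` guard becomes unnecessary.
FakePlan = Struct.new(:num_answered_questions)
plan = nil
plan&.num_answered_questions.to_i    #=> 0 (nil&.foo is nil, and nil.to_i is 0)
plan = FakePlan.new(7)
plan&.num_answered_questions.to_i    #=> 7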
| 1 | 18,071 | You just changed my life :) I grew very attached to this safe nav approach when working with Groovy years ago `obj?.method`. Glad to see its now a part of Ruby! | DMPRoadmap-roadmap | rb |
@@ -23,9 +23,10 @@
<div class="checkbox">
<%= f.label :published, raw("#{f.check_box :published, as: :check_boxes, 'data-toggle': 'tooltip', title: _("Check this box when you are ready for this guidance to appear on user's plans.")} #{_('Published?')}") %>
</div>
-
- <%= f.submit _('Save'), name: "edit_guidance_submit", class: "btn btn-primary" %>
- <%= link_to _('Cancel'), admin_index_guidance_path, class: "btn btn-primary", role: 'button' %>
+ <div class="form-group clear-fix">
+ <%= f.submit _('Save'), name: "edit_guidance_submit", class: "btn btn-primary" %>
+ <%= link_to _('Cancel'), admin_index_guidance_path, class: "btn btn-primary", role: 'button' %>
+ </div>
<%end%>
</div>
</div> | 1 | <%# locals: { guidance, themes, guidance_groups, options } %>
<div class="row">
<div class="col-xs-12">
<h1><%= _('Guidance') %></h1>
<%= link_to _('View all guidance'), admin_index_guidance_path(current_user.org_id), class: 'btn btn-default pull-right' %>
</div>
</div>
<div class="row">
<div class="col-xs-12">
<%= form_for(guidance, url: options[:url], html: { method: options[:method] , id: 'new_edit_guidance'}) do |f| %>
<div class="form-group" data-toggle="tooltip" title="<%= _('Enter your guidance here. You can include links where needed.') %>">
<%= f.label :text, class: 'control-label' %>
<%= text_area_tag("guidance-text", guidance.text, class: "tinymce form-control", 'aria-required': true, rows: 10) %>
</div>
<%= render partial: 'org_admin/shared/theme_selector',
locals: { f: f, all_themes: themes,
popover_message: _('Select one or more themes that are relevant to this guidance. This will display your generic organisation-level guidance, as well as that from other sources e.g. the %{org_name} guidance or any Schools/Departments that you provide guidance for.') % { org_name: (current_user.org.abbreviation.present? ? current_user.org.abbreviation : current_user.org.name ) } } %>
<div class="form-group">
<%= f.label _('Guidance group'), for: :guidance_group_id, class: 'control-label' %>
<%= f.collection_select(:guidance_group_id, guidance_groups,
:id, :name, {prompt: false}, {multiple: false, 'data-toggle': 'tooltip', title: _('Select which group this guidance relates to.'), class: 'form-control', 'aria-required': true})%>
</div>
<div class="checkbox">
<%= f.label :published, raw("#{f.check_box :published, as: :check_boxes, 'data-toggle': 'tooltip', title: _("Check this box when you are ready for this guidance to appear on user's plans.")} #{_('Published?')}") %>
</div>
<%= f.submit _('Save'), name: "edit_guidance_submit", class: "btn btn-primary" %>
<%= link_to _('Cancel'), admin_index_guidance_path, class: "btn btn-primary", role: 'button' %>
<%end%>
</div>
</div> | 1 | 17,668 | do you mean 'clearfix' class? | DMPRoadmap-roadmap | rb |
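<%# Bootstrap's float-clearing utility class is `clearfix` (no hyphen), so if that is what
    the wrapper added in the diff above intends, it would presumably read: %>
<div class="form-group clearfix">
  <%= f.submit _('Save'), name: "edit_guidance_submit", class: "btn btn-primary" %>
  <%= link_to _('Cancel'), admin_index_guidance_path, class: "btn btn-primary", role: 'button' %>
</div>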
@@ -391,6 +391,7 @@ MongoClient.connect = function(url, options, callback) {
* Starts a new session on the server
*
* @param {SessionOptions} [options] optional settings for a driver session
+ * @param {boolean} [options.causalConsistency=true] Enables or disables causal consistency for the session.
* @return {ClientSession} the newly established session
*/
MongoClient.prototype.startSession = function(options) { | 1 | 'use strict';
const ChangeStream = require('./change_stream');
const Db = require('./db');
const EventEmitter = require('events').EventEmitter;
const executeOperation = require('./utils').executeOperation;
const inherits = require('util').inherits;
const MongoError = require('mongodb-core').MongoError;
const resolveReadPreference = require('./utils').resolveReadPreference;
// Operations
const connectOp = require('./operations/mongo_client_ops').connectOp;
const logout = require('./operations/mongo_client_ops').logout;
const validOptions = require('./operations/mongo_client_ops').validOptions;
/**
* @fileOverview The **MongoClient** class is a class that allows for making Connections to MongoDB.
*
* @example
* // Connect using a MongoClient instance
* const MongoClient = require('mongodb').MongoClient;
* const test = require('assert');
* // Connection url
* const url = 'mongodb://localhost:27017';
* // Database Name
* const dbName = 'test';
* // Connect using MongoClient
* const mongoClient = new MongoClient(url);
* mongoClient.connect(function(err, client) {
* const db = client.db(dbName);
* client.close();
* });
*
* @example
* // Connect using the MongoClient.connect static method
* const MongoClient = require('mongodb').MongoClient;
* const test = require('assert');
* // Connection url
* const url = 'mongodb://localhost:27017';
* // Database Name
* const dbName = 'test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, client) {
* const db = client.db(dbName);
* client.close();
* });
*/
/**
* Creates a new MongoClient instance
* @class
* @param {string} url The connection URI string
* @param {object} [options] Optional settings
* @param {number} [options.poolSize=5] The maximum size of the individual server pool
* @param {boolean} [options.ssl=false] Enable SSL connection.
* @param {boolean} [options.sslValidate=true] Validate mongod server certificate against Certificate Authority
* @param {buffer} [options.sslCA=undefined] SSL Certificate store binary buffer
* @param {buffer} [options.sslCert=undefined] SSL Certificate binary buffer
* @param {buffer} [options.sslKey=undefined] SSL Key file binary buffer
* @param {string} [options.sslPass=undefined] SSL Certificate pass phrase
* @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer
* @param {boolean} [options.autoReconnect=true] Enable autoReconnect for single server instances
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {number} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket
* @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting
* @param {number} [options.family] Version of IP stack. Can be 4, 6 or null (default).
* If null, will attempt to connect with IPv6, and will fall back to IPv4 on failure
* @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
* @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies
* @param {number} [options.haInterval=10000] The High availability period for replicaset inquiry
* @param {string} [options.replicaSet=undefined] The Replicaset set name
* @param {number} [options.secondaryAcceptableLatencyMS=15] Cutoff latency point in MS for Replicaset member selection
* @param {number} [options.acceptableLatencyMS=15] Cutoff latency point in MS for Mongos proxies selection
* @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
* @param {string} [options.authSource=undefined] Define the database to authenticate against
* @param {(number|string)} [options.w] The write concern
* @param {number} [options.wtimeout] The write concern timeout
* @param {boolean} [options.j=false] Specify a journal write concern
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers
* @param {number} [options.bufferMaxEntries=-1] Sets a cap on how many operations the driver will buffer up before giving up on getting a working connection, default is -1 which is unlimited
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST)
* @param {object} [options.pkFactory] A primary key factory object for generation of custom _id keys
* @param {object} [options.promiseLibrary] A Promise library class the application wishes to use such as Bluebird, must be ES6 compatible
* @param {object} [options.readConcern] Specify a read concern for the collection (only MongoDB 3.2 or higher supported)
* @param {string} [options.readConcern.level='local'] Specify a read concern level for the collection operations, one of [local|majority]. (only MongoDB 3.2 or higher supported)
* @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed)
* @param {string} [options.loggerLevel=undefined] The logging level (error/warn/info/debug)
* @param {object} [options.logger=undefined] Custom logger object
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers
* @param {boolean} [options.promoteLongs=true] Promotes long values to number if they fit inside the 53 bits resolution
* @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit
 * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identity during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function
* @param {object} [options.validateOptions=false] Validate MongoClient passed in options for correctness
* @param {string} [options.appname=undefined] The name of the application that created this MongoClient instance. MongoDB 3.4 and newer will print this value in the server log upon establishing each connection. It is also recorded in the slow query log and profile collections
* @param {string} [options.auth.user=undefined] The username for auth
* @param {string} [options.auth.password=undefined] The password for auth
* @param {string} [options.authMechanism=undefined] Mechanism for authentication: MDEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1
* @param {object} [options.compression] Type of compression to use: snappy or zlib
* @param {boolean} [options.fsync=false] Specify a file sync write concern
* @param {array} [options.readPreferenceTags] Read preference tags
* @param {number} [options.numberOfRetries=5] The number of retries for a tailable cursor
* @param {boolean} [options.auto_reconnect=true] Enable auto reconnecting for single server instances
* @param {boolean} [options.monitorCommands=false] Enable command monitoring for this client
* @param {number} [options.minSize] If present, the connection pool will be initialized with minSize connections, and will never dip below minSize connections
* @param {boolean} [options.useNewUrlParser=false] Determines whether or not to use the new url parser
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {MongoClient} a MongoClient instance
*/
function MongoClient(url, options) {
if (!(this instanceof MongoClient)) return new MongoClient(url, options);
// Set up event emitter
EventEmitter.call(this);
// The internal state
this.s = {
url: url,
options: options || {},
promiseLibrary: null,
dbCache: {},
sessions: []
};
// Get the promiseLibrary
const promiseLibrary = this.s.options.promiseLibrary || Promise;
// Add the promise to the internal state
this.s.promiseLibrary = promiseLibrary;
if (this.s.options.readPreference) {
this.s.options.readPreference = resolveReadPreference(this.s.options);
}
}
/**
* @ignore
*/
inherits(MongoClient, EventEmitter);
/**
* The callback format for results
* @callback MongoClient~connectCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {MongoClient} client The connected client.
*/
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {Promise<MongoClient>} returns Promise if no callback passed
*/
MongoClient.prototype.connect = function(callback) {
// Validate options object
const err = validOptions(this.s.options);
if (typeof callback === 'string') {
throw new TypeError('`connect` only accepts a callback');
}
return executeOperation(this, connectOp, [this, err, callback], {
skipSessions: true
});
};
/**
* Logout user from server, fire off on all connections and remove all auth info
* @method
* @param {object} [options] Optional settings.
* @param {string} [options.dbName] Logout against different database than current.
* @param {Db~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
MongoClient.prototype.logout = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Establish the correct database name
const dbName = this.s.options.authSource ? this.s.options.authSource : this.s.options.dbName;
return executeOperation(this, logout, [this, dbName, callback], {
skipSessions: true
});
};
/**
* Close the db and its underlying connections
* @method
* @param {boolean} force Force close, emitting no events
* @param {Db~noResultCallback} [callback] The result callback
* @return {Promise} returns Promise if no callback passed
*/
MongoClient.prototype.close = function(force, callback) {
if (typeof force === 'function') (callback = force), (force = false);
const _close = closeCallback => {
const completeClose = () => {
// Emit close event
this.emit('close', this);
// Fire close event on any cached db instances
for (const name in this.s.dbCache) {
this.s.dbCache[name].emit('close');
}
// Remove listeners after emit
this.removeAllListeners('close');
process.nextTick(() => closeCallback(null, null));
};
if (this.topology) {
this.topology.close(force, () => completeClose());
return;
}
completeClose();
};
// Callback after next event loop tick
if (typeof callback === 'function') {
_close(callback);
return;
}
return new this.s.promiseLibrary(resolve => {
_close(() => resolve());
});
};
/**
* Create a new Db instance sharing the current socket connections. Be aware that the new db instances are
* related in a parent-child relationship to the original instance so that events are correctly emitted on child
* db instances. Child db instances are cached so performing db('db1') twice will return the same instance.
* You can control these behaviors with the options noListener and returnNonCachedInstance.
*
* @method
* @param {string} [dbName] The name of the database we want to use. If not provided, use database name from connection string.
* @param {object} [options] Optional settings.
* @param {boolean} [options.noListener=false] Do not make the db an event listener to the original connection.
* @param {boolean} [options.returnNonCachedInstance=false] Control if you want to return a cached instance or have a new one created
* @return {Db}
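 *
 * @example
 * // Illustrative sketch only ('mydb' and 'documents' are assumed example names);
 * // the client must already be connected before calling db():
 * const db = client.db('mydb');
 * const collection = db.collection('documents');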
*/
MongoClient.prototype.db = function(dbName, options) {
options = options || {};
// Default to db from connection string if not provided
if (!dbName) {
dbName = this.s.options.dbName;
}
  // Copy the options and add our internal override of the not-shared flag
const finalOptions = Object.assign({}, this.s.options, options);
if (finalOptions.readPreference) {
finalOptions.readPreference = resolveReadPreference(options, { client: this });
}
// Do we have the db in the cache already
if (this.s.dbCache[dbName] && finalOptions.returnNonCachedInstance !== true) {
return this.s.dbCache[dbName];
}
// Add promiseLibrary
finalOptions.promiseLibrary = this.s.promiseLibrary;
// If no topology throw an error message
if (!this.topology) {
throw new MongoError('MongoClient must be connected before calling MongoClient.prototype.db');
}
// Return the db object
const db = new Db(dbName, this.topology, finalOptions);
// Add the db to the cache
this.s.dbCache[dbName] = db;
// Return the database
return db;
};
/**
* Check if MongoClient is connected
*
* @method
* @param {object} [options] Optional settings.
* @param {boolean} [options.noListener=false] Do not make the db an event listener to the original connection.
* @param {boolean} [options.returnNonCachedInstance=false] Control if you want to return a cached instance or have a new one created
* @return {boolean}
*/
MongoClient.prototype.isConnected = function(options) {
options = options || {};
if (!this.topology) return false;
return this.topology.isConnected(options);
};
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @static
* @param {string} url The connection URI string
* @param {object} [options] Optional settings
* @param {number} [options.poolSize=5] The maximum size of the individual server pool
* @param {boolean} [options.ssl=false] Enable SSL connection.
* @param {boolean} [options.sslValidate=true] Validate mongod server certificate against Certificate Authority
* @param {buffer} [options.sslCA=undefined] SSL Certificate store binary buffer
* @param {buffer} [options.sslCert=undefined] SSL Certificate binary buffer
* @param {buffer} [options.sslKey=undefined] SSL Key file binary buffer
* @param {string} [options.sslPass=undefined] SSL Certificate pass phrase
* @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer
* @param {boolean} [options.autoReconnect=true] Enable autoReconnect for single server instances
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
 * @param {number} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket
* @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting
* @param {number} [options.family] Version of IP stack. Can be 4, 6 or null (default).
* If null, will attempt to connect with IPv6, and will fall back to IPv4 on failure
* @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
* @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies
* @param {number} [options.haInterval=10000] The High availability period for replicaset inquiry
* @param {string} [options.replicaSet=undefined] The Replicaset set name
* @param {number} [options.secondaryAcceptableLatencyMS=15] Cutoff latency point in MS for Replicaset member selection
* @param {number} [options.acceptableLatencyMS=15] Cutoff latency point in MS for Mongos proxies selection
* @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
* @param {string} [options.authSource=undefined] Define the database to authenticate against
* @param {(number|string)} [options.w] The write concern
* @param {number} [options.wtimeout] The write concern timeout
* @param {boolean} [options.j=false] Specify a journal write concern
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers
* @param {number} [options.bufferMaxEntries=-1] Sets a cap on how many operations the driver will buffer up before giving up on getting a working connection, default is -1 which is unlimited
* @param {(ReadPreference|string)} [options.readPreference] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST)
* @param {object} [options.pkFactory] A primary key factory object for generation of custom _id keys
* @param {object} [options.promiseLibrary] A Promise library class the application wishes to use such as Bluebird, must be ES6 compatible
* @param {object} [options.readConcern] Specify a read concern for the collection (only MongoDB 3.2 or higher supported)
* @param {string} [options.readConcern.level='local'] Specify a read concern level for the collection operations, one of [local|majority]. (only MongoDB 3.2 or higher supported)
* @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed)
* @param {string} [options.loggerLevel=undefined] The logging level (error/warn/info/debug)
* @param {object} [options.logger=undefined] Custom logger object
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers
* @param {boolean} [options.promoteLongs=true] Promotes long values to number if they fit inside the 53 bits resolution
* @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit
 * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identity during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function
* @param {object} [options.validateOptions=false] Validate MongoClient passed in options for correctness
* @param {string} [options.appname=undefined] The name of the application that created this MongoClient instance. MongoDB 3.4 and newer will print this value in the server log upon establishing each connection. It is also recorded in the slow query log and profile collections
* @param {string} [options.auth.user=undefined] The username for auth
* @param {string} [options.auth.password=undefined] The password for auth
* @param {string} [options.authMechanism=undefined] Mechanism for authentication: MDEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1
* @param {object} [options.compression] Type of compression to use: snappy or zlib
* @param {boolean} [options.fsync=false] Specify a file sync write concern
* @param {array} [options.readPreferenceTags] Read preference tags
* @param {number} [options.numberOfRetries=5] The number of retries for a tailable cursor
* @param {boolean} [options.auto_reconnect=true] Enable auto reconnecting for single server instances
* @param {number} [options.minSize] If present, the connection pool will be initialized with minSize connections, and will never dip below minSize connections
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {Promise<MongoClient>} returns Promise if no callback passed
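 *
 * @example
 * // Promise form, a minimal sketch (the URL below is an assumed placeholder):
 * MongoClient.connect('mongodb://localhost:27017', { useNewUrlParser: true })
 *   .then(client => {
 *     // ... use client.db(...), then client.close()
 *   });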
*/
MongoClient.connect = function(url, options, callback) {
const args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() : null;
options = options || {};
// Create client
const mongoClient = new MongoClient(url, options);
// Execute the connect method
return mongoClient.connect(callback);
};
/**
* Starts a new session on the server
*
* @param {SessionOptions} [options] optional settings for a driver session
* @return {ClientSession} the newly established session
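 *
 * @example
 * // Minimal sketch; assumes the client is already connected to a deployment
 * // that supports sessions.
 * const session = client.startSession();
 * // ... pass { session } in the options of individual operations ...
 * session.endSession();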
*/
MongoClient.prototype.startSession = function(options) {
options = Object.assign({ explicit: true }, options);
if (!this.topology) {
throw new MongoError('Must connect to a server before calling this method');
}
if (!this.topology.hasSessionSupport()) {
throw new MongoError('Current topology does not support sessions');
}
return this.topology.startSession(options, this.s.options);
};
/**
* Runs a given operation with an implicitly created session. The lifetime of the session
* will be handled without the need for user interaction.
*
 * NOTE: presently the operation MUST return a Promise (either explicitly, or implicitly as an async function)
*
 * @param {Object} [options] Optional settings to be applied to the implicitly created session
* @param {Function} operation An operation to execute with an implicitly created session. The signature of this MUST be `(session) => {}`
* @return {Promise}
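 *
 * @example
 * // Minimal sketch ('test' and 'docs' are assumed example names); the
 * // operation must return a Promise, which insertOne does here:
 * client.withSession(session =>
 *   client.db('test').collection('docs').insertOne({ a: 1 }, { session })
 * );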
*/
MongoClient.prototype.withSession = function(options, operation) {
if (typeof options === 'function') (operation = options), (options = undefined);
const session = this.startSession(options);
let cleanupHandler = (err, result, opts) => {
// prevent multiple calls to cleanupHandler
cleanupHandler = () => {
throw new ReferenceError('cleanupHandler was called too many times');
};
opts = Object.assign({ throw: true }, opts);
session.endSession();
if (err) {
if (opts.throw) throw err;
return Promise.reject(err);
}
};
try {
const result = operation(session);
return Promise.resolve(result)
.then(result => cleanupHandler(null, result))
.catch(err => cleanupHandler(err, null, { throw: true }));
} catch (err) {
return cleanupHandler(err, null, { throw: false });
}
};
/**
* Create a new Change Stream, watching for new changes (insertions, updates, replacements, deletions, and invalidations) in this cluster. Will ignore all changes to system collections, as well as the local, admin,
* and config databases.
* @method
* @since 3.1.0
* @param {Array} [pipeline] An array of {@link https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.
* @param {object} [options] Optional settings
* @param {string} [options.fullDocument='default'] Allowed values: ‘default’, ‘updateLookup’. When set to ‘updateLookup’, the change stream will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred.
* @param {object} [options.resumeAfter] Specifies the logical starting point for the new change stream. This should be the _id field from a previously returned change stream document.
* @param {number} [options.maxAwaitTimeMS] The maximum amount of time for the server to wait on new documents to satisfy a change stream query
* @param {number} [options.batchSize] The number of documents to return per batch. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @param {object} [options.collation] Specify collation settings for operation. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @param {ReadPreference} [options.readPreference] The read preference. See {@link https://docs.mongodb.com/manual/reference/read-preference|read preference documentation}.
* @param {Timestamp} [options.startAtClusterTime] receive change events that occur after the specified timestamp
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {ChangeStream} a ChangeStream instance.
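 *
 * @example
 * // Minimal sketch; the $match stage and handler below are illustrative,
 * // not required by the API:
 * const changeStream = client.watch([{ $match: { operationType: 'insert' } }]);
 * changeStream.on('change', change => console.log(change));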
*/
MongoClient.prototype.watch = function(pipeline, options) {
pipeline = pipeline || [];
options = options || {};
// Allow optionally not specifying a pipeline
if (!Array.isArray(pipeline)) {
options = pipeline;
pipeline = [];
}
if (options.readPreference) {
options.readPreference = resolveReadPreference(options, { client: this });
}
return new ChangeStream(this, pipeline, options);
};
/**
* Return the mongo client logger
* @method
* @return {Logger} return the mongo client logger
* @ignore
*/
MongoClient.prototype.getLogger = function() {
return this.s.options.logger;
};
module.exports = MongoClient;
| 1 | 14,914 | `causalConsistency` is already defined on `SessionOptions`. We do not need to duplicate it here. | mongodb-node-mongodb-native | js |
@@ -46,9 +46,7 @@ import java.lang.reflect.Method;
import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy;
import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.*;
/** | 1 | /*
* Copyright 2002-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.oauth2.client.web.method.annotation;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.springframework.core.MethodParameter;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.security.authentication.TestingAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.oauth2.client.ClientAuthorizationRequiredException;
import org.springframework.security.oauth2.client.OAuth2AuthorizedClient;
import org.springframework.security.oauth2.client.annotation.RegisteredOAuth2AuthorizedClient;
import org.springframework.security.oauth2.client.authentication.OAuth2AuthenticationToken;
import org.springframework.security.oauth2.client.endpoint.OAuth2AccessTokenResponseClient;
import org.springframework.security.oauth2.client.endpoint.OAuth2ClientCredentialsGrantRequest;
import org.springframework.security.oauth2.client.registration.ClientRegistration;
import org.springframework.security.oauth2.client.registration.ClientRegistrationRepository;
import org.springframework.security.oauth2.client.registration.InMemoryClientRegistrationRepository;
import org.springframework.security.oauth2.client.web.OAuth2AuthorizedClientRepository;
import org.springframework.security.oauth2.core.AuthorizationGrantType;
import org.springframework.security.oauth2.core.ClientAuthenticationMethod;
import org.springframework.security.oauth2.core.OAuth2AccessToken;
import org.springframework.security.oauth2.core.endpoint.OAuth2AccessTokenResponse;
import org.springframework.util.ReflectionUtils;
import org.springframework.web.context.request.ServletWebRequest;
import javax.servlet.http.HttpServletRequest;
import java.lang.reflect.Method;
import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy;
import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.*;
/**
* Tests for {@link OAuth2AuthorizedClientArgumentResolver}.
*
* @author Joe Grandja
*/
public class OAuth2AuthorizedClientArgumentResolverTests {
private TestingAuthenticationToken authentication;
private String principalName = "principal-1";
private ClientRegistration registration1;
private ClientRegistration registration2;
private ClientRegistrationRepository clientRegistrationRepository;
private OAuth2AuthorizedClient authorizedClient1;
private OAuth2AuthorizedClient authorizedClient2;
private OAuth2AuthorizedClientRepository authorizedClientRepository;
private OAuth2AuthorizedClientArgumentResolver argumentResolver;
private MockHttpServletRequest request;
@Before
public void setup() {
this.authentication = new TestingAuthenticationToken(this.principalName, "password");
SecurityContext securityContext = SecurityContextHolder.createEmptyContext();
securityContext.setAuthentication(this.authentication);
SecurityContextHolder.setContext(securityContext);
this.registration1 = ClientRegistration.withRegistrationId("client1")
.clientId("client-1")
.clientSecret("secret")
.clientAuthenticationMethod(ClientAuthenticationMethod.BASIC)
.authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE)
.redirectUriTemplate("{baseUrl}/login/oauth2/code/{registrationId}")
.scope("user")
.authorizationUri("https://provider.com/oauth2/authorize")
.tokenUri("https://provider.com/oauth2/token")
.userInfoUri("https://provider.com/oauth2/user")
.userNameAttributeName("id")
.clientName("client-1")
.build();
this.registration2 = ClientRegistration.withRegistrationId("client2")
.clientId("client-2")
.clientSecret("secret")
.clientAuthenticationMethod(ClientAuthenticationMethod.BASIC)
.authorizationGrantType(AuthorizationGrantType.CLIENT_CREDENTIALS)
.scope("read", "write")
.tokenUri("https://provider.com/oauth2/token")
.build();
this.clientRegistrationRepository = new InMemoryClientRegistrationRepository(this.registration1, this.registration2);
this.authorizedClientRepository = mock(OAuth2AuthorizedClientRepository.class);
this.argumentResolver = new OAuth2AuthorizedClientArgumentResolver(
this.clientRegistrationRepository, this.authorizedClientRepository);
this.authorizedClient1 = new OAuth2AuthorizedClient(this.registration1, this.principalName, mock(OAuth2AccessToken.class));
when(this.authorizedClientRepository.loadAuthorizedClient(
eq(this.registration1.getRegistrationId()), any(Authentication.class), any(HttpServletRequest.class)))
.thenReturn(this.authorizedClient1);
this.authorizedClient2 = new OAuth2AuthorizedClient(this.registration2, this.principalName, mock(OAuth2AccessToken.class));
when(this.authorizedClientRepository.loadAuthorizedClient(
eq(this.registration2.getRegistrationId()), any(Authentication.class), any(HttpServletRequest.class)))
.thenReturn(this.authorizedClient2);
this.request = new MockHttpServletRequest();
}
@After
public void cleanup() {
SecurityContextHolder.clearContext();
}
@Test
public void constructorWhenClientRegistrationRepositoryIsNullThenThrowIllegalArgumentException() {
assertThatThrownBy(() -> new OAuth2AuthorizedClientArgumentResolver(null, this.authorizedClientRepository))
.isInstanceOf(IllegalArgumentException.class);
}
@Test
public void constructorWhenOAuth2AuthorizedClientRepositoryIsNullThenThrowIllegalArgumentException() {
assertThatThrownBy(() -> new OAuth2AuthorizedClientArgumentResolver(this.clientRegistrationRepository, null))
.isInstanceOf(IllegalArgumentException.class);
}
@Test
public void setClientCredentialsTokenResponseClientWhenClientIsNullThenThrowIllegalArgumentException() {
assertThatThrownBy(() -> this.argumentResolver.setClientCredentialsTokenResponseClient(null))
.isInstanceOf(IllegalArgumentException.class);
}
@Test
public void supportsParameterWhenParameterTypeOAuth2AuthorizedClientThenTrue() {
MethodParameter methodParameter = this.getMethodParameter("paramTypeAuthorizedClient", OAuth2AuthorizedClient.class);
assertThat(this.argumentResolver.supportsParameter(methodParameter)).isTrue();
}
@Test
public void supportsParameterWhenParameterTypeOAuth2AuthorizedClientWithoutAnnotationThenFalse() {
MethodParameter methodParameter = this.getMethodParameter("paramTypeAuthorizedClientWithoutAnnotation", OAuth2AuthorizedClient.class);
assertThat(this.argumentResolver.supportsParameter(methodParameter)).isFalse();
}
@Test
public void supportsParameterWhenParameterTypeUnsupportedThenFalse() {
MethodParameter methodParameter = this.getMethodParameter("paramTypeUnsupported", String.class);
assertThat(this.argumentResolver.supportsParameter(methodParameter)).isFalse();
}
@Test
public void supportsParameterWhenParameterTypeUnsupportedWithoutAnnotationThenFalse() {
MethodParameter methodParameter = this.getMethodParameter("paramTypeUnsupportedWithoutAnnotation", String.class);
assertThat(this.argumentResolver.supportsParameter(methodParameter)).isFalse();
}
@Test
public void resolveArgumentWhenRegistrationIdEmptyAndNotOAuth2AuthenticationThenThrowIllegalArgumentException() {
MethodParameter methodParameter = this.getMethodParameter("registrationIdEmpty", OAuth2AuthorizedClient.class);
assertThatThrownBy(() -> this.argumentResolver.resolveArgument(methodParameter, null, null, null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Unable to resolve the Client Registration Identifier. It must be provided via @RegisteredOAuth2AuthorizedClient(\"client1\") or @RegisteredOAuth2AuthorizedClient(registrationId = \"client1\").");
}
@Test
public void resolveArgumentWhenRegistrationIdEmptyAndOAuth2AuthenticationThenResolves() throws Exception {
OAuth2AuthenticationToken authentication = mock(OAuth2AuthenticationToken.class);
when(authentication.getAuthorizedClientRegistrationId()).thenReturn("client1");
SecurityContext securityContext = SecurityContextHolder.createEmptyContext();
securityContext.setAuthentication(authentication);
SecurityContextHolder.setContext(securityContext);
MethodParameter methodParameter = this.getMethodParameter("registrationIdEmpty", OAuth2AuthorizedClient.class);
assertThat(this.argumentResolver.resolveArgument(
methodParameter, null, new ServletWebRequest(this.request), null)).isSameAs(this.authorizedClient1);
}
@Test
public void resolveArgumentWhenAuthorizedClientFoundThenResolves() throws Exception {
MethodParameter methodParameter = this.getMethodParameter("paramTypeAuthorizedClient", OAuth2AuthorizedClient.class);
assertThat(this.argumentResolver.resolveArgument(
methodParameter, null, new ServletWebRequest(this.request), null)).isSameAs(this.authorizedClient1);
}
@Test
public void resolveArgumentWhenRegistrationIdInvalidThenDoesNotResolve() throws Exception {
MethodParameter methodParameter = this.getMethodParameter("registrationIdInvalid", OAuth2AuthorizedClient.class);
assertThat(this.argumentResolver.resolveArgument(
methodParameter, null, new ServletWebRequest(this.request), null)).isNull();
}
@Test
public void resolveArgumentWhenAuthorizedClientNotFoundForAuthorizationCodeClientThenThrowClientAuthorizationRequiredException() {
when(this.authorizedClientRepository.loadAuthorizedClient(anyString(), any(), any(HttpServletRequest.class)))
.thenReturn(null);
MethodParameter methodParameter = this.getMethodParameter("paramTypeAuthorizedClient", OAuth2AuthorizedClient.class);
assertThatThrownBy(() -> this.argumentResolver.resolveArgument(methodParameter, null, new ServletWebRequest(this.request), null))
.isInstanceOf(ClientAuthorizationRequiredException.class);
}
@SuppressWarnings("unchecked")
@Test
public void resolveArgumentWhenAuthorizedClientNotFoundForClientCredentialsClientThenResolvesFromTokenResponseClient() throws Exception {
OAuth2AccessTokenResponseClient<OAuth2ClientCredentialsGrantRequest> clientCredentialsTokenResponseClient =
mock(OAuth2AccessTokenResponseClient.class);
this.argumentResolver.setClientCredentialsTokenResponseClient(clientCredentialsTokenResponseClient);
OAuth2AccessTokenResponse accessTokenResponse = OAuth2AccessTokenResponse
.withToken("access-token-1234")
.tokenType(OAuth2AccessToken.TokenType.BEARER)
.expiresIn(3600)
.build();
when(clientCredentialsTokenResponseClient.getTokenResponse(any())).thenReturn(accessTokenResponse);
when(this.authorizedClientRepository.loadAuthorizedClient(anyString(), any(), any(HttpServletRequest.class)))
.thenReturn(null);
MethodParameter methodParameter = this.getMethodParameter("clientCredentialsClient", OAuth2AuthorizedClient.class);
OAuth2AuthorizedClient authorizedClient = (OAuth2AuthorizedClient) this.argumentResolver.resolveArgument(
methodParameter, null, new ServletWebRequest(this.request), null);
assertThat(authorizedClient).isNotNull();
assertThat(authorizedClient.getClientRegistration()).isSameAs(this.registration2);
assertThat(authorizedClient.getPrincipalName()).isEqualTo(this.principalName);
assertThat(authorizedClient.getAccessToken()).isSameAs(accessTokenResponse.getAccessToken());
verify(this.authorizedClientRepository).saveAuthorizedClient(
eq(authorizedClient), eq(this.authentication), any(HttpServletRequest.class), eq(null));
}
private MethodParameter getMethodParameter(String methodName, Class<?>... paramTypes) {
Method method = ReflectionUtils.findMethod(TestController.class, methodName, paramTypes);
return new MethodParameter(method, 0);
}
static class TestController {
void paramTypeAuthorizedClient(@RegisteredOAuth2AuthorizedClient("client1") OAuth2AuthorizedClient authorizedClient) {
}
void paramTypeAuthorizedClientWithoutAnnotation(OAuth2AuthorizedClient authorizedClient) {
}
void paramTypeUnsupported(@RegisteredOAuth2AuthorizedClient("client1") String param) {
}
void paramTypeUnsupportedWithoutAnnotation(String param) {
}
void registrationIdEmpty(@RegisteredOAuth2AuthorizedClient OAuth2AuthorizedClient authorizedClient) {
}
void registrationIdInvalid(@RegisteredOAuth2AuthorizedClient("invalid") OAuth2AuthorizedClient authorizedClient) {
}
void clientCredentialsClient(@RegisteredOAuth2AuthorizedClient("client2") OAuth2AuthorizedClient authorizedClient) {
}
}
}
| 1 | 12,947 | This should not be included since there are no code changes. | spring-projects-spring-security | java |
@@ -56,6 +56,18 @@ func askCredentials(c *context.Context, status int, text string) {
func HTTPContexter() macaron.Handler {
return func(c *context.Context) {
+ if len(setting.HTTP.AccessControlAllowOrigin) > 0 {
+ // Set CORS headers for browser-based git clients
+ c.Resp.Header().Set("Access-Control-Allow-Origin", setting.HTTP.AccessControlAllowOrigin)
+ c.Resp.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
+
+ // Handle preflight OPTIONS request
+ if c.Req.Method == "OPTIONS" {
+ c.Status(http.StatusOK)
+ return
+ }
+ }
+
ownerName := c.Params(":username")
repoName := strings.TrimSuffix(c.Params(":reponame"), ".git")
repoName = strings.TrimSuffix(repoName, ".wiki") | 1 | // Copyright 2017 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"bytes"
"compress/gzip"
"fmt"
"net/http"
"os"
"os/exec"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/Unknwon/com"
log "gopkg.in/clog.v1"
"gopkg.in/macaron.v1"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/models/errors"
"github.com/gogits/gogs/pkg/context"
"github.com/gogits/gogs/pkg/setting"
"github.com/gogits/gogs/pkg/tool"
)
const (
ENV_AUTH_USER_ID = "GOGS_AUTH_USER_ID"
ENV_AUTH_USER_NAME = "GOGS_AUTH_USER_NAME"
ENV_AUTH_USER_EMAIL = "GOGS_AUTH_USER_EMAIL"
ENV_REPO_OWNER_NAME = "GOGS_REPO_OWNER_NAME"
ENV_REPO_OWNER_SALT_MD5 = "GOGS_REPO_OWNER_SALT_MD5"
ENV_REPO_ID = "GOGS_REPO_ID"
ENV_REPO_NAME = "GOGS_REPO_NAME"
ENV_REPO_CUSTOM_HOOKS_PATH = "GOGS_REPO_CUSTOM_HOOKS_PATH"
)
type HTTPContext struct {
*context.Context
OwnerName string
OwnerSalt string
RepoID int64
RepoName string
AuthUser *models.User
}
// askCredentials responds with an HTTP header and status which inform the client to provide credentials.
func askCredentials(c *context.Context, status int, text string) {
c.Resp.Header().Set("WWW-Authenticate", "Basic realm=\".\"")
c.HandleText(status, text)
}
func HTTPContexter() macaron.Handler {
return func(c *context.Context) {
ownerName := c.Params(":username")
repoName := strings.TrimSuffix(c.Params(":reponame"), ".git")
repoName = strings.TrimSuffix(repoName, ".wiki")
isPull := c.Query("service") == "git-upload-pack" ||
strings.HasSuffix(c.Req.URL.Path, "git-upload-pack") ||
c.Req.Method == "GET"
owner, err := models.GetUserByName(ownerName)
if err != nil {
c.NotFoundOrServerError("GetUserByName", errors.IsUserNotExist, err)
return
}
repo, err := models.GetRepositoryByName(owner.ID, repoName)
if err != nil {
c.NotFoundOrServerError("GetRepositoryByName", errors.IsRepoNotExist, err)
return
}
// Authentication is not required for pulling from public repositories.
if isPull && !repo.IsPrivate && !setting.Service.RequireSignInView {
c.Map(&HTTPContext{
Context: c,
})
return
}
// In case user requested a wrong URL and not intended to access Git objects.
action := c.Params("*")
if !strings.Contains(action, "git-") &&
!strings.Contains(action, "info/") &&
!strings.Contains(action, "HEAD") &&
!strings.Contains(action, "objects/") {
c.NotFound()
return
}
// Handle HTTP Basic Authentication
authHead := c.Req.Header.Get("Authorization")
if len(authHead) == 0 {
askCredentials(c, http.StatusUnauthorized, "")
return
}
auths := strings.Fields(authHead)
if len(auths) != 2 || auths[0] != "Basic" {
askCredentials(c, http.StatusUnauthorized, "")
return
}
authUsername, authPassword, err := tool.BasicAuthDecode(auths[1])
if err != nil {
askCredentials(c, http.StatusUnauthorized, "")
return
}
authUser, err := models.UserSignIn(authUsername, authPassword)
if err != nil && !errors.IsUserNotExist(err) {
c.Handle(http.StatusInternalServerError, "UserSignIn", err)
return
}
// If username and password combination failed, try again using username as a token.
if authUser == nil {
token, err := models.GetAccessTokenBySHA(authUsername)
if err != nil {
if models.IsErrAccessTokenEmpty(err) || models.IsErrAccessTokenNotExist(err) {
askCredentials(c, http.StatusUnauthorized, "")
} else {
c.Handle(http.StatusInternalServerError, "GetAccessTokenBySHA", err)
}
return
}
token.Updated = time.Now()
authUser, err = models.GetUserByID(token.UID)
if err != nil {
// Once we found token, we're supposed to find its related user,
// thus any error is unexpected.
c.Handle(http.StatusInternalServerError, "GetUserByID", err)
return
}
} else if authUser.IsEnabledTwoFactor() {
askCredentials(c, http.StatusUnauthorized, `User with two-factor authentication enabled cannot perform HTTP/HTTPS operations via plain username and password
Please create and use personal access token on user settings page`)
return
}
log.Trace("HTTPGit - Authenticated user: %s", authUser.Name)
mode := models.ACCESS_MODE_WRITE
if isPull {
mode = models.ACCESS_MODE_READ
}
has, err := models.HasAccess(authUser.ID, repo, mode)
if err != nil {
c.Handle(http.StatusInternalServerError, "HasAccess", err)
return
} else if !has {
askCredentials(c, http.StatusForbidden, "User permission denied")
return
}
if !isPull && repo.IsMirror {
c.HandleText(http.StatusForbidden, "Mirror repository is read-only")
return
}
c.Map(&HTTPContext{
Context: c,
OwnerName: ownerName,
OwnerSalt: owner.Salt,
RepoID: repo.ID,
RepoName: repoName,
AuthUser: authUser,
})
}
}
type serviceHandler struct {
w http.ResponseWriter
r *http.Request
dir string
file string
authUser *models.User
ownerName string
ownerSalt string
repoID int64
repoName string
}
func (h *serviceHandler) setHeaderNoCache() {
h.w.Header().Set("Expires", "Fri, 01 Jan 1980 00:00:00 GMT")
h.w.Header().Set("Pragma", "no-cache")
h.w.Header().Set("Cache-Control", "no-cache, max-age=0, must-revalidate")
}
func (h *serviceHandler) setHeaderCacheForever() {
now := time.Now().Unix()
expires := now + 31536000
h.w.Header().Set("Date", fmt.Sprintf("%d", now))
h.w.Header().Set("Expires", fmt.Sprintf("%d", expires))
h.w.Header().Set("Cache-Control", "public, max-age=31536000")
}
func (h *serviceHandler) sendFile(contentType string) {
reqFile := path.Join(h.dir, h.file)
fi, err := os.Stat(reqFile)
if os.IsNotExist(err) {
h.w.WriteHeader(http.StatusNotFound)
return
}
h.w.Header().Set("Content-Type", contentType)
h.w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
h.w.Header().Set("Last-Modified", fi.ModTime().Format(http.TimeFormat))
http.ServeFile(h.w, h.r, reqFile)
}
type ComposeHookEnvsOptions struct {
AuthUser *models.User
OwnerName string
OwnerSalt string
RepoID int64
RepoName string
RepoPath string
}
func ComposeHookEnvs(opts ComposeHookEnvsOptions) []string {
envs := []string{
"SSH_ORIGINAL_COMMAND=1",
ENV_AUTH_USER_ID + "=" + com.ToStr(opts.AuthUser.ID),
ENV_AUTH_USER_NAME + "=" + opts.AuthUser.Name,
ENV_AUTH_USER_EMAIL + "=" + opts.AuthUser.Email,
ENV_REPO_OWNER_NAME + "=" + opts.OwnerName,
ENV_REPO_OWNER_SALT_MD5 + "=" + tool.MD5(opts.OwnerSalt),
ENV_REPO_ID + "=" + com.ToStr(opts.RepoID),
ENV_REPO_NAME + "=" + opts.RepoName,
ENV_REPO_CUSTOM_HOOKS_PATH + "=" + path.Join(opts.RepoPath, "custom_hooks"),
}
return envs
}
func serviceRPC(h serviceHandler, service string) {
defer h.r.Body.Close()
if h.r.Header.Get("Content-Type") != fmt.Sprintf("application/x-git-%s-request", service) {
h.w.WriteHeader(http.StatusUnauthorized)
return
}
h.w.Header().Set("Content-Type", fmt.Sprintf("application/x-git-%s-result", service))
var (
reqBody = h.r.Body
err error
)
// Handle GZIP
if h.r.Header.Get("Content-Encoding") == "gzip" {
reqBody, err = gzip.NewReader(reqBody)
if err != nil {
log.Error(2, "HTTP.Get: fail to create gzip reader: %v", err)
h.w.WriteHeader(http.StatusInternalServerError)
return
}
}
var stderr bytes.Buffer
cmd := exec.Command("git", service, "--stateless-rpc", h.dir)
if service == "receive-pack" {
cmd.Env = append(os.Environ(), ComposeHookEnvs(ComposeHookEnvsOptions{
AuthUser: h.authUser,
OwnerName: h.ownerName,
OwnerSalt: h.ownerSalt,
RepoID: h.repoID,
RepoName: h.repoName,
RepoPath: h.dir,
})...)
}
cmd.Dir = h.dir
cmd.Stdout = h.w
cmd.Stderr = &stderr
cmd.Stdin = reqBody
if err = cmd.Run(); err != nil {
log.Error(2, "HTTP.serviceRPC: fail to serve RPC '%s': %v - %s", service, err, stderr)
h.w.WriteHeader(http.StatusInternalServerError)
return
}
}
func serviceUploadPack(h serviceHandler) {
serviceRPC(h, "upload-pack")
}
func serviceReceivePack(h serviceHandler) {
serviceRPC(h, "receive-pack")
}
func getServiceType(r *http.Request) string {
serviceType := r.FormValue("service")
if !strings.HasPrefix(serviceType, "git-") {
return ""
}
return strings.TrimPrefix(serviceType, "git-")
}
// FIXME: use process module
func gitCommand(dir string, args ...string) []byte {
cmd := exec.Command("git", args...)
cmd.Dir = dir
out, err := cmd.Output()
if err != nil {
log.Error(2, fmt.Sprintf("Git: %v - %s", err, out))
}
return out
}
func updateServerInfo(dir string) []byte {
return gitCommand(dir, "update-server-info")
}
func packetWrite(str string) []byte {
s := strconv.FormatInt(int64(len(str)+4), 16)
if len(s)%4 != 0 {
s = strings.Repeat("0", 4-len(s)%4) + s
}
return []byte(s + str)
}
func getInfoRefs(h serviceHandler) {
h.setHeaderNoCache()
service := getServiceType(h.r)
if service != "upload-pack" && service != "receive-pack" {
updateServerInfo(h.dir)
h.sendFile("text/plain; charset=utf-8")
return
}
refs := gitCommand(h.dir, service, "--stateless-rpc", "--advertise-refs", ".")
h.w.Header().Set("Content-Type", fmt.Sprintf("application/x-git-%s-advertisement", service))
h.w.WriteHeader(http.StatusOK)
h.w.Write(packetWrite("# service=git-" + service + "\n"))
h.w.Write([]byte("0000"))
h.w.Write(refs)
}
func getTextFile(h serviceHandler) {
h.setHeaderNoCache()
h.sendFile("text/plain")
}
func getInfoPacks(h serviceHandler) {
h.setHeaderCacheForever()
h.sendFile("text/plain; charset=utf-8")
}
func getLooseObject(h serviceHandler) {
h.setHeaderCacheForever()
h.sendFile("application/x-git-loose-object")
}
func getPackFile(h serviceHandler) {
h.setHeaderCacheForever()
h.sendFile("application/x-git-packed-objects")
}
func getIdxFile(h serviceHandler) {
h.setHeaderCacheForever()
h.sendFile("application/x-git-packed-objects-toc")
}
var routes = []struct {
reg *regexp.Regexp
method string
handler func(serviceHandler)
}{
{regexp.MustCompile("(.*?)/git-upload-pack$"), "POST", serviceUploadPack},
{regexp.MustCompile("(.*?)/git-receive-pack$"), "POST", serviceReceivePack},
{regexp.MustCompile("(.*?)/info/refs$"), "GET", getInfoRefs},
{regexp.MustCompile("(.*?)/HEAD$"), "GET", getTextFile},
{regexp.MustCompile("(.*?)/objects/info/alternates$"), "GET", getTextFile},
{regexp.MustCompile("(.*?)/objects/info/http-alternates$"), "GET", getTextFile},
{regexp.MustCompile("(.*?)/objects/info/packs$"), "GET", getInfoPacks},
{regexp.MustCompile("(.*?)/objects/info/[^/]*$"), "GET", getTextFile},
{regexp.MustCompile("(.*?)/objects/[0-9a-f]{2}/[0-9a-f]{38}$"), "GET", getLooseObject},
{regexp.MustCompile("(.*?)/objects/pack/pack-[0-9a-f]{40}\\.pack$"), "GET", getPackFile},
{regexp.MustCompile("(.*?)/objects/pack/pack-[0-9a-f]{40}\\.idx$"), "GET", getIdxFile},
}
func getGitRepoPath(dir string) (string, error) {
if !strings.HasSuffix(dir, ".git") {
dir += ".git"
}
filename := path.Join(setting.RepoRootPath, dir)
if _, err := os.Stat(filename); os.IsNotExist(err) {
return "", err
}
return filename, nil
}
func HTTP(c *HTTPContext) {
for _, route := range routes {
reqPath := strings.ToLower(c.Req.URL.Path)
m := route.reg.FindStringSubmatch(reqPath)
if m == nil {
continue
}
		// We perform the check here because the routes matched in cmd/web.go are wider than needed,
		// but we only want to output this message if the user is really trying to access
		// Git HTTP endpoints.
if setting.Repository.DisableHTTPGit {
			c.HandleText(http.StatusForbidden, "Interacting with repositories by HTTP protocol is not allowed")
return
}
if route.method != c.Req.Method {
c.NotFound()
return
}
file := strings.TrimPrefix(reqPath, m[1]+"/")
dir, err := getGitRepoPath(m[1])
if err != nil {
log.Warn("HTTP.getGitRepoPath: %v", err)
c.NotFound()
return
}
route.handler(serviceHandler{
w: c.Resp,
r: c.Req.Request,
dir: dir,
file: file,
authUser: c.AuthUser,
ownerName: c.OwnerName,
ownerSalt: c.OwnerSalt,
repoID: c.RepoID,
repoName: c.RepoName,
})
return
}
c.NotFound()
}
| 1 | 12,665 | If all we need just response 200, then we could use `c.Status(http.StatusOK)` here. | gogs-gogs | go |
@@ -683,6 +683,11 @@ func loadConfigFromDatastore(
Name: "node." + hostname,
Namespace: "",
}, "")
+ n, err := client.Get(ctx, model.ResourceKey{
+ Kind: apiv2.KindNode,
+ Name: hostname,
+ Namespace: "",
+ }, "")
if _, ok := err.(errors2.ErrorResourceDoesNotExist); err != nil && !ok {
return
} | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"errors"
"fmt"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"os/signal"
"reflect"
"runtime"
"runtime/debug"
"runtime/pprof"
"strings"
"syscall"
"time"
docopt "github.com/docopt/docopt-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/projectcalico/felix/buildinfo"
"github.com/projectcalico/felix/calc"
"github.com/projectcalico/felix/config"
_ "github.com/projectcalico/felix/config"
"github.com/projectcalico/felix/extdataplane"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/intdataplane"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/felix/statusrep"
"github.com/projectcalico/felix/usagerep"
apiv2 "github.com/projectcalico/libcalico-go/lib/apis/v2"
"github.com/projectcalico/libcalico-go/lib/backend"
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/model"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/updateprocessors"
errors2 "github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/health"
"github.com/projectcalico/typha/pkg/syncclient"
)
const usage = `Felix, the Calico per-host daemon.
Usage:
calico-felix [options]
Options:
-c --config-file=<filename> Config file to load [default: /etc/calico/felix.cfg].
--version Print the version and exit.
`
const (
// Our default value for GOGC if it is not set. This is the percentage that heap usage must
// grow by to trigger a garbage collection. Go's default is 100, meaning that 50% of the
// heap can be lost to garbage. We reduce it to this value to trade increased CPU usage for
// lower occupancy.
defaultGCPercent = 20
)
// main is the entry point to the calico-felix binary.
//
// Its main role is to sequence Felix's startup by:
//
// Initialising early logging config (log format and early debug settings).
//
// Parsing command line parameters.
//
// Loading datastore configuration from the environment or config file.
//
// Loading more configuration from the datastore (this is retried until success).
//
// Starting the configured internal (golang) or external dataplane driver.
//
// Starting the background processing goroutines, which load and keep in sync with the
// state from the datastore, the "calculation graph".
//
// Starting the usage reporting and prometheus metrics endpoint threads (if configured).
//
// Then, it defers to monitorAndManageShutdown(), which blocks until one of the components
// fails, then attempts a graceful shutdown. At that point, all the processing is in
// background goroutines.
//
// To avoid having to maintain rarely-used code paths, Felix handles updates to its
// main config parameters by exiting and allowing itself to be restarted by the init
// daemon.
func main() {
// Go's RNG is not seeded by default. Do that now.
rand.Seed(time.Now().UTC().UnixNano())
// Special-case handling for environment variable-configured logging:
// Initialise early so we can trace out config parsing.
logutils.ConfigureEarlyLogging()
ctx := context.Background()
if os.Getenv("GOGC") == "" {
// Tune the GC to trade off a little extra CPU usage for significantly lower
// occupancy at high scale. This is worthwhile because Felix runs per-host so
// any occupancy improvement is multiplied by the number of hosts.
log.Debugf("No GOGC value set, defaulting to %d%%.", defaultGCPercent)
debug.SetGCPercent(defaultGCPercent)
}
// Parse command-line args.
version := "Version: " + buildinfo.GitVersion + "\n" +
"Full git commit ID: " + buildinfo.GitRevision + "\n" +
"Build date: " + buildinfo.BuildDate + "\n"
arguments, err := docopt.Parse(usage, nil, true, version, false)
if err != nil {
println(usage)
log.Fatalf("Failed to parse usage, exiting: %v", err)
}
buildInfoLogCxt := log.WithFields(log.Fields{
"version": buildinfo.GitVersion,
"buildDate": buildinfo.BuildDate,
"gitCommit": buildinfo.GitRevision,
"GOMAXPROCS": runtime.GOMAXPROCS(0),
})
buildInfoLogCxt.Info("Felix starting up")
log.Infof("Command line arguments: %v", arguments)
// Load the configuration from all the different sources including the
// datastore and merge. Keep retrying on failure. We'll sit in this
// loop until the datastore is ready.
log.Infof("Loading configuration...")
var backendClient bapi.Client
var configParams *config.Config
var typhaAddr string
configRetry:
for {
// Load locally-defined config, including the datastore connection
// parameters. First the environment variables.
configParams = config.New()
envConfig := config.LoadConfigFromEnvironment(os.Environ())
// Then, the config file.
configFile := arguments["--config-file"].(string)
fileConfig, err := config.LoadConfigFile(configFile)
if err != nil {
log.WithError(err).WithField("configFile", configFile).Error(
"Failed to load configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Parse and merge the local config.
configParams.UpdateFrom(envConfig, config.EnvironmentVariable)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration environment variable")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(fileConfig, config.ConfigFile)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// We should now have enough config to connect to the datastore
// so we can load the remainder of the config.
datastoreConfig := configParams.DatastoreConfig()
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to create datastore client")
time.Sleep(1 * time.Second)
continue configRetry
}
globalConfig, hostConfig, err := loadConfigFromDatastore(
ctx, backendClient, configParams.FelixHostname)
if err != nil {
log.WithError(err).Error("Failed to get config from datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(globalConfig, config.DatastoreGlobal)
configParams.UpdateFrom(hostConfig, config.DatastorePerHost)
configParams.Validate()
if configParams.Err != nil {
log.WithError(configParams.Err).Error(
"Failed to parse/validate configuration from datastore.")
time.Sleep(1 * time.Second)
continue configRetry
}
// We now have some config flags that affect how we configure the syncer.
// After loading the config from the datastore, reconnect, possibly with new
// config. We don't need to re-load the configuration _again_ because the
// calculation graph will spot if the config has changed since we were initialised.
datastoreConfig = configParams.DatastoreConfig()
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to (re)connect to datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
// If we're configured to discover Typha, do that now so we can retry if we fail.
typhaAddr, err = discoverTyphaAddr(configParams)
if err != nil {
log.WithError(err).Error("Typha discovery enabled but discovery failed.")
time.Sleep(1 * time.Second)
continue configRetry
}
break configRetry
}
// If we get here, we've loaded the configuration successfully.
// Update log levels before we do anything else.
logutils.ConfigureLogging(configParams)
// Since we may have enabled more logging, log with the build context
// again.
buildInfoLogCxt.WithField("config", configParams).Info(
"Successfully loaded configuration.")
// Health monitoring, for liveness and readiness endpoints.
healthAggregator := health.NewHealthAggregator()
// Start up the dataplane driver. This may be the internal go-based driver or an external
// one.
var dpDriver dataplaneDriver
var dpDriverCmd *exec.Cmd
if configParams.UseInternalDataplaneDriver {
log.Info("Using internal dataplane driver.")
// Dedicated mark bits for accept and pass actions. These are long lived bits
// that we use for communicating between chains.
markAccept := configParams.NextIptablesMark()
markPass := configParams.NextIptablesMark()
// Short-lived mark bits for local calculations within a chain.
markScratch0 := configParams.NextIptablesMark()
markScratch1 := configParams.NextIptablesMark()
log.WithFields(log.Fields{
"acceptMark": markAccept,
"passMark": markPass,
"scratch0Mark": markScratch0,
"scratch1Mark": markScratch1,
}).Info("Calculated iptables mark bits")
dpConfig := intdataplane.Config{
IfaceMonitorConfig: ifacemonitor.Config{
InterfaceExcludes: configParams.InterfaceExcludes(),
},
RulesConfig: rules.Config{
WorkloadIfacePrefixes: configParams.InterfacePrefixes(),
IPSetConfigV4: ipsets.NewIPVersionConfig(
ipsets.IPFamilyV4,
rules.IPSetNamePrefix,
rules.AllHistoricIPSetNamePrefixes,
rules.LegacyV4IPSetNames,
),
IPSetConfigV6: ipsets.NewIPVersionConfig(
ipsets.IPFamilyV6,
rules.IPSetNamePrefix,
rules.AllHistoricIPSetNamePrefixes,
nil,
),
OpenStackSpecialCasesEnabled: configParams.OpenstackActive(),
OpenStackMetadataIP: net.ParseIP(configParams.MetadataAddr),
OpenStackMetadataPort: uint16(configParams.MetadataPort),
IptablesMarkAccept: markAccept,
IptablesMarkPass: markPass,
IptablesMarkScratch0: markScratch0,
IptablesMarkScratch1: markScratch1,
IPIPEnabled: configParams.IpInIpEnabled,
IPIPTunnelAddress: configParams.IpInIpTunnelAddr,
IptablesLogPrefix: configParams.LogPrefix,
EndpointToHostAction: configParams.DefaultEndpointToHostAction,
IptablesFilterAllowAction: configParams.IptablesFilterAllowAction,
IptablesMangleAllowAction: configParams.IptablesMangleAllowAction,
FailsafeInboundHostPorts: configParams.FailsafeInboundHostPorts,
FailsafeOutboundHostPorts: configParams.FailsafeOutboundHostPorts,
DisableConntrackInvalid: configParams.DisableConntrackInvalidCheck,
},
IPIPMTU: configParams.IpInIpMtu,
IptablesRefreshInterval: configParams.IptablesRefreshInterval,
RouteRefreshInterval: configParams.RouteRefreshInterval,
IPSetsRefreshInterval: configParams.IpsetsRefreshInterval,
IptablesPostWriteCheckInterval: configParams.IptablesPostWriteCheckIntervalSecs,
IptablesInsertMode: configParams.ChainInsertMode,
IptablesLockFilePath: configParams.IptablesLockFilePath,
IptablesLockTimeout: configParams.IptablesLockTimeoutSecs,
IptablesLockProbeInterval: configParams.IptablesLockProbeIntervalMillis,
MaxIPSetSize: configParams.MaxIpsetSize,
IgnoreLooseRPF: configParams.IgnoreLooseRPF,
IPv6Enabled: configParams.Ipv6Support,
StatusReportingInterval: configParams.ReportingIntervalSecs,
NetlinkTimeout: configParams.NetlinkTimeoutSecs,
PostInSyncCallback: func() { dumpHeapMemoryProfile(configParams) },
HealthAggregator: healthAggregator,
DebugSimulateDataplaneHangAfter: configParams.DebugSimulateDataplaneHangAfter,
}
intDP := intdataplane.NewIntDataplaneDriver(dpConfig)
intDP.Start()
dpDriver = intDP
} else {
log.WithField("driver", configParams.DataplaneDriver).Info(
"Using external dataplane driver.")
dpDriver, dpDriverCmd = extdataplane.StartExtDataplaneDriver(configParams.DataplaneDriver)
}
// Initialise the glue logic that connects the calculation graph to/from the dataplane driver.
log.Info("Connect to the dataplane driver.")
failureReportChan := make(chan string)
dpConnector := newConnector(configParams, backendClient, dpDriver, failureReportChan)
// Now create the calculation graph, which receives updates from the
// datastore and outputs dataplane updates for the dataplane driver.
//
// The Syncer has its own thread and we use an extra thread for the
// Validator, just to pipeline that part of the calculation then the
// main calculation graph runs in a single thread for simplicity.
// The output of the calculation graph arrives at the dataplane
// connection via channel.
//
// Syncer -chan-> Validator -chan-> Calc graph -chan-> dataplane
// KVPair KVPair protobufs
// Get a Syncer from the datastore, or a connection to our remote sync daemon, Typha,
// which will feed the calculation graph with updates, bringing Felix into sync.
var syncer Startable
var typhaConnection *syncclient.SyncerClient
syncerToValidator := calc.NewSyncerCallbacksDecoupler()
if typhaAddr != "" {
// Use a remote Syncer, via the Typha server.
log.WithField("addr", typhaAddr).Info("Connecting to Typha.")
typhaConnection = syncclient.New(
typhaAddr,
buildinfo.GitVersion,
configParams.FelixHostname,
fmt.Sprintf("Revision: %s; Build date: %s",
buildinfo.GitRevision, buildinfo.BuildDate),
syncerToValidator,
&syncclient.Options{
ReadTimeout: configParams.TyphaReadTimeout,
WriteTimeout: configParams.TyphaWriteTimeout,
},
)
} else {
// Use the syncer locally.
syncer = backendClient.Syncer(syncerToValidator)
}
log.WithField("syncer", syncer).Info("Created Syncer")
// Create the ipsets/active policy calculation graph, which will
// do the dynamic calculation of ipset memberships and active policies
// etc.
asyncCalcGraph := calc.NewAsyncCalcGraph(configParams, dpConnector.ToDataplane, healthAggregator)
if configParams.UsageReportingEnabled {
// Usage reporting enabled, add stats collector to graph. When it detects an update
// to the stats, it makes a callback, which we use to send an update on a channel.
// We use a buffered channel here to avoid blocking the calculation graph.
statsChanIn := make(chan calc.StatsUpdate, 1)
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
statsChanIn <- stats
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.Dispatcher)
// Rather than sending the updates directly to the usage reporting thread, we
// decouple with an extra goroutine. This prevents blocking the calculation graph
// goroutine if the usage reporting goroutine is blocked on IO, for example.
// Using a buffered channel wouldn't work here because the usage reporting
// goroutine can block for a long time on IO so we could build up a long queue.
statsChanOut := make(chan calc.StatsUpdate)
go func() {
var statsChanOutOrNil chan calc.StatsUpdate
var stats calc.StatsUpdate
for {
select {
case stats = <-statsChanIn:
// Got a stats update, activate the output channel.
log.WithField("stats", stats).Debug("Buffer: stats update received")
statsChanOutOrNil = statsChanOut
case statsChanOutOrNil <- stats:
// Passed on the update, deactivate the output channel until
// the next update.
log.WithField("stats", stats).Debug("Buffer: stats update sent")
statsChanOutOrNil = nil
}
}
}()
go usagerep.PeriodicallyReportUsage(
24*time.Hour,
configParams.ClusterGUID,
configParams.ClusterType,
configParams.CalicoVersion,
statsChanOut,
)
} else {
// Usage reporting disabled, but we still want a stats collector for the
// felix_cluster_* metrics. Register a no-op function as the callback.
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.Dispatcher)
}
// Create the validator, which sits between the syncer and the
// calculation graph.
validator := calc.NewValidationFilter(asyncCalcGraph)
// Start the background processing threads.
if syncer != nil {
log.Infof("Starting the datastore Syncer")
syncer.Start()
} else {
log.Infof("Starting the Typha connection")
err := typhaConnection.Start(context.Background())
if err != nil {
log.WithError(err).Fatal("Failed to connect to Typha")
}
go func() {
typhaConnection.Finished.Wait()
failureReportChan <- "Connection to Typha failed"
}()
}
go syncerToValidator.SendTo(validator)
asyncCalcGraph.Start()
log.Infof("Started the processing graph")
var stopSignalChans []chan<- bool
if configParams.EndpointReportingEnabled {
delay := configParams.EndpointReportingDelaySecs
log.WithField("delay", delay).Info(
"Endpoint status reporting enabled, starting status reporter")
dpConnector.statusReporter = statusrep.NewEndpointStatusReporter(
configParams.FelixHostname,
dpConnector.StatusUpdatesFromDataplane,
dpConnector.InSync,
dpConnector.datastore,
delay,
delay*180,
)
dpConnector.statusReporter.Start()
}
// Start communicating with the dataplane driver.
dpConnector.Start()
// Send the opening message to the dataplane driver, giving it its
// config.
dpConnector.ToDataplane <- &proto.ConfigUpdate{
Config: configParams.RawValues(),
}
if configParams.PrometheusMetricsEnabled {
log.Info("Prometheus metrics enabled. Starting server.")
gaugeHost := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "felix_host",
Help: "Configured Felix hostname (as a label), typically used in grouping/aggregating stats; the label defaults to the hostname of the host but can be overridden by configuration. The value of the gauge is always set to 1.",
ConstLabels: prometheus.Labels{"host": configParams.FelixHostname},
})
gaugeHost.Set(1)
prometheus.MustRegister(gaugeHost)
go servePrometheusMetrics(configParams)
}
if configParams.HealthEnabled {
log.WithField("port", configParams.HealthPort).Info("Health enabled. Starting server.")
go healthAggregator.ServeHTTP(configParams.HealthPort)
}
// On receipt of SIGUSR1, write out heap profile.
usr1SignalChan := make(chan os.Signal, 1)
signal.Notify(usr1SignalChan, syscall.SIGUSR1)
go func() {
for {
<-usr1SignalChan
dumpHeapMemoryProfile(configParams)
}
}()
// Now monitor the worker process and our worker threads and shut
// down the process gracefully if they fail.
monitorAndManageShutdown(failureReportChan, dpDriverCmd, stopSignalChans)
}
func dumpHeapMemoryProfile(configParams *config.Config) {
// If a memory profile file name is configured, dump a heap memory profile. If the
// configured filename includes "<timestamp>", that will be replaced with a stamp indicating
// the current time.
memProfFileName := configParams.DebugMemoryProfilePath
if memProfFileName != "" {
logCxt := log.WithField("file", memProfFileName)
logCxt.Info("Asked to create a memory profile.")
// If the configured file name includes "<timestamp>", replace that with the current
// time.
if strings.Contains(memProfFileName, "<timestamp>") {
timestamp := time.Now().Format("2006-01-02-15:04:05")
memProfFileName = strings.Replace(memProfFileName, "<timestamp>", timestamp, 1)
logCxt = log.WithField("file", memProfFileName)
}
// Open a file with that name.
memProfFile, err := os.Create(memProfFileName)
if err != nil {
logCxt.WithError(err).Fatal("Could not create memory profile file")
memProfFile = nil
} else {
defer memProfFile.Close()
logCxt.Info("Writing memory profile...")
// The initial resync uses a lot of scratch space so now is
// a good time to force a GC and return any RAM that we can.
debug.FreeOSMemory()
if err := pprof.WriteHeapProfile(memProfFile); err != nil {
logCxt.WithError(err).Fatal("Could not write memory profile")
}
logCxt.Info("Finished writing memory profile")
}
}
}
func servePrometheusMetrics(configParams *config.Config) {
for {
log.WithField("port", configParams.PrometheusMetricsPort).Info("Starting prometheus metrics endpoint")
if configParams.PrometheusGoMetricsEnabled && configParams.PrometheusProcessMetricsEnabled {
log.Info("Including Golang & Process metrics")
} else {
if !configParams.PrometheusGoMetricsEnabled {
log.Info("Discarding Golang metrics")
prometheus.Unregister(prometheus.NewGoCollector())
}
if !configParams.PrometheusProcessMetricsEnabled {
log.Info("Discarding process metrics")
prometheus.Unregister(prometheus.NewProcessCollector(os.Getpid(), ""))
}
}
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(fmt.Sprintf(":%v", configParams.PrometheusMetricsPort), nil)
log.WithError(err).Error(
"Prometheus metrics endpoint failed, trying to restart it...")
time.Sleep(1 * time.Second)
}
}
func monitorAndManageShutdown(failureReportChan <-chan string, driverCmd *exec.Cmd, stopSignalChans []chan<- bool) {
// Ask the runtime to tell us if we get a term signal.
termSignalChan := make(chan os.Signal, 1)
signal.Notify(termSignalChan, syscall.SIGTERM)
// Start a background thread to tell us when the dataplane driver stops.
// If the driver stops unexpectedly, we'll terminate this process.
// If this process needs to stop, we'll kill the driver and then wait
// for the message from the background thread.
driverStoppedC := make(chan bool)
go func() {
if driverCmd == nil {
log.Info("No driver process to monitor")
return
}
err := driverCmd.Wait()
log.WithError(err).Warn("Driver process stopped")
driverStoppedC <- true
}()
// Wait for one of the channels to give us a reason to shut down.
driverAlreadyStopped := driverCmd == nil
receivedSignal := false
var reason string
select {
case <-driverStoppedC:
reason = "Driver stopped"
driverAlreadyStopped = true
case sig := <-termSignalChan:
reason = fmt.Sprintf("Received OS signal %v", sig)
receivedSignal = true
case reason = <-failureReportChan:
}
logCxt := log.WithField("reason", reason)
logCxt.Warn("Felix is shutting down")
// Notify other components to stop.
for _, c := range stopSignalChans {
select {
case c <- true:
default:
}
}
if !driverAlreadyStopped {
// Driver may still be running, just in case the driver is
// unresponsive, start a thread to kill this process if we
// don't manage to kill the driver.
logCxt.Info("Driver still running, trying to shut it down...")
giveUpOnSigTerm := make(chan bool)
go func() {
time.Sleep(4 * time.Second)
giveUpOnSigTerm <- true
time.Sleep(1 * time.Second)
log.Fatal("Failed to wait for driver to exit, giving up.")
}()
// Signal to the driver to exit.
driverCmd.Process.Signal(syscall.SIGTERM)
select {
case <-driverStoppedC:
logCxt.Info("Driver shut down after SIGTERM")
case <-giveUpOnSigTerm:
logCxt.Error("Driver did not respond to SIGTERM, sending SIGKILL")
driverCmd.Process.Kill()
<-driverStoppedC
logCxt.Info("Driver shut down after SIGKILL")
}
}
if !receivedSignal {
// We're exiting due to a failure or a config change, wait
// a couple of seconds to ensure that we don't go into a tight
// restart loop (which would make the init daemon give up trying
// to restart us).
logCxt.Info("Shutdown wasn't caused by signal, pausing to avoid tight restart loop")
go func() {
time.Sleep(2 * time.Second)
logCxt.Fatal("Exiting.")
}()
// But, if we get a signal while we're waiting quit immediately.
<-termSignalChan
}
logCxt.Fatal("Exiting immediately")
}
func loadConfigFromDatastore(
ctx context.Context, client bapi.Client, hostname string,
) (globalConfig, hostConfig map[string]string, err error) {
//log.Info("Waiting for the datastore to be ready")
//if kv, err := datastore.Get(ctx, model.ReadyFlagKey{}, ""); err != nil {
// log.WithError(err).Error("Failed to read global datastore 'Ready' flag, will retry...")
// time.Sleep(1 * time.Second)
// continue
//} else if kv.Value != true {
// log.Warning("Global datastore 'Ready' flag set to false, waiting...")
// time.Sleep(1 * time.Second)
// continue
//}
g, err := client.Get(ctx, model.ResourceKey{
Kind: apiv2.KindFelixConfiguration,
Name: "default",
Namespace: "",
}, "")
if _, ok := err.(errors2.ErrorResourceDoesNotExist); err != nil && !ok {
return
}
h, err := client.Get(ctx, model.ResourceKey{
Kind: apiv2.KindFelixConfiguration,
Name: "node." + hostname,
Namespace: "",
}, "")
if _, ok := err.(errors2.ErrorResourceDoesNotExist); err != nil && !ok {
return
}
ci, err := client.Get(ctx, model.ResourceKey{
Kind: apiv2.KindClusterInformation,
Name: "node." + hostname,
Namespace: "",
}, "")
if _, ok := err.(errors2.ErrorResourceDoesNotExist); err != nil && !ok {
return
}
globalConfig, err = convertV2ConfigToMap("global", g)
if err != nil {
return
}
hostConfig, err = convertV2ConfigToMap("per-host", h)
if err != nil {
return
}
// Merge in the global cluster info config.
ciConfig, err := convertV2ConfigToMap("cluster-info", ci)
if err != nil {
return
}
for k, v := range ciConfig {
globalConfig[k] = v
}
return
}
// convertV2ConfigToMap converts a v2 datamodel config struct (where each configuration entry
// is stored in a field of the struct) into a map, as required by our v1-style configuration
// loader.
func convertV2ConfigToMap(configType string, v2Config *model.KVPair) (map[string]string, error) {
logCxt := log.WithField("type", configType)
if v2Config == nil {
logCxt.Info("No config of this type")
return nil, nil
}
// Re-use the update processor logic implemented for the Syncer. We give it a v2 config
// object in a KVPair and it uses the annotations defined on it to split it into v1-style
// KV pairs.
configConverter := updateprocessors.NewFelixConfigUpdateProcessor()
v1kvs, err := configConverter.Process(v2Config)
if err != nil {
logCxt.WithError(err).Error("Failed to convert configuration")
}
c := map[string]string{}
for _, v1KV := range v1kvs {
if v1KV.Value != nil {
switch k := v1KV.Key.(type) {
case model.GlobalConfigKey:
c[k.Name] = v1KV.Value.(string)
case model.HostConfigKey:
c[k.Name] = v1KV.Value.(string)
default:
logCxt.WithField("KV", v1KV).Warn("Skipping config of unknown KV type.")
}
}
}
return c, nil
}
type dataplaneDriver interface {
SendMessage(msg interface{}) error
RecvMessage() (msg interface{}, err error)
}
type DataplaneConnector struct {
config *config.Config
ToDataplane chan interface{}
StatusUpdatesFromDataplane chan interface{}
InSync chan bool
failureReportChan chan<- string
dataplane dataplaneDriver
datastore bapi.Client
statusReporter *statusrep.EndpointStatusReporter
datastoreInSync bool
firstStatusReportSent bool
}
type Startable interface {
Start()
}
func newConnector(configParams *config.Config,
datastore bapi.Client,
dataplane dataplaneDriver,
failureReportChan chan<- string) *DataplaneConnector {
felixConn := &DataplaneConnector{
config: configParams,
datastore: datastore,
ToDataplane: make(chan interface{}),
StatusUpdatesFromDataplane: make(chan interface{}),
InSync: make(chan bool, 1),
failureReportChan: failureReportChan,
dataplane: dataplane,
}
return felixConn
}
func (fc *DataplaneConnector) readMessagesFromDataplane() {
defer func() {
fc.shutDownProcess("Failed to read messages from dataplane")
}()
log.Info("Reading from dataplane driver pipe...")
for {
payload, err := fc.dataplane.RecvMessage()
if err != nil {
log.WithError(err).Error("Failed to read from front-end socket")
fc.shutDownProcess("Failed to read from front-end socket")
}
log.WithField("payload", payload).Debug("New message from dataplane")
switch msg := payload.(type) {
case *proto.ProcessStatusUpdate:
fc.handleProcessStatusUpdate(msg)
case *proto.WorkloadEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.WorkloadEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
default:
log.WithField("msg", msg).Warning("Unknown message from dataplane")
}
log.Debug("Finished handling message from front-end")
}
}
func (fc *DataplaneConnector) handleProcessStatusUpdate(msg *proto.ProcessStatusUpdate) {
log.Debugf("Status update from dataplane driver: %v", *msg)
statusReport := model.StatusReport{
Timestamp: msg.IsoTimestamp,
UptimeSeconds: msg.Uptime,
FirstUpdate: !fc.firstStatusReportSent,
}
kv := model.KVPair{
Key: model.ActiveStatusReportKey{Hostname: fc.config.FelixHostname},
Value: &statusReport,
TTL: fc.config.ReportingTTLSecs,
}
_, err := fc.datastore.Apply(&kv)
if err != nil {
log.Warningf("Failed to write status to datastore: %v", err)
} else {
fc.firstStatusReportSent = true
}
kv = model.KVPair{
Key: model.LastStatusReportKey{Hostname: fc.config.FelixHostname},
Value: &statusReport,
}
_, err = fc.datastore.Apply(&kv)
if err != nil {
log.Warningf("Failed to write status to datastore: %v", err)
}
}
func (fc *DataplaneConnector) sendMessagesToDataplaneDriver() {
defer func() {
fc.shutDownProcess("Failed to send messages to dataplane")
}()
var config map[string]string
for {
msg := <-fc.ToDataplane
switch msg := msg.(type) {
case *proto.InSync:
log.Info("Datastore now in sync.")
if !fc.datastoreInSync {
log.Info("Datastore in sync for first time, sending message to status reporter.")
fc.datastoreInSync = true
fc.InSync <- true
}
case *proto.ConfigUpdate:
logCxt := log.WithFields(log.Fields{
"old": config,
"new": msg.Config,
})
logCxt.Info("Possible config update")
if config != nil && !reflect.DeepEqual(msg.Config, config) {
logCxt.Warn("Felix configuration changed. Need to restart.")
fc.shutDownProcess("config changed")
} else if config == nil {
logCxt.Info("Config resolved.")
config = make(map[string]string)
for k, v := range msg.Config {
config[k] = v
}
}
case *calc.DatastoreNotReady:
log.Warn("Datastore became unready, need to restart.")
fc.shutDownProcess("datastore became unready")
}
if err := fc.dataplane.SendMessage(msg); err != nil {
fc.shutDownProcess("Failed to write to dataplane driver")
}
}
}
func (fc *DataplaneConnector) shutDownProcess(reason string) {
// Send a failure report to the managed shutdown thread then give it
// a few seconds to do the shutdown.
fc.failureReportChan <- reason
time.Sleep(5 * time.Second)
// The graceful shutdown failed, terminate the process.
log.Panic("Managed shutdown failed. Panicking.")
}
func (fc *DataplaneConnector) Start() {
// Start a background thread to write to the dataplane driver.
go fc.sendMessagesToDataplaneDriver()
// Start background thread to read messages from dataplane driver.
go fc.readMessagesFromDataplane()
}
var ErrServiceNotReady = errors.New("Kubernetes service missing IP or port.")
func discoverTyphaAddr(configParams *config.Config) (string, error) {
if configParams.TyphaAddr != "" {
// Explicit address; trumps other sources of config.
return configParams.TyphaAddr, nil
}
if configParams.TyphaK8sServiceName == "" {
// No explicit address, and no service name, not using Typha.
return "", nil
}
// If we get here, we need to look up the Typha service using the k8s API.
// TODO Typha: support Typha lookup without using rest.InClusterConfig().
k8sconf, err := rest.InClusterConfig()
if err != nil {
log.WithError(err).Error("Unable to create Kubernetes config.")
return "", err
}
clientset, err := kubernetes.NewForConfig(k8sconf)
if err != nil {
log.WithError(err).Error("Unable to create Kubernetes client set.")
return "", err
}
svcClient := clientset.CoreV1().Services(configParams.TyphaK8sNamespace)
svc, err := svcClient.Get(configParams.TyphaK8sServiceName, v1.GetOptions{})
if err != nil {
log.WithError(err).Error("Unable to get Typha service from Kubernetes.")
return "", err
}
host := svc.Spec.ClusterIP
log.WithField("clusterIP", host).Info("Found Typha ClusterIP.")
if host == "" {
log.WithError(err).Error("Typha service had no ClusterIP.")
return "", ErrServiceNotReady
}
for _, p := range svc.Spec.Ports {
if p.Name == "calico-typha" {
log.WithField("port", p).Info("Found Typha service port.")
typhaAddr := fmt.Sprintf("%s:%v", host, p.Port)
return typhaAddr, nil
}
}
log.Error("Didn't find Typha service port.")
return "", ErrServiceNotReady
}
| 1 | 15,852 | Should be checking for error here | projectcalico-felix | c |
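The review above ("Should be checking for error here") asks for an error return that the patched code discards to be handled. The flagged line lives in the patch column of this row and is not reproduced in the excerpt, but the old file shows the same pattern — `driverCmd.Process.Signal(syscall.SIGTERM)` in `monitorAndManageShutdown` drops its error — so, purely as a hedged sketch of the requested pattern (not the actual patched line), the check would follow the logrus style the file already uses:

```go
// Sketch only: handle the error instead of discarding it, logging in the
// logrus style (log.WithError) already used throughout this file.
if err := driverCmd.Process.Signal(syscall.SIGTERM); err != nil {
	log.WithError(err).Warn("Failed to signal dataplane driver to stop")
}
```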
@@ -431,7 +431,7 @@ class Form extends WidgetBase
$this->fields = [];
}
- $this->allTabs->outside = new FormTabs(FormTabs::SECTION_OUTSIDE, $this->config);
+ $this->allTabs->outside = new FormTabs(FormTabs::SECTION_OUTSIDE, (array)$this->config);
$this->addFields($this->fields);
/* | 1 | <?php namespace Backend\Widgets;
use Lang;
use Form as FormHelper;
use Backend\Classes\FormTabs;
use Backend\Classes\FormField;
use Backend\Classes\WidgetBase;
use Backend\Classes\WidgetManager;
use Backend\Classes\FormWidgetBase;
use October\Rain\Database\Model;
use October\Rain\Html\Helper as HtmlHelper;
use ApplicationException;
use Exception;
/**
* Form Widget
* Used for building back end forms and renders a form.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class Form extends WidgetBase
{
use \Backend\Traits\FormModelSaver;
//
// Configurable properties
//
/**
* @var array Form field configuration.
*/
public $fields;
/**
* @var array Primary tab configuration.
*/
public $tabs;
/**
* @var array Secondary tab configuration.
*/
public $secondaryTabs;
/**
* @var Model Form model object.
*/
public $model;
/**
* @var array Dataset containing field values, if none supplied, model is used.
*/
public $data;
/**
* @var string The context of this form, fields that do not belong
* to this context will not be shown.
*/
public $context = null;
/**
* @var string If the field element names should be contained in an array.
* Eg: <input name="nameArray[fieldName]" />
*/
public $arrayName;
/**
* @var bool Used to flag that this form is being rendered as part of another form,
* a good indicator to expect that the form model and dataset values will differ.
*/
public $isNested = false;
//
// Object properties
//
/**
* @inheritDoc
*/
protected $defaultAlias = 'form';
/**
* @var boolean Determines if field definitions have been created.
*/
protected $fieldsDefined = false;
/**
* @var array Collection of all fields used in this form.
* @see Backend\Classes\FormField
*/
protected $allFields = [];
/**
* @var object Collection of tab sections used in this form.
* @see Backend\Classes\FormTabs
*/
protected $allTabs = [
'outside' => null,
'primary' => null,
'secondary' => null,
];
/**
* @var array Collection of all form widgets used in this form.
*/
protected $formWidgets = [];
/**
* @var string Active session key, used for editing forms and deferred bindings.
*/
public $sessionKey;
/**
* @var bool Render this form with uneditable preview data.
*/
public $previewMode = false;
/**
* @var \Backend\Classes\WidgetManager
*/
protected $widgetManager;
/**
* @inheritDoc
*/
public function init()
{
$this->fillFromConfig([
'fields',
'tabs',
'secondaryTabs',
'model',
'data',
'context',
'arrayName',
'isNested',
]);
$this->widgetManager = WidgetManager::instance();
$this->allTabs = (object) $this->allTabs;
$this->validateModel();
}
/**
* Ensure fields are defined and form widgets are registered so they can
* also be bound to the controller this allows their AJAX features to
* operate.
*
* @return void
*/
public function bindToController()
{
$this->defineFormFields();
parent::bindToController();
}
/**
* @inheritDoc
*/
protected function loadAssets()
{
$this->addJs('js/october.form.js', 'core');
}
/**
* Renders the widget.
*
* Options:
* - preview: Render this form as an uneditable preview. Default: false
* - useContainer: Wrap the result in a container, used by AJAX. Default: true
* - section: Which form section to render. Default: null
* - outside: Renders the Outside Fields section.
* - primary: Renders the Primary Tabs section.
* - secondary: Renders the Secondary Tabs section.
* - null: Renders all sections
*
* @param array $options
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function render($options = [])
{
if (isset($options['preview'])) {
$this->previewMode = $options['preview'];
}
if (!isset($options['useContainer'])) {
$options['useContainer'] = true;
}
if (!isset($options['section'])) {
$options['section'] = null;
}
$extraVars = [];
$targetPartial = 'form';
/*
* Determine the partial to use based on the supplied section option
*/
if ($section = $options['section']) {
$section = strtolower($section);
if (isset($this->allTabs->{$section})) {
$extraVars['tabs'] = $this->allTabs->{$section};
}
$targetPartial = 'section';
$extraVars['renderSection'] = $section;
}
/*
* Apply a container to the element
*/
if ($useContainer = $options['useContainer']) {
$targetPartial = $section ? 'section-container' : 'form-container';
}
$this->prepareVars();
/*
* Force preview mode on all widgets
*/
if ($this->previewMode) {
foreach ($this->formWidgets as $widget) {
$widget->previewMode = $this->previewMode;
}
}
return $this->makePartial($targetPartial, $extraVars);
}
/**
* Renders a single form field
*
* Options:
* - useContainer: Wrap the result in a container, used by AJAX. Default: true
*
* @param string|array $field The field name or definition
* @param array $options
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function renderField($field, $options = [])
{
if (is_string($field)) {
if (!isset($this->allFields[$field])) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_definition',
compact('field')
));
}
$field = $this->allFields[$field];
}
if (!isset($options['useContainer'])) {
$options['useContainer'] = true;
}
$targetPartial = $options['useContainer'] ? 'field-container' : 'field';
$this->prepareVars();
return $this->makePartial($targetPartial, ['field' => $field]);
}
/**
* Renders the HTML element for a field
* @param FormWidgetBase $field
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function renderFieldElement($field)
{
return $this->makePartial(
'field_' . $field->type,
[
'field' => $field,
'formModel' => $this->model
]
);
}
/**
* Validate the supplied form model.
*
* @return mixed
*/
protected function validateModel()
{
if (!$this->model) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_model',
['class'=>get_class($this->controller)]
));
}
$this->data = isset($this->data)
? (object) $this->data
: $this->model;
return $this->model;
}
/**
* Prepares the form data
*
* @return void
*/
protected function prepareVars()
{
$this->defineFormFields();
$this->applyFiltersFromModel();
$this->vars['sessionKey'] = $this->getSessionKey();
$this->vars['outsideTabs'] = $this->allTabs->outside;
$this->vars['primaryTabs'] = $this->allTabs->primary;
$this->vars['secondaryTabs'] = $this->allTabs->secondary;
}
/**
* Sets or resets form field values.
* @param array $data
* @return array
*/
public function setFormValues($data = null)
{
if ($data === null) {
$data = $this->getSaveData();
}
/*
* Fill the model as if it were to be saved
*/
$this->prepareModelsToSave($this->model, $data);
/*
* Data set differs from model
*/
if ($this->data !== $this->model) {
$this->data = (object) array_merge((array) $this->data, (array) $data);
}
/*
* Set field values from data source
*/
foreach ($this->allFields as $field) {
$field->value = $this->getFieldValue($field);
}
return $data;
}
/**
* Event handler for refreshing the form.
*
* @return array
*/
public function onRefresh()
{
$result = [];
$saveData = $this->getSaveData();
/*
* Extensibility
*/
$dataHolder = (object) ['data' => $saveData];
$this->fireSystemEvent('backend.form.beforeRefresh', [$dataHolder]);
$saveData = $dataHolder->data;
/*
* Set the form variables and prepare the widget
*/
$this->setFormValues($saveData);
$this->prepareVars();
/*
* Extensibility
*/
$this->fireSystemEvent('backend.form.refreshFields', [$this->allFields]);
/*
* If an array of fields is supplied, update specified fields individually.
*/
if (($updateFields = post('fields')) && is_array($updateFields)) {
foreach ($updateFields as $field) {
if (!isset($this->allFields[$field])) {
continue;
}
/** @var FormWidgetBase $fieldObject */
$fieldObject = $this->allFields[$field];
$result['#' . $fieldObject->getId('group')] = $this->makePartial('field', ['field' => $fieldObject]);
}
}
/*
* Update the whole form
*/
if (empty($result)) {
$result = ['#'.$this->getId() => $this->makePartial('form')];
}
/*
* Extensibility
*/
$eventResults = $this->fireSystemEvent('backend.form.refresh', [$result], false);
foreach ($eventResults as $eventResult) {
$result = $eventResult + $result;
}
return $result;
}
/**
* Creates a flat array of form fields from the configuration.
* Also slots fields in to their respective tabs.
*
* @return void
*/
protected function defineFormFields()
{
if ($this->fieldsDefined) {
return;
}
/*
* Extensibility
*/
$this->fireSystemEvent('backend.form.extendFieldsBefore');
/*
* Outside fields
*/
if (!isset($this->fields) || !is_array($this->fields)) {
$this->fields = [];
}
$this->allTabs->outside = new FormTabs(FormTabs::SECTION_OUTSIDE, $this->config);
$this->addFields($this->fields);
/*
* Primary Tabs + Fields
*/
if (!isset($this->tabs['fields']) || !is_array($this->tabs['fields'])) {
$this->tabs['fields'] = [];
}
$this->allTabs->primary = new FormTabs(FormTabs::SECTION_PRIMARY, $this->tabs);
$this->addFields($this->tabs['fields'], FormTabs::SECTION_PRIMARY);
/*
* Secondary Tabs + Fields
*/
if (!isset($this->secondaryTabs['fields']) || !is_array($this->secondaryTabs['fields'])) {
$this->secondaryTabs['fields'] = [];
}
$this->allTabs->secondary = new FormTabs(FormTabs::SECTION_SECONDARY, $this->secondaryTabs);
$this->addFields($this->secondaryTabs['fields'], FormTabs::SECTION_SECONDARY);
/*
* Extensibility
*/
$this->fireSystemEvent('backend.form.extendFields', [$this->allFields]);
/*
* Convert automatic spanned fields
*/
foreach ($this->allTabs->outside->getFields() as $fields) {
$this->processAutoSpan($fields);
}
foreach ($this->allTabs->primary->getFields() as $fields) {
$this->processAutoSpan($fields);
}
foreach ($this->allTabs->secondary->getFields() as $fields) {
$this->processAutoSpan($fields);
}
/*
* At least one tab section should stretch
*/
if (
$this->allTabs->secondary->stretch === null
&& $this->allTabs->primary->stretch === null
&& $this->allTabs->outside->stretch === null
) {
if ($this->allTabs->secondary->hasFields()) {
$this->allTabs->secondary->stretch = true;
}
elseif ($this->allTabs->primary->hasFields()) {
$this->allTabs->primary->stretch = true;
}
else {
$this->allTabs->outside->stretch = true;
}
}
/*
* Bind all form widgets to controller
*/
foreach ($this->allFields as $field) {
if ($field->type !== 'widget') {
continue;
}
$widget = $this->makeFormFieldWidget($field);
$widget->bindToController();
}
$this->fieldsDefined = true;
}
/**
* Converts fields with a span set to 'auto' as either
* 'left' or 'right' depending on the previous field.
*
* @return void
*/
protected function processAutoSpan($fields)
{
$prevSpan = null;
foreach ($fields as $field) {
if (strtolower($field->span) === 'auto') {
if ($prevSpan === 'left') {
$field->span = 'right';
}
else {
$field->span = 'left';
}
}
$prevSpan = $field->span;
}
}
/**
* Programatically add fields, used internally and for extensibility.
*
* @param array $fields
* @param string $addToArea
* @return void
*/
public function addFields(array $fields, $addToArea = null)
{
foreach ($fields as $name => $config) {
$fieldObj = $this->makeFormField($name, $config);
$fieldTab = is_array($config) ? array_get($config, 'tab') : null;
/*
* Check that the form field matches the active context
*/
if ($fieldObj->context !== null) {
$context = (is_array($fieldObj->context)) ? $fieldObj->context : [$fieldObj->context];
if (!in_array($this->getContext(), $context)) {
continue;
}
}
$this->allFields[$name] = $fieldObj;
switch (strtolower($addToArea)) {
case FormTabs::SECTION_PRIMARY:
$this->allTabs->primary->addField($name, $fieldObj, $fieldTab);
break;
case FormTabs::SECTION_SECONDARY:
$this->allTabs->secondary->addField($name, $fieldObj, $fieldTab);
break;
default:
$this->allTabs->outside->addField($name, $fieldObj);
break;
}
}
}
/**
* Add tab fields.
*
* @param array $fields
* @return void
*/
public function addTabFields(array $fields)
{
$this->addFields($fields, 'primary');
}
/**
* @param array $fields
* @return void
*/
public function addSecondaryTabFields(array $fields)
{
$this->addFields($fields, 'secondary');
}
/**
* Programatically remove a field.
*
* @param string $name
* @return bool
*/
public function removeField($name)
{
if (!isset($this->allFields[$name])) {
return false;
}
/*
* Remove from tabs
*/
$this->allTabs->primary->removeField($name);
$this->allTabs->secondary->removeField($name);
$this->allTabs->outside->removeField($name);
/*
* Remove from main collection
*/
unset($this->allFields[$name]);
return true;
}
/**
* Programatically remove all fields belonging to a tab.
*
* @param string $name
* @return bool
*/
public function removeTab($name)
{
foreach ($this->allFields as $fieldName => $field) {
if ($field->tab == $name) {
$this->removeField($fieldName);
}
}
}
/**
* Creates a form field object from name and configuration.
*
* @param string $name
* @param array $config
* @return FormField
*/
protected function makeFormField($name, $config = [])
{
$label = (isset($config['label'])) ? $config['label'] : null;
list($fieldName, $fieldContext) = $this->getFieldName($name);
$field = new FormField($fieldName, $label);
if ($fieldContext) {
$field->context = $fieldContext;
}
$field->arrayName = $this->arrayName;
$field->idPrefix = $this->getId();
/*
* Simple field type
*/
if (is_string($config)) {
if ($this->isFormWidget($config) !== false) {
$field->displayAs('widget', ['widget' => $config]);
}
else {
$field->displayAs($config);
}
}
/*
* Defined field type
*/
else {
$fieldType = isset($config['type']) ? $config['type'] : null;
if (!is_string($fieldType) && !is_null($fieldType)) {
throw new ApplicationException(Lang::get(
'backend::lang.field.invalid_type',
['type'=>gettype($fieldType)]
));
}
/*
* Widget with configuration
*/
if ($this->isFormWidget($fieldType) !== false) {
$config['widget'] = $fieldType;
$fieldType = 'widget';
}
$field->displayAs($fieldType, $config);
}
/*
* Set field value
*/
$field->value = $this->getFieldValue($field);
/*
* Check model if field is required
*/
if ($field->required === null && $this->model && method_exists($this->model, 'isAttributeRequired')) {
$fieldName = implode('.', HtmlHelper::nameToArray($field->fieldName));
$field->required = $this->model->isAttributeRequired($fieldName);
}
/*
* Get field options from model
*/
$optionModelTypes = ['dropdown', 'radio', 'checkboxlist', 'balloon-selector'];
if (in_array($field->type, $optionModelTypes, false)) {
/*
* Defer the execution of option data collection
*/
$field->options(function () use ($field, $config) {
$fieldOptions = isset($config['options']) ? $config['options'] : null;
$fieldOptions = $this->getOptionsFromModel($field, $fieldOptions);
return $fieldOptions;
});
}
return $field;
}
/**
* Check if a field type is a widget or not
*
* @param string $fieldType
* @return boolean
*/
protected function isFormWidget($fieldType)
{
if ($fieldType === null) {
return false;
}
if (strpos($fieldType, '\\')) {
return true;
}
$widgetClass = $this->widgetManager->resolveFormWidget($fieldType);
if (!class_exists($widgetClass)) {
return false;
}
if (is_subclass_of($widgetClass, 'Backend\Classes\FormWidgetBase')) {
return true;
}
return false;
}
/**
* Makes a widget object from a form field object.
*
* @param $field
* @return \Backend\Traits\FormWidgetBase|null
*/
protected function makeFormFieldWidget($field)
{
if ($field->type !== 'widget') {
return null;
}
if (isset($this->formWidgets[$field->fieldName])) {
return $this->formWidgets[$field->fieldName];
}
$widgetConfig = $this->makeConfig($field->config);
$widgetConfig->alias = $this->alias . studly_case(HtmlHelper::nameToId($field->fieldName));
$widgetConfig->sessionKey = $this->getSessionKey();
$widgetConfig->previewMode = $this->previewMode;
$widgetConfig->model = $this->model;
$widgetConfig->data = $this->data;
$widgetName = $widgetConfig->widget;
$widgetClass = $this->widgetManager->resolveFormWidget($widgetName);
if (!class_exists($widgetClass)) {
throw new ApplicationException(Lang::get(
'backend::lang.widget.not_registered',
['name' => $widgetClass]
));
}
$widget = $this->makeFormWidget($widgetClass, $field, $widgetConfig);
/*
* If options config is defined, request options from the model.
*/
if (isset($field->config['options'])) {
$field->options(function () use ($field) {
$fieldOptions = $field->config['options'];
if ($fieldOptions === true) $fieldOptions = null;
$fieldOptions = $this->getOptionsFromModel($field, $fieldOptions);
return $fieldOptions;
});
}
return $this->formWidgets[$field->fieldName] = $widget;
}
/**
* Get all the loaded form widgets for the instance.
*
* @return array
*/
public function getFormWidgets()
{
return $this->formWidgets;
}
/**
* Get a specified form widget
*
* @param string $field
* @return mixed
*/
public function getFormWidget($field)
{
if (isset($this->formWidgets[$field])) {
return $this->formWidgets[$field];
}
return null;
}
/**
* Get all the registered fields for the instance.
*
* @return array
*/
public function getFields()
{
return $this->allFields;
}
/**
* Get a specified field object
*
* @param string $field
* @return mixed
*/
public function getField($field)
{
if (isset($this->allFields[$field])) {
return $this->allFields[$field];
}
return null;
}
/**
* Get all tab objects for the instance.
*
* @return object[FormTabs]
*/
public function getTabs()
{
return $this->allTabs;
}
/**
* Get a specified tab object.
* Options: outside, primary, secondary.
*
* @param string $field
* @return mixed
*/
public function getTab($tab)
{
if (isset($this->allTabs->$tab)) {
return $this->allTabs->$tab;
}
return null;
}
/**
* Parses a field's name
* @param string $field Field name
* @return array [columnName, context]
*/
protected function getFieldName($field)
{
if (strpos($field, '@') === false) {
return [$field, null];
}
return explode('@', $field);
}
/**
* Looks up the field value.
* @param mixed $field
* @return string
*/
protected function getFieldValue($field)
{
if (is_string($field)) {
if (!isset($this->allFields[$field])) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_definition',
compact('field')
));
}
$field = $this->allFields[$field];
}
$defaultValue = !$this->model->exists
? $field->getDefaultFromData($this->data)
: null;
return $field->getValueFromData($this->data, $defaultValue);
}
/**
* Returns a HTML encoded value containing the other fields this
* field depends on
* @param \Backend\Classes\FormField $field
* @return string
*/
protected function getFieldDepends($field)
{
if (!$field->dependsOn) {
return '';
}
$dependsOn = is_array($field->dependsOn) ? $field->dependsOn : [$field->dependsOn];
$dependsOn = htmlspecialchars(json_encode($dependsOn), ENT_QUOTES, 'UTF-8');
return $dependsOn;
}
/**
* Helper method to determine if field should be rendered
* with label and comments.
* @param \Backend\Classes\FormField $field
* @return boolean
*/
protected function showFieldLabels($field)
{
if (in_array($field->type, ['checkbox', 'switch', 'section'])) {
return false;
}
if ($field->type === 'widget') {
$widget = $this->makeFormFieldWidget($field);
return $widget->showLabels;
}
return true;
}
/**
* Returns post data from a submitted form.
*
* @return array
*/
public function getSaveData()
{
$this->defineFormFields();
$result = [];
/*
* Source data
*/
$data = $this->arrayName ? post($this->arrayName) : post();
if (!$data) {
$data = [];
}
/*
* Spin over each field and extract the postback value
*/
foreach ($this->allFields as $field) {
/*
* Disabled and hidden should be omitted from data set
*/
if ($field->disabled || $field->hidden) {
continue;
}
/*
* Handle HTML array, eg: item[key][another]
*/
$parts = HtmlHelper::nameToArray($field->fieldName);
if (($value = $this->dataArrayGet($data, $parts)) !== null) {
/*
* Number fields should be converted to integers
*/
if ($field->type === 'number') {
$value = !strlen(trim($value)) ? null : (float) $value;
}
$this->dataArraySet($result, $parts, $value);
}
}
/*
* Give widgets an opportunity to process the data.
*/
foreach ($this->formWidgets as $field => $widget) {
$parts = HtmlHelper::nameToArray($field);
$widgetValue = $widget->getSaveValue($this->dataArrayGet($result, $parts));
$this->dataArraySet($result, $parts, $widgetValue);
}
return $result;
}
/*
* Allow the model to filter fields.
*/
protected function applyFiltersFromModel()
{
/*
* Standard usage
*/
if (method_exists($this->model, 'filterFields')) {
$this->model->filterFields((object) $this->allFields, $this->getContext());
}
/*
* Advanced usage
*/
if (method_exists($this->model, 'fireEvent')) {
$this->model->fireEvent('model.form.filterFields', [$this]);
}
}
/**
* Looks at the model for defined options.
*
* @param $field
* @param $fieldOptions
* @return mixed
*/
protected function getOptionsFromModel($field, $fieldOptions)
{
/*
* Advanced usage, supplied options are callable
*/
if (is_array($fieldOptions) && is_callable($fieldOptions)) {
$fieldOptions = call_user_func($fieldOptions, $this, $field);
}
/*
* Refer to the model method or any of its behaviors
*/
if (!is_array($fieldOptions) && !$fieldOptions) {
try {
list($model, $attribute) = $field->resolveModelAttribute($this->model, $field->fieldName);
}
catch (Exception $ex) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_invalid_model', [
'model' => get_class($this->model),
'field' => $field->fieldName
]));
}
$methodName = 'get'.studly_case($attribute).'Options';
if (
!$this->objectMethodExists($model, $methodName) &&
!$this->objectMethodExists($model, 'getDropdownOptions')
) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_not_exists', [
'model' => get_class($model),
'method' => $methodName,
'field' => $field->fieldName
]));
}
if ($this->objectMethodExists($model, $methodName)) {
$fieldOptions = $model->$methodName($field->value, $this->data);
}
else {
$fieldOptions = $model->getDropdownOptions($attribute, $field->value, $this->data);
}
}
/*
* Field options are an explicit method reference
*/
elseif (is_string($fieldOptions)) {
if (!$this->objectMethodExists($this->model, $fieldOptions)) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_not_exists', [
'model' => get_class($this->model),
'method' => $fieldOptions,
'field' => $field->fieldName
]));
}
$fieldOptions = $this->model->$fieldOptions($field->value, $field->fieldName, $this->data);
}
return $fieldOptions;
}
/**
* Returns the active session key.
*
* @return \Illuminate\Routing\Route|mixed|string
*/
public function getSessionKey()
{
if ($this->sessionKey) {
return $this->sessionKey;
}
if (post('_session_key')) {
return $this->sessionKey = post('_session_key');
}
return $this->sessionKey = FormHelper::getSessionKey();
}
/**
* Returns the active context for displaying the form.
*
* @return string
*/
public function getContext()
{
return $this->context;
}
/**
* Internal helper for method existence checks.
*
* @param object $object
* @param string $method
* @return boolean
*/
protected function objectMethodExists($object, $method)
{
if (method_exists($object, 'methodExists')) {
return $object->methodExists($method);
}
return method_exists($object, $method);
}
/**
* Variant to array_get() but preserves dots in key names.
*
* @param array $array
* @param array $parts
* @param null $default
* @return array|null
*/
protected function dataArrayGet(array $array, array $parts, $default = null)
{
if ($parts === null) {
return $array;
}
if (count($parts) === 1) {
$key = array_shift($parts);
if (isset($array[$key])) {
return $array[$key];
} else {
return $default;
}
}
foreach ($parts as $segment) {
if (!is_array($array) || !array_key_exists($segment, $array)) {
return $default;
}
$array = $array[$segment];
}
return $array;
}
/**
* Variant to array_set() but preserves dots in key names.
*
* @param array $array
* @param array $parts
* @param string $value
* @return array
*/
protected function dataArraySet(array &$array, array $parts, $value)
{
if ($parts === null) {
return $value;
}
while (count($parts) > 1) {
$key = array_shift($parts);
if (!isset($array[$key]) || !is_array($array[$key])) {
$array[$key] = [];
}
$array =& $array[$key];
}
$array[array_shift($parts)] = $value;
return $array;
}
}
| 1 | 13,512 | Put a space between `(array)` and `$this` and I'll merge it | octobercms-october | php |
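The review above is a pure style nit on the line added by this patch: put a space between the `(array)` cast and `$this`. The accepted form of that single line would read (a sketch of the one changed line; nothing else in the patch moves):

```php
// Space after the cast operator, as requested by the reviewer.
$this->allTabs->outside = new FormTabs(FormTabs::SECTION_OUTSIDE, (array) $this->config);
```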
@@ -0,0 +1,6 @@
+package runtime
+
+// Stack is a stub, not implemented
+func Stack(buf []byte, all bool) int {
+ return 0
+} | 1 | 1 | 9,608 | You could add this to stack.go, which has similar functions (to avoid yet another small file). None of these are likely to be implemented considering what TinyGo is designed for. | tinygo-org-tinygo | go |
|
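The review above suggests folding the new stub into the existing `src/runtime/stack.go` instead of introducing another tiny file. A sketch of that placement — the function body is copied verbatim from the patch, and `stack.go` already opens with `package runtime`, so only the function itself moves:

```go
// Appended to the existing src/runtime/stack.go (which already declares
// `package runtime`), per the reviewer's suggestion.

// Stack is a stub, not implemented
func Stack(buf []byte, all bool) int {
	return 0
}
```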
@@ -0,0 +1,13 @@
+package com.fsck.k9.notification;
+
+
+class NotificationHolder {
+ public final int notificationId;
+ public final NotificationContent content;
+
+
+ public NotificationHolder(int notificationId, NotificationContent content) {
+ this.notificationId = notificationId;
+ this.content = content;
+ }
+} | 1 | 1 | 13,196 | the class names `NotificationHolder` and `NotificationsHolder` are fairly difficult to quickly tell apart, particularly since they are often used close to each other | k9mail-k-9 | java |
|
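The review above is about naming: the new `NotificationHolder` is easy to confuse with the existing `NotificationsHolder`, especially when both appear close together. One possible response, shown only as a hypothetical sketch (the name `NotificationWithId` is invented here and does not come from the patch or the K-9 code base), is to rename the new class after what it actually pairs up:

```java
package com.fsck.k9.notification;


// Hypothetical rename of the class added in the patch above; fields and
// constructor are unchanged. NotificationContent is the existing type from
// the same package that the patch already references.
class NotificationWithId {
    public final int notificationId;
    public final NotificationContent content;


    public NotificationWithId(int notificationId, NotificationContent content) {
        this.notificationId = notificationId;
        this.content = content;
    }
}
```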
@@ -23,10 +23,7 @@
*/
package net.runelite.client.plugins.freezetimers;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
import javax.inject.Singleton;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.Actor; | 1 | /*
* Copyright (c) 2019, pklite <https://github.com/pklite/pklite>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.freezetimers;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Singleton;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.Actor;
@Slf4j
@Singleton
public class Timers
{
private final Map<Actor, HashMap<TimerType, Long>> timerMap = new HashMap<>();
public void setTimerEnd(Actor actor, TimerType type, long n)
{
if (!timerMap.containsKey(actor))
{
timerMap.put(actor, new HashMap<>());
}
timerMap.get(actor).put(type, n + type.getImmunityTime());
}
public void setTimerReApply(Actor actor, TimerType type, long n)
{
if (!timerMap.containsKey(actor))
{
timerMap.put(actor, new HashMap<>());
}
timerMap.get(actor).put(type, n);
}
public long getTimerEnd(Actor actor, TimerType type)
{
if (!timerMap.containsKey(actor))
{
return 0;
}
return timerMap.get(actor).getOrDefault(type, (long) type.getImmunityTime()) - type.getImmunityTime();
}
public long getTimerReApply(Actor actor, TimerType type)
{
if (!timerMap.containsKey(actor))
{
return 0;
}
return timerMap.get(actor).getOrDefault(type, (long) 0);
}
public List<Actor> getAllActorsOnTimer(TimerType type)
{
List<Actor> actors = new ArrayList<Actor>();
for (Actor actor : timerMap.keySet())
{
if (areAllTimersZero(actor))
{
continue;
}
final long end = getTimerReApply(actor, type);
if (end > System.currentTimeMillis())
{
actors.add(actor);
}
}
return actors;
}
public boolean areAllTimersZero(Actor actor)
{
for (TimerType type : TimerType.values())
{
if (getTimerReApply(actor, type) > System.currentTimeMillis())
{
return false;
}
}
timerMap.remove(actor);
return true;
}
}
| 1 | 15,537 | Avoid wildcard imports | open-osrs-runelite | java |
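The review above ("Avoid wildcard imports") objects to the patch collapsing four explicit `java.util` imports into `java.util.*`. The fix is simply to keep the explicit import block that the pre-patch file (and the removed diff lines) already had:

```java
// Explicit imports, exactly as in the pre-patch file, instead of java.util.*
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
```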
@@ -71,7 +71,7 @@ type (
GetContractState(hash.PKHash, hash.Hash32B) (hash.Hash32B, error)
SetContractState(hash.PKHash, hash.Hash32B, hash.Hash32B) error
// Candidate pool
- candidates() (uint64, []*Candidate)
+ Candidates() (uint64, []*Candidate)
CandidatesByHeight(uint64) ([]*Candidate, error)
}
| 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package state
import (
"context"
"math/big"
"sort"
"sync"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/blockchain/action"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/trie"
)
var (
// ErrNotEnoughBalance is the error that the balance is not enough
ErrNotEnoughBalance = errors.New("not enough balance")
// ErrAccountNotExist is the error that the account does not exist
ErrAccountNotExist = errors.New("account does not exist")
// ErrAccountCollision is the error that the account already exists
ErrAccountCollision = errors.New("account already exists")
// ErrFailedToMarshalState is the error that the state marshaling is failed
ErrFailedToMarshalState = errors.New("failed to marshal state")
// ErrFailedToUnmarshalState is the error that the state un-marshaling is failed
ErrFailedToUnmarshalState = errors.New("failed to unmarshal state")
)
const (
// CurrentHeightKey indicates the key of current factory height in underlying DB
CurrentHeightKey = "currentHeight"
// AccountTrieRootKey indicates the key of accountTrie root hash in underlying DB
AccountTrieRootKey = "accountTrieRoot"
)
type (
// Factory defines an interface for managing states
Factory interface {
lifecycle.StartStopper
// Accounts
LoadOrCreateState(string, uint64) (*State, error)
Balance(string) (*big.Int, error)
Nonce(string) (uint64, error) // Note that Nonce starts with 1.
State(string) (*State, error)
CachedState(string) (*State, error)
RootHash() hash.Hash32B
Height() (uint64, error)
NewWorkingSet() (WorkingSet, error)
RunActions(uint64, []*action.Transfer, []*action.Vote, []*action.Execution, []action.Action) (hash.Hash32B, error)
Commit(WorkingSet) error
// Contracts
GetCodeHash(hash.PKHash) (hash.Hash32B, error)
GetCode(hash.PKHash) ([]byte, error)
SetCode(hash.PKHash, []byte) error
GetContractState(hash.PKHash, hash.Hash32B) (hash.Hash32B, error)
SetContractState(hash.PKHash, hash.Hash32B, hash.Hash32B) error
// Candidate pool
candidates() (uint64, []*Candidate)
CandidatesByHeight(uint64) ([]*Candidate, error)
}
// factory implements StateFactory interface, tracks changes to account/contract and batch-commits to DB
factory struct {
lifecycle lifecycle.Lifecycle
mutex sync.RWMutex
currentChainHeight uint64
numCandidates uint
activeWs WorkingSet // active working set
rootHash hash.Hash32B // new root hash after running executions in this block
dao db.KVStore // the underlying DB for account/contract storage
actionHandlers []ActionHandler // the handlers to handle actions
}
// ActionHandler is the interface for the action handlers. For each incoming action, the assembled actions will be
// called one by one to process it. ActionHandler implementation is supposed to parse the sub-type of the action to
// decide if it wants to handle this action or not.
ActionHandler interface {
handle(action.Action) error
}
)
// FactoryOption sets Factory construction parameter
type FactoryOption func(*factory, *config.Config) error
// PrecreatedTrieDBOption uses pre-created trie DB for state factory
func PrecreatedTrieDBOption(kv db.KVStore) FactoryOption {
return func(sf *factory, cfg *config.Config) error {
if kv == nil {
return errors.New("Invalid empty trie db")
}
if err := kv.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start trie db")
}
sf.dao = kv
// get state trie root
root, err := sf.getRoot(trie.AccountKVNameSpace, AccountTrieRootKey)
if err != nil {
return errors.Wrap(err, "failed to get accountTrie's root hash from underlying DB")
}
sf.rootHash = root
return nil
}
}
// DefaultTrieOption creates trie from config for state factory
func DefaultTrieOption() FactoryOption {
return func(sf *factory, cfg *config.Config) error {
dbPath := cfg.Chain.TrieDBPath
if len(dbPath) == 0 {
return errors.New("Invalid empty trie db path")
}
trieDB := db.NewBoltDB(dbPath, &cfg.DB)
if err := trieDB.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start trie db")
}
sf.dao = trieDB
// get state trie root
root, err := sf.getRoot(trie.AccountKVNameSpace, AccountTrieRootKey)
if err != nil {
return errors.Wrap(err, "failed to get accountTrie's root hash from underlying DB")
}
sf.rootHash = root
return nil
}
}
// InMemTrieOption creates in memory trie for state factory
func InMemTrieOption() FactoryOption {
return func(sf *factory, cfg *config.Config) error {
trieDB := db.NewMemKVStore()
if err := trieDB.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start trie db")
}
sf.dao = trieDB
// get state trie root
root, err := sf.getRoot(trie.AccountKVNameSpace, AccountTrieRootKey)
if err != nil {
return errors.Wrap(err, "failed to get accountTrie's root hash from underlying DB")
}
sf.rootHash = root
return nil
}
}
// ActionHandlerOption sets the action handlers for state factory
func ActionHandlerOption(actionHandlers ...ActionHandler) FactoryOption {
return func(sf *factory, cfg *config.Config) error {
sf.actionHandlers = actionHandlers
return nil
}
}
// NewFactory creates a new state factory
func NewFactory(cfg *config.Config, opts ...FactoryOption) (Factory, error) {
sf := &factory{
currentChainHeight: 0,
numCandidates: cfg.Chain.NumCandidates,
}
for _, opt := range opts {
if err := opt(sf, cfg); err != nil {
logger.Error().Err(err).Msgf("Failed to execute state factory creation option %p", opt)
return nil, err
}
}
// create default working set
ws, err := sf.NewWorkingSet()
if err != nil {
return nil, err
}
sf.activeWs = ws
if sf.dao != nil {
sf.lifecycle.Add(sf.dao)
}
return sf, nil
}
func (sf *factory) Start(ctx context.Context) error { return sf.lifecycle.OnStart(ctx) }
func (sf *factory) Stop(ctx context.Context) error { return sf.lifecycle.OnStop(ctx) }
//======================================
// State/Account functions
//======================================
// LoadOrCreateState loads an existing State or adds a new State with the initial balance to the factory
// addr should be a properly-encoded bech32 string
func (sf *factory) LoadOrCreateState(addr string, init uint64) (*State, error) {
sf.mutex.Lock()
defer sf.mutex.Unlock()
return sf.activeWs.LoadOrCreateState(addr, init)
}
// Balance returns balance
func (sf *factory) Balance(addr string) (*big.Int, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.activeWs.balance(addr)
}
// Nonce returns the Nonce if the account exists
func (sf *factory) Nonce(addr string) (uint64, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.activeWs.Nonce(addr)
}
// State returns the confirmed state on the chain
func (sf *factory) State(addr string) (*State, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.activeWs.state(addr)
}
// CachedState returns the cached state if the address exists in local cache
func (sf *factory) CachedState(addr string) (*State, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.activeWs.CachedState(addr)
}
// RootHash returns the hash of the root node of the state trie
func (sf *factory) RootHash() hash.Hash32B {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.rootHash
}
// Height returns factory's height
func (sf *factory) Height() (uint64, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
height, err := sf.dao.Get(trie.AccountKVNameSpace, []byte(CurrentHeightKey))
if err != nil {
return 0, errors.Wrap(err, "failed to get factory's height from underlying DB")
}
return byteutil.BytesToUint64(height), nil
}
func (sf *factory) NewWorkingSet() (WorkingSet, error) {
sf.mutex.Lock()
defer sf.mutex.Unlock()
return NewWorkingSet(sf.currentChainHeight, sf.dao, sf.rootHash, sf.actionHandlers)
}
// RunActions will be called in 2 places:
// 1. In MintNewBlock(), the block producer runs all executions in the new block and gets the new trie root hash
// (which is written into the block header), but the changes are not committed to the blockchain yet
// 2. In CommitBlock(), all other nodes (everyone except the block producer) run all executions and verify that the
// trie root hash matches what is written in the block header
func (sf *factory) RunActions(
blockHeight uint64,
tsf []*action.Transfer,
vote []*action.Vote,
executions []*action.Execution,
actions []action.Action) (hash.Hash32B, error) {
sf.mutex.Lock()
defer sf.mutex.Unlock()
// use the default working set to run the actions
return sf.activeWs.RunActions(blockHeight, tsf, vote, executions, actions)
}
// Commit persists all changes in RunActions() into the DB
func (sf *factory) Commit(ws WorkingSet) error {
sf.mutex.Lock()
defer sf.mutex.Unlock()
if ws != nil {
if sf.currentChainHeight != ws.version() {
// another working set with correct version already committed, do nothing
return nil
}
sf.activeWs = nil
sf.activeWs = ws
}
if err := sf.activeWs.commit(); err != nil {
return errors.Wrap(err, "failed to commit working set")
}
// Update chain height and root
sf.currentChainHeight = sf.activeWs.height()
sf.rootHash = sf.activeWs.rootHash()
return nil
}
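// Illustrative call sequence (a sketch, not taken from the original code) for the mint-then-commit
// flow described above, assuming the caller already has a factory sf and the block's transfers,
// votes, executions and generic actions:
//
//	root, err := sf.RunActions(height, tsf, votes, executions, actions) // new root for the block header
//	// ... the block is assembled or validated using root ...
//	err = sf.Commit(nil) // persist the active working set once the block is accepted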
//======================================
// Contract functions
//======================================
// GetCodeHash returns contract's code hash
func (sf *factory) GetCodeHash(addr hash.PKHash) (hash.Hash32B, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.activeWs.GetCodeHash(addr)
}
// GetCode returns contract's code
func (sf *factory) GetCode(addr hash.PKHash) ([]byte, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.activeWs.GetCode(addr)
}
// SetCode sets contract's code
func (sf *factory) SetCode(addr hash.PKHash, code []byte) error {
sf.mutex.Lock()
defer sf.mutex.Unlock()
return sf.activeWs.SetCode(addr, code)
}
// GetContractState returns contract's storage value
func (sf *factory) GetContractState(addr hash.PKHash, key hash.Hash32B) (hash.Hash32B, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.activeWs.GetContractState(addr, key)
}
// SetContractState writes contract's storage value
func (sf *factory) SetContractState(addr hash.PKHash, key, value hash.Hash32B) error {
sf.mutex.Lock()
defer sf.mutex.Unlock()
return sf.activeWs.SetContractState(addr, key, value)
}
//======================================
// Candidate functions
//======================================
// candidates returns the array of candidates in the candidate pool
func (sf *factory) candidates() (uint64, []*Candidate) {
sf.mutex.Lock()
defer sf.mutex.Unlock()
candidates, err := MapToCandidates(sf.activeWs.workingCandidates())
if err != nil {
return sf.currentChainHeight, nil
}
if len(candidates) <= int(sf.numCandidates) {
return sf.currentChainHeight, candidates
}
sort.Sort(candidates)
return sf.currentChainHeight, candidates[:sf.numCandidates]
}
// CandidatesByHeight returns array of candidates in candidate pool of a given height
func (sf *factory) CandidatesByHeight(height uint64) ([]*Candidate, error) {
sf.mutex.Lock()
defer sf.mutex.Unlock()
// Load candidates on the given height from underlying db
candidates, err := sf.activeWs.getCandidates(height)
if err != nil {
return []*Candidate{}, errors.Wrapf(err, "failed to get candidates on height %d", height)
}
if len(candidates) > int(sf.numCandidates) {
candidates = candidates[:sf.numCandidates]
}
return candidates, nil
}
//======================================
// private trie constructor functions
//======================================
func (sf *factory) getRoot(nameSpace string, key string) (hash.Hash32B, error) {
var trieRoot hash.Hash32B
switch root, err := sf.dao.Get(nameSpace, []byte(key)); errors.Cause(err) {
case nil:
trieRoot = byteutil.BytesTo32B(root)
case bolt.ErrBucketNotFound:
trieRoot = trie.EmptyRoot
default:
return hash.ZeroHash32B, err
}
return trieRoot, nil
}
| 1 | 12,603 | Need to do this to unblock mockgen. The interface method needs to be public to be visible in another package. The right fix is to delete this test only interface method | iotexproject-iotex-core | go |
@@ -101,6 +101,13 @@ module.exports = function(url, options, callback) {
record = record[0].join('');
const parsedRecord = qs.parse(record);
const items = Object.keys(parsedRecord);
+ if (
+ Object.keys(items)
+ .map(k => k.toLowerCase())
+ .indexOf('loadbalanced') !== -1
+ ) {
+ return callback(new MongoParseError('Load balancer mode requires driver version 4+'));
+ }
if (items.some(item => item !== 'authSource' && item !== 'replicaSet')) {
return callback(
new MongoParseError('Text record must only set `authSource` or `replicaSet`') | 1 | 'use strict';
const ReadPreference = require('./core').ReadPreference;
const parser = require('url');
const f = require('util').format;
const Logger = require('./core').Logger;
const dns = require('dns');
const ReadConcern = require('./read_concern');
const qs = require('querystring');
const MongoParseError = require('./core/error').MongoParseError;
module.exports = function(url, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
let result;
try {
result = parser.parse(url, true);
} catch (e) {
return callback(new Error('URL malformed, cannot be parsed'));
}
if (result.protocol !== 'mongodb:' && result.protocol !== 'mongodb+srv:') {
return callback(new Error('Invalid schema, expected `mongodb` or `mongodb+srv`'));
}
if (result.protocol === 'mongodb:') {
return parseHandler(url, options, callback);
}
// Otherwise parse this as an SRV record
if (result.hostname.split('.').length < 3) {
return callback(new Error('URI does not have hostname, domain name and tld'));
}
result.domainLength = result.hostname.split('.').length;
const hostname = url.substring('mongodb+srv://'.length).split('/')[0];
if (hostname.match(',')) {
return callback(new Error('Invalid URI, cannot contain multiple hostnames'));
}
if (result.port) {
return callback(new Error('Ports not accepted with `mongodb+srv` URIs'));
}
let srvAddress = `_mongodb._tcp.${result.host}`;
dns.resolveSrv(srvAddress, function(err, addresses) {
if (err) return callback(err);
if (addresses.length === 0) {
return callback(new Error('No addresses found at host'));
}
for (let i = 0; i < addresses.length; i++) {
if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) {
return callback(new Error('Server record does not share hostname with parent URI'));
}
}
let base = result.auth ? `mongodb://${result.auth}@` : `mongodb://`;
let connectionStrings = addresses.map(function(address, i) {
if (i === 0) return `${base}${address.name}:${address.port}`;
else return `${address.name}:${address.port}`;
});
let connectionString = connectionStrings.join(',') + '/';
let connectionStringOptions = [];
// Add the default database if needed
if (result.path) {
let defaultDb = result.path.slice(1);
if (defaultDb.indexOf('?') !== -1) {
defaultDb = defaultDb.slice(0, defaultDb.indexOf('?'));
}
connectionString += defaultDb;
}
// Default to SSL true
if (!options.ssl && !result.search) {
connectionStringOptions.push('ssl=true');
} else if (!options.ssl && result.search && !result.search.match('ssl')) {
connectionStringOptions.push('ssl=true');
}
// Keep original uri options
if (result.search) {
connectionStringOptions.push(result.search.replace('?', ''));
}
dns.resolveTxt(result.host, function(err, record) {
if (err && err.code !== 'ENODATA' && err.code !== 'ENOTFOUND') return callback(err);
if (err && err.code === 'ENODATA') record = null;
if (record) {
if (record.length > 1) {
return callback(new MongoParseError('Multiple text records not allowed'));
}
record = record[0].join('');
const parsedRecord = qs.parse(record);
const items = Object.keys(parsedRecord);
if (items.some(item => item !== 'authSource' && item !== 'replicaSet')) {
return callback(
new MongoParseError('Text record must only set `authSource` or `replicaSet`')
);
}
if (items.length > 0) {
connectionStringOptions.push(record);
}
}
// Add any options to the connection string
if (connectionStringOptions.length) {
connectionString += `?${connectionStringOptions.join('&')}`;
}
parseHandler(connectionString, options, callback);
});
});
};
function matchesParentDomain(srvAddress, parentDomain) {
let regex = /^.*?\./;
let srv = `.${srvAddress.replace(regex, '')}`;
let parent = `.${parentDomain.replace(regex, '')}`;
if (srv.endsWith(parent)) return true;
else return false;
}
function parseHandler(address, options, callback) {
let result, err;
try {
result = parseConnectionString(address, options);
} catch (e) {
err = e;
}
return err ? callback(err, null) : callback(null, result);
}
function parseConnectionString(url, options) {
// Variables
let connection_part = '';
let auth_part = '';
let query_string_part = '';
let dbName = 'admin';
// Url parser result
let result = parser.parse(url, true);
if ((result.hostname == null || result.hostname === '') && url.indexOf('.sock') === -1) {
throw new Error('No hostname or hostnames provided in connection string');
}
if (result.port === '0') {
throw new Error('Invalid port (zero) with hostname');
}
if (!isNaN(parseInt(result.port, 10)) && parseInt(result.port, 10) > 65535) {
throw new Error('Invalid port (larger than 65535) with hostname');
}
if (
result.path &&
result.path.length > 0 &&
result.path[0] !== '/' &&
url.indexOf('.sock') === -1
) {
throw new Error('Missing delimiting slash between hosts and options');
}
if (result.query) {
for (let name in result.query) {
if (name.indexOf('::') !== -1) {
throw new Error('Double colon in host identifier');
}
if (result.query[name] === '') {
throw new Error('Query parameter ' + name + ' is an incomplete value pair');
}
}
}
if (result.auth) {
let parts = result.auth.split(':');
if (url.indexOf(result.auth) !== -1 && parts.length > 2) {
throw new Error('Username with password containing an unescaped colon');
}
if (url.indexOf(result.auth) !== -1 && result.auth.indexOf('@') !== -1) {
throw new Error('Username containing an unescaped at-sign');
}
}
// Remove query
let clean = url.split('?').shift();
// Extract the list of hosts
let strings = clean.split(',');
let hosts = [];
for (let i = 0; i < strings.length; i++) {
let hostString = strings[i];
if (hostString.indexOf('mongodb') !== -1) {
if (hostString.indexOf('@') !== -1) {
hosts.push(hostString.split('@').pop());
} else {
hosts.push(hostString.substr('mongodb://'.length));
}
} else if (hostString.indexOf('/') !== -1) {
hosts.push(hostString.split('/').shift());
} else if (hostString.indexOf('/') === -1) {
hosts.push(hostString.trim());
}
}
for (let i = 0; i < hosts.length; i++) {
let r = parser.parse(f('mongodb://%s', hosts[i].trim()));
if (r.path && r.path.indexOf('.sock') !== -1) continue;
if (r.path && r.path.indexOf(':') !== -1) {
// Not connecting to a socket so check for an extra slash in the hostname.
// Using String#split as perf is better than match.
if (r.path.split('/').length > 1 && r.path.indexOf('::') === -1) {
throw new Error('Slash in host identifier');
} else {
throw new Error('Double colon in host identifier');
}
}
}
// If we have a ? mark cut the query elements off
if (url.indexOf('?') !== -1) {
query_string_part = url.substr(url.indexOf('?') + 1);
connection_part = url.substring('mongodb://'.length, url.indexOf('?'));
} else {
connection_part = url.substring('mongodb://'.length);
}
// Check if we have auth params
if (connection_part.indexOf('@') !== -1) {
auth_part = connection_part.split('@')[0];
connection_part = connection_part.split('@')[1];
}
// Check there is not more than one unescaped slash
if (connection_part.split('/').length > 2) {
throw new Error(
"Unsupported host '" +
connection_part.split('?')[0] +
"', hosts must be URL encoded and contain at most one unencoded slash"
);
}
// Check if the connection string has a db
if (connection_part.indexOf('.sock') !== -1) {
if (connection_part.indexOf('.sock/') !== -1) {
dbName = connection_part.split('.sock/')[1];
// Check if multiple database names provided, or just an illegal trailing backslash
if (dbName.indexOf('/') !== -1) {
if (dbName.split('/').length === 2 && dbName.split('/')[1].length === 0) {
throw new Error('Illegal trailing backslash after database name');
}
throw new Error('More than 1 database name in URL');
}
connection_part = connection_part.split(
'/',
connection_part.indexOf('.sock') + '.sock'.length
);
}
} else if (connection_part.indexOf('/') !== -1) {
// Check if multiple database names provided, or just an illegal trailing backslash
if (connection_part.split('/').length > 2) {
if (connection_part.split('/')[2].length === 0) {
throw new Error('Illegal trailing backslash after database name');
}
throw new Error('More than 1 database name in URL');
}
dbName = connection_part.split('/')[1];
connection_part = connection_part.split('/')[0];
}
// URI decode the host information
connection_part = decodeURIComponent(connection_part);
// Result object
let object = {};
// Pick apart the authentication part of the string
let authPart = auth_part || '';
let auth = authPart.split(':', 2);
// Decode the authentication URI components and verify integrity
let user = decodeURIComponent(auth[0]);
if (auth[0] !== encodeURIComponent(user)) {
throw new Error('Username contains an illegal unescaped character');
}
auth[0] = user;
if (auth[1]) {
let pass = decodeURIComponent(auth[1]);
if (auth[1] !== encodeURIComponent(pass)) {
throw new Error('Password contains an illegal unescaped character');
}
auth[1] = pass;
}
// Add auth to final object if we have 2 elements
if (auth.length === 2) object.auth = { user: auth[0], password: auth[1] };
// if user provided auth options, use that
if (options && options.auth != null) object.auth = options.auth;
// Variables used for temporary storage
let hostPart;
let urlOptions;
let servers;
let compression;
let serverOptions = { socketOptions: {} };
let dbOptions = { read_preference_tags: [] };
let replSetServersOptions = { socketOptions: {} };
let mongosOptions = { socketOptions: {} };
// Add server options to final object
object.server_options = serverOptions;
object.db_options = dbOptions;
object.rs_options = replSetServersOptions;
object.mongos_options = mongosOptions;
// Let's check if we are using a domain socket
if (url.match(/\.sock/)) {
// Split out the socket part
let domainSocket = url.substring(
url.indexOf('mongodb://') + 'mongodb://'.length,
url.lastIndexOf('.sock') + '.sock'.length
);
// Clean out any auth stuff if any
if (domainSocket.indexOf('@') !== -1) domainSocket = domainSocket.split('@')[1];
domainSocket = decodeURIComponent(domainSocket);
servers = [{ domain_socket: domainSocket }];
} else {
// Split up the db
hostPart = connection_part;
// Deduplicate servers
let deduplicatedServers = {};
// Parse all server results
servers = hostPart
.split(',')
.map(function(h) {
let _host, _port, ipv6match;
//check if it matches [IPv6]:port, where the port number is optional
if ((ipv6match = /\[([^\]]+)\](?::(.+))?/.exec(h))) {
_host = ipv6match[1];
_port = parseInt(ipv6match[2], 10) || 27017;
} else {
//otherwise assume it's IPv4, or plain hostname
let hostPort = h.split(':', 2);
_host = hostPort[0] || 'localhost';
_port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017;
// Check for localhost?safe=true style case
if (_host.indexOf('?') !== -1) _host = _host.split(/\?/)[0];
}
// No entry returned for duplicate server
if (deduplicatedServers[_host + '_' + _port]) return null;
deduplicatedServers[_host + '_' + _port] = 1;
// Return the mapped object
return { host: _host, port: _port };
})
.filter(function(x) {
return x != null;
});
}
// Get the db name
object.dbName = dbName || 'admin';
// Split up all the options
urlOptions = (query_string_part || '').split(/[&;]/);
// Ugh, we have to figure out which options go to which constructor manually.
urlOptions.forEach(function(opt) {
if (!opt) return;
var splitOpt = opt.split('='),
name = splitOpt[0],
value = splitOpt[1];
// Options implementations
switch (name) {
case 'slaveOk':
case 'slave_ok':
serverOptions.slave_ok = value === 'true';
dbOptions.slaveOk = value === 'true';
break;
case 'maxPoolSize':
case 'poolSize':
serverOptions.poolSize = parseInt(value, 10);
replSetServersOptions.poolSize = parseInt(value, 10);
break;
case 'appname':
object.appname = decodeURIComponent(value);
break;
case 'autoReconnect':
case 'auto_reconnect':
serverOptions.auto_reconnect = value === 'true';
break;
case 'ssl':
if (value === 'prefer') {
serverOptions.ssl = value;
replSetServersOptions.ssl = value;
mongosOptions.ssl = value;
break;
}
serverOptions.ssl = value === 'true';
replSetServersOptions.ssl = value === 'true';
mongosOptions.ssl = value === 'true';
break;
case 'sslValidate':
serverOptions.sslValidate = value === 'true';
replSetServersOptions.sslValidate = value === 'true';
mongosOptions.sslValidate = value === 'true';
break;
case 'replicaSet':
case 'rs_name':
replSetServersOptions.rs_name = value;
break;
case 'reconnectWait':
replSetServersOptions.reconnectWait = parseInt(value, 10);
break;
case 'retries':
replSetServersOptions.retries = parseInt(value, 10);
break;
case 'readSecondary':
case 'read_secondary':
replSetServersOptions.read_secondary = value === 'true';
break;
case 'fsync':
dbOptions.fsync = value === 'true';
break;
case 'journal':
dbOptions.j = value === 'true';
break;
case 'safe':
dbOptions.safe = value === 'true';
break;
case 'nativeParser':
case 'native_parser':
dbOptions.native_parser = value === 'true';
break;
case 'readConcernLevel':
dbOptions.readConcern = new ReadConcern(value);
break;
case 'connectTimeoutMS':
serverOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
mongosOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
break;
case 'socketTimeoutMS':
serverOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
mongosOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
break;
case 'w':
dbOptions.w = parseInt(value, 10);
if (isNaN(dbOptions.w)) dbOptions.w = value;
break;
case 'authSource':
dbOptions.authSource = value;
break;
case 'gssapiServiceName':
dbOptions.gssapiServiceName = value;
break;
case 'authMechanism':
if (value === 'GSSAPI') {
// If no password provided decode only the principal
if (object.auth == null) {
let urlDecodeAuthPart = decodeURIComponent(authPart);
if (urlDecodeAuthPart.indexOf('@') === -1)
throw new Error('GSSAPI requires a provided principal');
object.auth = { user: urlDecodeAuthPart, password: null };
} else {
object.auth.user = decodeURIComponent(object.auth.user);
}
} else if (value === 'MONGODB-X509') {
object.auth = { user: decodeURIComponent(authPart) };
}
// Only support GSSAPI or MONGODB-CR for now
if (
value !== 'GSSAPI' &&
value !== 'MONGODB-X509' &&
value !== 'MONGODB-CR' &&
value !== 'DEFAULT' &&
value !== 'SCRAM-SHA-1' &&
value !== 'SCRAM-SHA-256' &&
value !== 'PLAIN'
)
throw new Error(
'Only DEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1 is supported by authMechanism'
);
// Authentication mechanism
dbOptions.authMechanism = value;
break;
case 'authMechanismProperties':
{
// Split up into key, value pairs
let values = value.split(',');
let o = {};
// For each value split into key, value
values.forEach(function(x) {
let v = x.split(':');
o[v[0]] = v[1];
});
// Set all authMechanismProperties
dbOptions.authMechanismProperties = o;
// Set the service name value
if (typeof o.SERVICE_NAME === 'string') dbOptions.gssapiServiceName = o.SERVICE_NAME;
if (typeof o.SERVICE_REALM === 'string') dbOptions.gssapiServiceRealm = o.SERVICE_REALM;
if (typeof o.CANONICALIZE_HOST_NAME === 'string')
dbOptions.gssapiCanonicalizeHostName =
o.CANONICALIZE_HOST_NAME === 'true' ? true : false;
}
break;
case 'wtimeoutMS':
dbOptions.wtimeout = parseInt(value, 10);
break;
case 'readPreference':
if (!ReadPreference.isValid(value))
throw new Error(
'readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest'
);
dbOptions.readPreference = value;
break;
case 'maxStalenessSeconds':
dbOptions.maxStalenessSeconds = parseInt(value, 10);
break;
case 'readPreferenceTags':
{
// Decode the value
value = decodeURIComponent(value);
// Contains the tag object
let tagObject = {};
if (value == null || value === '') {
dbOptions.read_preference_tags.push(tagObject);
break;
}
// Split up the tags
let tags = value.split(/,/);
for (let i = 0; i < tags.length; i++) {
let parts = tags[i].trim().split(/:/);
tagObject[parts[0]] = parts[1];
}
// Set the preferences tags
dbOptions.read_preference_tags.push(tagObject);
}
break;
case 'compressors':
{
compression = serverOptions.compression || {};
let compressors = value.split(',');
if (
!compressors.every(function(compressor) {
return compressor === 'snappy' || compressor === 'zlib';
})
) {
throw new Error('Compressors must be at least one of snappy or zlib');
}
compression.compressors = compressors;
serverOptions.compression = compression;
}
break;
case 'zlibCompressionLevel':
{
compression = serverOptions.compression || {};
let zlibCompressionLevel = parseInt(value, 10);
if (zlibCompressionLevel < -1 || zlibCompressionLevel > 9) {
throw new Error('zlibCompressionLevel must be an integer between -1 and 9');
}
compression.zlibCompressionLevel = zlibCompressionLevel;
serverOptions.compression = compression;
}
break;
case 'retryWrites':
dbOptions.retryWrites = value === 'true';
break;
case 'minSize':
dbOptions.minSize = parseInt(value, 10);
break;
default:
{
let logger = Logger('URL Parser');
logger.warn(`${name} is not supported as a connection string option`);
}
break;
}
});
// No tags: should be null (not [])
if (dbOptions.read_preference_tags.length === 0) {
dbOptions.read_preference_tags = null;
}
// Validate if there are an invalid write concern combinations
if (
(dbOptions.w === -1 || dbOptions.w === 0) &&
(dbOptions.journal === true || dbOptions.fsync === true || dbOptions.safe === true)
)
throw new Error('w set to -1 or 0 cannot be combined with safe/w/journal/fsync');
// If no read preference set it to primary
if (!dbOptions.readPreference) {
dbOptions.readPreference = 'primary';
}
// make sure that user-provided options are applied with priority
dbOptions = Object.assign(dbOptions, options);
// Add servers to result
object.servers = servers;
// Returned parsed object
return object;
}
| 1 | 21,101 | same question here as I have on the other file regarding doing this check here vs L112 vs not at all | mongodb-node-mongodb-native | js |
@@ -1,3 +1,5 @@
+// +build !openbsd
+
package fuse
import ( | 1 | package fuse
import (
"os"
"bazil.org/fuse"
"bazil.org/fuse/fs"
"golang.org/x/net/context"
"github.com/restic/restic"
"github.com/restic/restic/repository"
)
// Statically ensure that *dir implements those interfaces
var _ = fs.HandleReadDirAller(&dir{})
var _ = fs.NodeStringLookuper(&dir{})
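// These blank-identifier assignments are compile-time assertions: converting &dir{} to each
// interface type makes the package fail to build if *dir ever stops implementing
// fs.HandleReadDirAller or fs.NodeStringLookuper.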
type dir struct {
repo *repository.Repository
items map[string]*restic.Node
inode uint64
node *restic.Node
ownerIsRoot bool
}
func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) {
tree, err := restic.LoadTree(repo, *node.Subtree)
if err != nil {
return nil, err
}
items := make(map[string]*restic.Node)
for _, node := range tree.Nodes {
items[node.Name] = node
}
return &dir{
repo: repo,
node: node,
items: items,
inode: node.Inode,
ownerIsRoot: ownerIsRoot,
}, nil
}
func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) {
tree, err := restic.LoadTree(repo, *snapshot.Tree)
if err != nil {
return nil, err
}
items := make(map[string]*restic.Node)
for _, node := range tree.Nodes {
items[node.Name] = node
}
return &dir{
repo: repo,
node: &restic.Node{
UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()),
AccessTime: snapshot.Time,
ModTime: snapshot.Time,
ChangeTime: snapshot.Time,
Mode: os.ModeDir | 0555,
},
items: items,
inode: inodeFromBackendId(snapshot.ID),
ownerIsRoot: ownerIsRoot,
}, nil
}
func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
a.Inode = d.inode
a.Mode = os.ModeDir | d.node.Mode
if !d.ownerIsRoot {
a.Uid = d.node.UID
a.Gid = d.node.GID
}
a.Atime = d.node.AccessTime
a.Ctime = d.node.ChangeTime
a.Mtime = d.node.ModTime
return nil
}
func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
ret := make([]fuse.Dirent, 0, len(d.items))
for _, node := range d.items {
var typ fuse.DirentType
switch node.Type {
case "dir":
typ = fuse.DT_Dir
case "file":
typ = fuse.DT_File
case "symlink":
typ = fuse.DT_Link
}
ret = append(ret, fuse.Dirent{
Inode: node.Inode,
Type: typ,
Name: node.Name,
})
}
return ret, nil
}
func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
node, ok := d.items[name]
if !ok {
return nil, fuse.ENOENT
}
switch node.Type {
case "dir":
return newDir(d.repo, node, d.ownerIsRoot)
case "file":
return newFile(d.repo, node, d.ownerIsRoot)
case "symlink":
return newLink(d.repo, node, d.ownerIsRoot)
default:
return nil, fuse.ENOENT
}
}
| 1 | 6,840 | Adding `// +build !windows` (as a separate line) will do the same for Windows. So while you are at it we might add it as well. | restic-restic | go |