Dataset columns (name, dtype, and length or value range):

Column   Dtype             Range / values
patch    string            lengths 17 - 31.2k
y        int64             1 - 1
oldf     string            lengths 0 - 2.21M
idx      int64             1 - 1
id       int64             4.29k - 68.4k
msg      string            lengths 8 - 843
proj     string (classes)  212 distinct values
lang     string (classes)  9 distinct values
@@ -272,8 +272,7 @@ func newAdminMembershipCommands() []cli.Command { { Name: "list_db", Usage: "List cluster membership items", - Flags: append( - getDBFlags(), + Flags: []cli.Flag{ cli.StringFlag{ Name: FlagHeartbeatedWithin, Value: "15m",
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package cli import ( "fmt" "github.com/urfave/cli" enumspb "go.temporal.io/api/enums/v1" ) func newAdminWorkflowCommands() []cli.Command { return []cli.Command{ { Name: "show", Aliases: []string{"show"}, Usage: "show workflow history from database", Flags: append(getDBFlags(), // v2 history events cli.StringFlag{ Name: FlagTreeID, Usage: "TreeId", }, cli.StringFlag{ Name: FlagBranchID, Usage: "BranchId", }, cli.StringFlag{ Name: FlagOutputFilenameWithAlias, Usage: "output file", }, // support mysql query cli.IntFlag{ Name: FlagShardIDWithAlias, Usage: "ShardId", }), Action: func(c *cli.Context) { AdminShowWorkflow(c) }, }, { Name: "describe", Aliases: []string{"desc"}, Usage: "Describe internal information of workflow execution", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagWorkflowIDWithAlias, Usage: "WorkflowId", }, cli.StringFlag{ Name: FlagRunIDWithAlias, Usage: "RunId", }, }, Action: func(c *cli.Context) { AdminDescribeWorkflow(c) }, }, { Name: "refresh_tasks", Aliases: []string{"rt"}, Usage: "Refreshes all the tasks of a workflow", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagWorkflowIDWithAlias, Usage: "WorkflowId", }, cli.StringFlag{ Name: FlagRunIDWithAlias, Usage: "RunId", }, }, Action: func(c *cli.Context) { AdminRefreshWorkflowTasks(c) }, }, { Name: "delete", Aliases: []string{"del"}, Usage: "Delete current workflow execution and the mutableState record", Flags: append( getDBAndESFlags(), cli.StringFlag{ Name: FlagWorkflowIDWithAlias, Usage: "WorkflowId", }, cli.StringFlag{ Name: FlagRunIDWithAlias, Usage: "RunId", }, cli.BoolFlag{ Name: FlagSkipErrorModeWithAlias, Usage: "skip errors", }), Action: func(c *cli.Context) { AdminDeleteWorkflow(c) }, }, } } func newAdminShardManagementCommands() []cli.Command { return []cli.Command{ { Name: "describe", Aliases: []string{"d"}, Usage: "Describe shard by Id", Flags: []cli.Flag{ cli.IntFlag{ Name: FlagShardID, Usage: "The Id of the shard to describe", }, }, Action: func(c *cli.Context) { AdminDescribeShard(c) }, }, { Name: "describe_task", Aliases: []string{"dt"}, Usage: "Describe a task based on task Id, task type, shard Id and task visibility timestamp", Flags: append( getDBFlags(), cli.IntFlag{ Name: FlagShardID, Usage: "The ID of the shard", }, cli.IntFlag{ Name: FlagTaskID, Usage: "The ID of the timer task to describe", }, cli.StringFlag{ 
Name: FlagTaskType, Value: "transfer", Usage: "Task type: transfer (default), timer, replication", }, cli.Int64Flag{ Name: FlagTaskVisibilityTimestamp, Usage: "Task visibility timestamp in nano", }, cli.StringFlag{ Name: FlagTargetCluster, Value: "active", Usage: "Temporal cluster to use", }, ), Action: func(c *cli.Context) { AdminDescribeTask(c) }, }, { Name: "list_tasks", Usage: "List tasks for given shard Id and task type", Flags: append(append( getDBFlags(), flagsForPagination...), cli.StringFlag{ Name: FlagTargetCluster, Value: "active", Usage: "Temporal cluster to use", }, cli.IntFlag{ Name: FlagShardID, Usage: "The ID of the shard", }, cli.StringFlag{ Name: FlagTaskType, Value: "transfer", Usage: "Task type: transfer (default), timer, replication", }, cli.StringFlag{ Name: FlagMinVisibilityTimestamp, Value: "2020-01-01T00:00:00+00:00", Usage: "Task visibility min timestamp. Supported formats are '2006-01-02T15:04:05+07:00', raw UnixNano and " + "time range (N<duration>), where 0 < N < 1000000 and duration (full-notation/short-notation) can be second/s, " + "minute/m, hour/h, day/d, week/w, month/M or year/y. For example, '15minute' or '15m' implies last 15 minutes.", }, cli.StringFlag{ Name: FlagMaxVisibilityTimestamp, Value: "2035-01-01T00:00:00+00:00", Usage: "Task visibility max timestamp. Supported formats are '2006-01-02T15:04:05+07:00', raw UnixNano and " + "time range (N<duration>), where 0 < N < 1000000 and duration (full-notation/short-notation) can be second/s, " + "minute/m, hour/h, day/d, week/w, month/M or year/y. For example, '15minute' or '15m' implies last 15 minutes.", }, ), Action: func(c *cli.Context) { AdminListTasks(c) }, }, { Name: "close_shard", Aliases: []string{"clsh"}, Usage: "close a shard given a shard id", Flags: []cli.Flag{ cli.IntFlag{ Name: FlagShardID, Usage: "ShardId for the temporal cluster to manage", }, }, Action: func(c *cli.Context) { AdminShardManagement(c) }, }, { Name: "remove_task", Aliases: []string{"rmtk"}, Usage: "remove a task based on shardId, task type, taskId, and task visibility timestamp", Flags: []cli.Flag{ cli.IntFlag{ Name: FlagShardID, Usage: "shardId", }, cli.Int64Flag{ Name: FlagTaskID, Usage: "taskId", }, cli.StringFlag{ Name: FlagTaskType, Value: "transfer", Usage: "Task type: transfer (default), timer, replication", }, cli.Int64Flag{ Name: FlagTaskVisibilityTimestamp, Usage: "task visibility timestamp in nano (required for removing timer task)", }, }, Action: func(c *cli.Context) { AdminRemoveTask(c) }, }, } } func newAdminMembershipCommands() []cli.Command { return []cli.Command{ { Name: "list_gossip", Usage: "List ringpop membership items", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagClusterMembershipRole, Value: "all", Usage: "Membership role filter: all (default), frontend, history, matching, worker", }, }, Action: func(c *cli.Context) { AdminListGossipMembers(c) }, }, { Name: "list_db", Usage: "List cluster membership items", Flags: append( getDBFlags(), cli.StringFlag{ Name: FlagHeartbeatedWithin, Value: "15m", Usage: "Filter by last heartbeat date time. Supported formats are '2006-01-02T15:04:05+07:00', raw UnixNano and " + "time range (N<duration>), where 0 < N < 1000000 and duration (full-notation/short-notation) can be second/s, " + "minute/m, hour/h, day/d, week/w, month/M or year/y. 
For example, '15minute' or '15m' implies last 15 minutes.", }, cli.StringFlag{ Name: FlagClusterMembershipRole, Value: "all", Usage: "Membership role filter: all (default), frontend, history, matching, worker", }, ), Action: func(c *cli.Context) { AdminListClusterMembership(c) }, }, } } func newAdminHistoryHostCommands() []cli.Command { return []cli.Command{ { Name: "describe", Aliases: []string{"desc"}, Usage: "Describe internal information of history host", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagWorkflowIDWithAlias, Usage: "WorkflowId", }, cli.StringFlag{ Name: FlagHistoryAddressWithAlias, Usage: "History Host address(IP:PORT)", }, cli.IntFlag{ Name: FlagShardIDWithAlias, Usage: "ShardId", }, cli.BoolFlag{ Name: FlagPrintFullyDetailWithAlias, Usage: "Print fully detail", }, }, Action: func(c *cli.Context) { AdminDescribeHistoryHost(c) }, }, { Name: "get_shardid", Aliases: []string{"gsh"}, Usage: "Get shardId for a namespaceId and workflowId combination", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagNamespaceID, Usage: "NamespaceId", }, cli.StringFlag{ Name: FlagWorkflowIDWithAlias, Usage: "WorkflowId", }, cli.IntFlag{ Name: FlagNumberOfShards, Usage: "NumberOfShards for the temporal cluster(see config for numHistoryShards)", }, }, Action: func(c *cli.Context) { AdminGetShardID(c) }, }, } } func newAdminNamespaceCommands() []cli.Command { return []cli.Command{ { Name: "list", Usage: "List namespaces", Flags: flagsForPagination, Action: func(c *cli.Context) { AdminListNamespaces(c) }, }, { Name: "register", Aliases: []string{"re"}, Usage: "Register workflow namespace", Flags: adminRegisterNamespaceFlags, Action: func(c *cli.Context) { newNamespaceCLI(c, true).RegisterNamespace(c) }, }, { Name: "update", Aliases: []string{"up", "u"}, Usage: "Update existing workflow namespace", Flags: adminUpdateNamespaceFlags, Action: func(c *cli.Context) { newNamespaceCLI(c, true).UpdateNamespace(c) }, }, { Name: "describe", Aliases: []string{"desc"}, Usage: "Describe existing workflow namespace", Flags: adminDescribeNamespaceFlags, Action: func(c *cli.Context) { newNamespaceCLI(c, true).DescribeNamespace(c) }, }, { Name: "get_namespaceidorname", Aliases: []string{"getdn"}, Usage: "Get namespaceId or namespace", Flags: append(getDBFlags(), cli.StringFlag{ Name: FlagNamespace, Usage: "Namespace", }, cli.StringFlag{ Name: FlagNamespaceID, Usage: "Namespace Id(uuid)", }), Action: func(c *cli.Context) { AdminGetNamespaceIDOrName(c) }, }, } } func newAdminElasticSearchCommands() []cli.Command { return []cli.Command{ { Name: "catIndex", Aliases: []string{"cind"}, Usage: "Cat Indices on Elasticsearch", Flags: getESFlags(false), Action: func(c *cli.Context) { AdminCatIndices(c) }, }, { Name: "index", Aliases: []string{"ind"}, Usage: "Index docs on Elasticsearch", Flags: append( getESFlags(true), cli.StringFlag{ Name: FlagInputFileWithAlias, Usage: "Input file of indexerspb.Message in json format, separated by newline", }, cli.IntFlag{ Name: FlagBatchSizeWithAlias, Usage: "Optional batch size of actions for bulk operations", Value: 10, }, ), Action: func(c *cli.Context) { AdminIndex(c) }, }, { Name: "delete", Aliases: []string{"del"}, Usage: "Delete docs on Elasticsearch", Flags: append( getESFlags(true), cli.StringFlag{ Name: FlagInputFileWithAlias, Usage: "Input file name. Redirect temporal wf list result (with table format) to a file and use as delete input. 
" + "First line should be table header like WORKFLOW TYPE | WORKFLOW ID | RUN ID | ...", }, cli.IntFlag{ Name: FlagBatchSizeWithAlias, Usage: "Optional batch size of actions for bulk operations", Value: 1000, }, cli.IntFlag{ Name: FlagRPS, Usage: "Optional batch request rate per second", Value: 30, }, ), Action: func(c *cli.Context) { AdminDelete(c) }, }, } } func newAdminTaskQueueCommands() []cli.Command { return []cli.Command{ { Name: "describe", Aliases: []string{"desc"}, Usage: "Describe pollers and status information of task queue", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagTaskQueueWithAlias, Usage: "TaskQueue description", }, cli.StringFlag{ Name: FlagTaskQueueTypeWithAlias, Value: "workflow", Usage: "Optional TaskQueue type [workflow|activity]", }, }, Action: func(c *cli.Context) { AdminDescribeTaskQueue(c) }, }, { Name: "list_tasks", Usage: "List tasks of a task queue", Flags: append(append(append(getDBFlags(), flagsForExecution...), flagsForPagination...), cli.StringFlag{ Name: FlagNamespaceID, Usage: "Namespace Id", }, cli.StringFlag{ Name: FlagTaskQueueType, Value: "activity", Usage: "Taskqueue type: activity, workflow", }, cli.StringFlag{ Name: FlagTaskQueue, Usage: "Taskqueue name", }, cli.Int64Flag{ Name: FlagMinReadLevel, Usage: "Lower bound of read level", }, cli.Int64Flag{ Name: FlagMaxReadLevel, Usage: "Upper bound of read level", }, ), Action: func(c *cli.Context) { AdminListTaskQueueTasks(c) }, }, } } func newAdminClusterCommands() []cli.Command { return []cli.Command{ { Name: "add-search-attributes", Aliases: []string{"asa"}, Usage: "Add custom search attributes", Flags: []cli.Flag{ cli.BoolFlag{ Name: FlagSkipSchemaUpdate, Usage: "Skip Elasticsearch index schema update (only register in metadata)", Required: false, }, cli.StringFlag{ Name: FlagIndex, Usage: "Elasticsearch index name (optional)", Hidden: true, // don't show it for now }, cli.StringSliceFlag{ Name: FlagNameWithAlias, Usage: "Search attribute name (multiply values are supported)", }, cli.StringSliceFlag{ Name: FlagTypeWithAlias, Usage: fmt.Sprintf("Search attribute type: %v (multiply values are supported)", allowedEnumValues(enumspb.IndexedValueType_name)), }, }, Action: func(c *cli.Context) { AdminAddSearchAttributes(c) }, }, { Name: "remove-search-attributes", Aliases: []string{"rsa"}, Usage: "Remove custom search attributes metadata only (Elasticsearch index schema is not modified)", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagIndex, Usage: "Elasticsearch index name (optional)", Hidden: true, // don't show it for now }, cli.StringSliceFlag{ Name: FlagNameWithAlias, Usage: "Search attribute name", }, }, Action: func(c *cli.Context) { AdminRemoveSearchAttributes(c) }, }, { Name: "get-search-attributes", Aliases: []string{"gsa"}, Usage: "Show existing search attributes", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagPrintJSONWithAlias, Usage: "Output in JSON format", }, cli.StringFlag{ Name: FlagIndex, Usage: "Elasticsearch index name (optional)", Hidden: true, // don't show it for now }, }, Action: func(c *cli.Context) { AdminGetSearchAttributes(c) }, }, { Name: "describe", Aliases: []string{"d"}, Usage: "Describe cluster information", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagCluster, Value: "", Usage: "Remote cluster name (optional, default to return current cluster information)", }, }, Action: func(c *cli.Context) { AdminDescribeCluster(c) }, }, { Name: "upsert-remote-cluster", Aliases: []string{"urc"}, Usage: "Add or update remote cluster information in the current cluster", Flags: 
[]cli.Flag{ cli.StringFlag{ Name: FlagFrontendAddressWithAlias, Usage: "Remote cluster frontend address", Required: true, }, }, Action: func(c *cli.Context) { AdminAddOrUpdateRemoteCluster(c) }, }, { Name: "remove-remote-cluster", Aliases: []string{"rrc"}, Usage: "Remove remote cluster information from the current cluster", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagCluster, Usage: "Remote cluster name", Required: true, }, }, Action: func(c *cli.Context) { AdminRemoveRemoteCluster(c) }, }, } } func newAdminDLQCommands() []cli.Command { return []cli.Command{ { Name: "read", Aliases: []string{"r"}, Usage: "Read DLQ Messages", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagDLQTypeWithAlias, Usage: "Type of DLQ to manage. (Options: namespace, history)", }, cli.StringFlag{ Name: FlagCluster, Usage: "Source cluster", }, cli.IntFlag{ Name: FlagShardIDWithAlias, Usage: "ShardId", }, cli.IntFlag{ Name: FlagMaxMessageCountWithAlias, Usage: "Max message size to fetch", }, cli.IntFlag{ Name: FlagLastMessageID, Usage: "The upper boundary of the read message", }, cli.StringFlag{ Name: FlagOutputFilenameWithAlias, Usage: "Output file to write to, if not provided output is written to stdout", }, }, Action: func(c *cli.Context) { AdminGetDLQMessages(c) }, }, { Name: "purge", Aliases: []string{"p"}, Usage: "Delete DLQ messages with equal or smaller ids than the provided task id", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagDLQTypeWithAlias, Usage: "Type of DLQ to manage. (Options: namespace, history)", }, cli.StringFlag{ Name: FlagCluster, Usage: "Source cluster", }, cli.IntFlag{ Name: FlagShardIDWithAlias, Usage: "ShardId", }, cli.IntFlag{ Name: FlagLastMessageID, Usage: "The upper boundary of the read message", }, }, Action: func(c *cli.Context) { AdminPurgeDLQMessages(c) }, }, { Name: "merge", Aliases: []string{"m"}, Usage: "Merge DLQ messages with equal or smaller ids than the provided task id", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagDLQTypeWithAlias, Usage: "Type of DLQ to manage. 
(Options: namespace, history)", }, cli.StringFlag{ Name: FlagCluster, Usage: "Source cluster", }, cli.IntFlag{ Name: FlagShardIDWithAlias, Usage: "ShardId", }, cli.IntFlag{ Name: FlagLastMessageID, Usage: "The upper boundary of the read message", }, }, Action: func(c *cli.Context) { AdminMergeDLQMessages(c) }, }, } } func newDBCommands() []cli.Command { return []cli.Command{ { Name: "scan", Aliases: []string{"scan"}, Usage: "scan concrete executions in database and detect corruptions", Flags: append(getDBFlags(), cli.IntFlag{ Name: FlagLowerShardBound, Usage: "lower bound of shard to scan (inclusive)", Value: 0, }, cli.IntFlag{ Name: FlagUpperShardBound, Usage: "upper bound of shard to scan (exclusive)", Value: 16384, }, cli.IntFlag{ Name: FlagStartingRPS, Usage: "starting rps of database queries, rps will be increased to target over scale up seconds", Value: 100, }, cli.IntFlag{ Name: FlagRPS, Usage: "target rps of database queries, target will be reached over scale up seconds", Value: 7000, }, cli.IntFlag{ Name: FlagPageSize, Usage: "page size used to query db executions table", Value: 500, }, cli.IntFlag{ Name: FlagConcurrency, Usage: "number of threads to handle scan", Value: 1000, }, cli.IntFlag{ Name: FlagReportRate, Usage: "the number of shards which get handled between each emitting of progress", Value: 10, }), Action: func(c *cli.Context) { AdminDBScan(c) }, }, { Name: "clean", Aliases: []string{"clean"}, Usage: "clean up corrupted workflows", Flags: append(getDBFlags(), cli.StringFlag{ Name: FlagInputDirectory, Usage: "the directory which contains corrupted workflow execution files from scan", }, cli.IntFlag{ Name: FlagLowerShardBound, Usage: "lower bound of corrupt shard to handle (inclusive)", Value: 0, }, cli.IntFlag{ Name: FlagUpperShardBound, Usage: "upper bound of shard to handle (exclusive)", Value: 16384, }, cli.IntFlag{ Name: FlagStartingRPS, Usage: "starting rps of database queries, rps will be increased to target over scale up seconds", Value: 100, }, cli.IntFlag{ Name: FlagRPS, Usage: "target rps of database queries, target will be reached over scale up seconds", Value: 7000, }, cli.IntFlag{ Name: FlagConcurrency, Usage: "number of threads to handle clean", Value: 1000, }, cli.IntFlag{ Name: FlagReportRate, Usage: "the number of shards which get handled between each emitting of progress", Value: 10, }), Action: func(c *cli.Context) { AdminDBClean(c) }, }, } } func newDecodeCommands() []cli.Command { return []cli.Command{ { Name: "proto", Usage: "Decode proto payload", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagProtoType, Usage: "full name of proto type to decode to (i.e. temporal.server.api.persistence.v1.WorkflowExecutionInfo).", }, cli.StringFlag{ Name: FlagHexData, Usage: "data in hex format (i.e. 0x0a243462613036633466...).", }, cli.StringFlag{ Name: FlagHexFile, Usage: "file with data in hex format (i.e. 0x0a243462613036633466...).", }, cli.StringFlag{ Name: FlagBinaryFile, Usage: "file with data in binary format.", }, }, Action: func(c *cli.Context) { AdminDecodeProto(c) }, }, { Name: "base64", Usage: "Decode base64 payload", Flags: []cli.Flag{ cli.StringFlag{ Name: FlagBase64Data, Usage: "data in base64 format (i.e. anNvbi9wbGFpbg==).", }, cli.StringFlag{ Name: FlagBase64File, Usage: "file with data in base64 format (i.e. anNvbi9wbGFpbg==).", }, }, Action: func(c *cli.Context) { AdminDecodeBase64(c) }, }, } }
1
13,231
should combine these 2 lists (list_gossip and list_db) into just one list command, and show the discrepancy between the 2, if any.
temporalio-temporal
go
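The review comment above suggests merging `list_gossip` and `list_db` into a single command that also reports discrepancies between the two views. A minimal sketch of what that could look like in the same `cli` package, using urfave/cli as the surrounding file does; `AdminListMembers`, `fetchGossipMembers`, `fetchDBMembers`, and `printMembersWithDiscrepancies` are hypothetical names, not part of the Temporal CLI:

```go
// Hypothetical merged command replacing list_gossip and list_db.
// It keeps the flags of both commands and delegates to a combined handler
// (AdminListMembers below is an assumed name, not an existing function).
func newAdminMembershipListCommand() cli.Command {
	return cli.Command{
		Name:  "list",
		Usage: "List cluster membership from gossip and the database, and show any discrepancy",
		Flags: append(
			getDBFlags(),
			cli.StringFlag{
				Name:  FlagClusterMembershipRole,
				Value: "all",
				Usage: "Membership role filter: all (default), frontend, history, matching, worker",
			},
			cli.StringFlag{
				Name:  FlagHeartbeatedWithin,
				Value: "15m",
				Usage: "Filter database entries by last heartbeat time",
			},
		),
		Action: func(c *cli.Context) {
			// A combined handler would fetch both views and diff them, e.g.:
			//   gossip := fetchGossipMembers(c)
			//   db := fetchDBMembers(c)
			//   printMembersWithDiscrepancies(gossip, db)
			AdminListMembers(c)
		},
	}
}
```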
@@ -21,6 +21,11 @@ class ProposalPolicy [email protected]? end + def approve_reject? + actionable_approvers = @proposal.currently_awaiting_approvers + actionable_approvers.include? @user + end + def edit? self.test_all(:edit?) end
1
class ProposalPolicy include TreePolicy def perm_trees { edit?: [:author?, :not_approved?], update?: [:edit?] } end def initialize(user, proposal) @user = user @proposal = proposal end def author? @proposal.requester_id == @user.id end def not_approved? [email protected]? end def edit? self.test_all(:edit?) end def update? self.edit? end end
1
12,795
What do you think about prefixing the Policy method names with `can_`?
18F-C2
rb
@@ -29,11 +29,10 @@ #include <sys/types.h> #include <sys/stat.h> -/* HTTP Credentials Endpoints have a standard set of JSON Keys */ -#define AWS_HTTP_RESPONSE_ACCESS_KEY "AccessKeyId" -#define AWS_HTTP_RESPONSE_SECRET_KEY "SecretAccessKey" -#define AWS_HTTP_RESPONSE_TOKEN "Token" -#define AWS_HTTP_RESPONSE_EXPIRATION "Expiration" +#define AWS_CREDENTIAL_RESPONSE_ACCESS_KEY "AccessKeyId" +#define AWS_CREDENTIAL_RESPONSE_SECRET_KEY "SecretAccessKey" +#define AWS_HTTP_RESPONSE_TOKEN "Token" +#define AWS_CREDENTIAL_RESPONSE_EXPIRATION "Expiration" #define ECS_CREDENTIALS_HOST "169.254.170.2" #define ECS_CREDENTIALS_HOST_LEN 13
1
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2021 The Fluent Bit Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_sds.h> #include <fluent-bit/flb_http_client.h> #include <fluent-bit/flb_aws_credentials.h> #include <fluent-bit/flb_aws_util.h> #include <fluent-bit/flb_jsmn.h> #include <stdlib.h> #include <time.h> #include <sys/types.h> #include <sys/stat.h> /* HTTP Credentials Endpoints have a standard set of JSON Keys */ #define AWS_HTTP_RESPONSE_ACCESS_KEY "AccessKeyId" #define AWS_HTTP_RESPONSE_SECRET_KEY "SecretAccessKey" #define AWS_HTTP_RESPONSE_TOKEN "Token" #define AWS_HTTP_RESPONSE_EXPIRATION "Expiration" #define ECS_CREDENTIALS_HOST "169.254.170.2" #define ECS_CREDENTIALS_HOST_LEN 13 #define ECS_CREDENTIALS_PATH_ENV_VAR "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" /* Declarations */ struct flb_aws_provider_http; static int http_credentials_request(struct flb_aws_provider_http *implementation); /* * HTTP Credentials Provider - retrieve credentials from a local http server * Used to implement the ECS Credentials provider. * Equivalent to: * https://github.com/aws/aws-sdk-go/tree/master/aws/credentials/endpointcreds */ struct flb_aws_provider_http { struct flb_aws_credentials *creds; time_t next_refresh; struct flb_aws_client *client; /* Host and Path to request credentials */ flb_sds_t host; flb_sds_t path; }; struct flb_aws_credentials *get_credentials_fn_http(struct flb_aws_provider *provider) { struct flb_aws_credentials *creds = NULL; int refresh = FLB_FALSE; struct flb_aws_provider_http *implementation = provider->implementation; flb_debug("[aws_credentials] Retrieving credentials from the " "HTTP provider.."); /* a negative next_refresh means that auto-refresh is disabled */ if (implementation->next_refresh > 0 && time(NULL) > implementation->next_refresh) { refresh = FLB_TRUE; } if (!implementation->creds || refresh == FLB_TRUE) { if (try_lock_provider(provider)) { http_credentials_request(implementation); unlock_provider(provider); } } if (!implementation->creds) { /* * We failed to lock the provider and creds are unset. This means that * another co-routine is performing the refresh. */ flb_warn("[aws_credentials] No cached credentials are available and " "a credential refresh is already in progress. 
The current " "co-routine will retry."); return NULL; } creds = flb_malloc(sizeof(struct flb_aws_credentials)); if (!creds) { flb_errno(); goto error; } creds->access_key_id = flb_sds_create(implementation->creds->access_key_id); if (!creds->access_key_id) { flb_errno(); goto error; } creds->secret_access_key = flb_sds_create(implementation->creds-> secret_access_key); if (!creds->secret_access_key) { flb_errno(); goto error; } if (implementation->creds->session_token) { creds->session_token = flb_sds_create(implementation->creds-> session_token); if (!creds->session_token) { flb_errno(); goto error; } } else { creds->session_token = NULL; } return creds; error: flb_aws_credentials_destroy(creds); return NULL; } int refresh_fn_http(struct flb_aws_provider *provider) { struct flb_aws_provider_http *implementation = provider->implementation; int ret = -1; flb_debug("[aws_credentials] Refresh called on the http provider"); if (try_lock_provider(provider)) { ret = http_credentials_request(implementation); unlock_provider(provider); } return ret; } int init_fn_http(struct flb_aws_provider *provider) { struct flb_aws_provider_http *implementation = provider->implementation; int ret = -1; flb_debug("[aws_credentials] Init called on the http provider"); implementation->client->debug_only = FLB_TRUE; if (try_lock_provider(provider)) { ret = http_credentials_request(implementation); unlock_provider(provider); } implementation->client->debug_only = FLB_FALSE; return ret; } void sync_fn_http(struct flb_aws_provider *provider) { struct flb_aws_provider_http *implementation = provider->implementation; flb_debug("[aws_credentials] Sync called on the http provider"); /* remove async flag */ implementation->client->upstream->flags &= ~(FLB_IO_ASYNC); } void async_fn_http(struct flb_aws_provider *provider) { struct flb_aws_provider_http *implementation = provider->implementation; flb_debug("[aws_credentials] Async called on the http provider"); /* add async flag */ implementation->client->upstream->flags |= FLB_IO_ASYNC; } void upstream_set_fn_http(struct flb_aws_provider *provider, struct flb_output_instance *ins) { struct flb_aws_provider_http *implementation = provider->implementation; flb_debug("[aws_credentials] upstream_set called on the http provider"); /* Make sure TLS is set to false before setting upstream, then reset it */ ins->use_tls = FLB_FALSE; flb_output_upstream_set(implementation->client->upstream, ins); ins->use_tls = FLB_TRUE; } void destroy_fn_http(struct flb_aws_provider *provider) { struct flb_aws_provider_http *implementation = provider->implementation; if (implementation) { if (implementation->creds) { flb_aws_credentials_destroy(implementation->creds); } if (implementation->client) { flb_aws_client_destroy(implementation->client); } if (implementation->host) { flb_sds_destroy(implementation->host); } if (implementation->path) { flb_sds_destroy(implementation->path); } flb_free(implementation); provider->implementation = NULL; } return; } static struct flb_aws_provider_vtable http_provider_vtable = { .get_credentials = get_credentials_fn_http, .init = init_fn_http, .refresh = refresh_fn_http, .destroy = destroy_fn_http, .sync = sync_fn_http, .async = async_fn_http, .upstream_set = upstream_set_fn_http, }; struct flb_aws_provider *flb_http_provider_create(struct flb_config *config, flb_sds_t host, flb_sds_t path, struct flb_aws_client_generator *generator) { struct flb_aws_provider_http *implementation = NULL; struct flb_aws_provider *provider = NULL; struct flb_upstream *upstream = 
NULL; flb_debug("[aws_credentials] Configuring HTTP provider with %s:80%s", host, path); provider = flb_calloc(1, sizeof(struct flb_aws_provider)); if (!provider) { flb_errno(); return NULL; } implementation = flb_calloc(1, sizeof(struct flb_aws_provider_http)); if (!implementation) { flb_free(provider); flb_errno(); return NULL; } provider->provider_vtable = &http_provider_vtable; provider->implementation = implementation; implementation->host = host; implementation->path = path; upstream = flb_upstream_create(config, host, 80, FLB_IO_TCP, NULL); if (!upstream) { flb_aws_provider_destroy(provider); flb_error("[aws_credentials] HTTP Provider: connection initialization " "error"); return NULL; } upstream->net.connect_timeout = FLB_AWS_CREDENTIAL_NET_TIMEOUT; implementation->client = generator->create(); if (!implementation->client) { flb_aws_provider_destroy(provider); flb_upstream_destroy(upstream); flb_error("[aws_credentials] HTTP Provider: client creation error"); return NULL; } implementation->client->name = "http_provider_client"; implementation->client->has_auth = FLB_FALSE; implementation->client->provider = NULL; implementation->client->region = NULL; implementation->client->service = NULL; implementation->client->port = 80; implementation->client->flags = 0; implementation->client->proxy = NULL; implementation->client->upstream = upstream; return provider; } /* * ECS Provider * The ECS Provider is just a wrapper around the HTTP Provider * with the ECS credentials endpoint. */ struct flb_aws_provider *flb_ecs_provider_create(struct flb_config *config, struct flb_aws_client_generator *generator) { flb_sds_t host = NULL; flb_sds_t path = NULL; char *path_var = NULL; host = flb_sds_create_len(ECS_CREDENTIALS_HOST, ECS_CREDENTIALS_HOST_LEN); if (!host) { flb_errno(); return NULL; } path_var = getenv(ECS_CREDENTIALS_PATH_ENV_VAR); if (path_var && strlen(path_var) > 0) { path = flb_sds_create(path_var); if (!path) { flb_errno(); flb_free(host); return NULL; } return flb_http_provider_create(config, host, path, generator); } else { flb_debug("[aws_credentials] Not initializing ECS Provider because" " %s is not set", ECS_CREDENTIALS_PATH_ENV_VAR); flb_sds_destroy(host); return NULL; } } static int http_credentials_request(struct flb_aws_provider_http *implementation) { char *response = NULL; size_t response_len; time_t expiration; struct flb_aws_credentials *creds = NULL; struct flb_aws_client *client = implementation->client; struct flb_http_client *c = NULL; c = client->client_vtable->request(client, FLB_HTTP_GET, implementation->path, NULL, 0, NULL, 0); if (!c || c->resp.status != 200) { flb_debug("[aws_credentials] http credentials request failed"); if (c) { flb_http_client_destroy(c); } return -1; } response = c->resp.payload; response_len = c->resp.payload_size; creds = flb_parse_http_credentials(response, response_len, &expiration); if (!creds) { flb_http_client_destroy(c); return -1; } /* destroy existing credentials */ flb_aws_credentials_destroy(implementation->creds); implementation->creds = NULL; implementation->creds = creds; implementation->next_refresh = expiration - FLB_AWS_REFRESH_WINDOW; flb_http_client_destroy(c); return 0; } /* * All HTTP credentials endpoints (IMDS, ECS, custom) follow the same spec: * { * "AccessKeyId": "ACCESS_KEY_ID", * "Expiration": "2019-12-18T21:27:58Z", * "SecretAccessKey": "SECRET_ACCESS_KEY", * "Token": "SECURITY_TOKEN_STRING" * } * (some implementations (IMDS) have additional fields) * Returns NULL if any part of parsing was unsuccessful. 
*/ struct flb_aws_credentials *flb_parse_http_credentials(char *response, size_t response_len, time_t *expiration) { jsmntok_t *tokens = NULL; const jsmntok_t *t = NULL; char *current_token = NULL; jsmn_parser parser; int tokens_size = 50; size_t size; int ret; struct flb_aws_credentials *creds = NULL; int i = 0; int len; flb_sds_t tmp; /* * Remove/reset existing value of expiration. * Expiration should be in the response, but it is not * strictly speaking needed. Fluent Bit logs a warning if it is missing. */ *expiration = -1; jsmn_init(&parser); size = sizeof(jsmntok_t) * tokens_size; tokens = flb_calloc(1, size); if (!tokens) { goto error; } ret = jsmn_parse(&parser, response, response_len, tokens, tokens_size); if (ret == JSMN_ERROR_INVAL || ret == JSMN_ERROR_PART) { flb_error("[aws_credentials] Could not parse http credentials response" " - invalid JSON."); goto error; } /* Shouldn't happen, but just in case, check for too many tokens error */ if (ret == JSMN_ERROR_NOMEM) { flb_error("[aws_credentials] Could not parse http credentials response" " - response contained more tokens than expected."); goto error; } /* return value is number of tokens parsed */ tokens_size = ret; creds = flb_calloc(1, sizeof(struct flb_aws_credentials)); if (!creds) { flb_errno(); goto error; } /* * jsmn will create an array of tokens like: * key, value, key, value */ while (i < (tokens_size - 1)) { t = &tokens[i]; if (t->start == -1 || t->end == -1 || (t->start == 0 && t->end == 0)) { break; } if (t->type == JSMN_STRING) { current_token = &response[t->start]; len = t->end - t->start; if (strncmp(current_token, AWS_HTTP_RESPONSE_ACCESS_KEY, len) == 0) { i++; t = &tokens[i]; current_token = &response[t->start]; len = t->end - t->start; creds->access_key_id = flb_sds_create_len(current_token, len); if (!creds->access_key_id) { flb_errno(); goto error; } continue; } if (strncmp(current_token, AWS_HTTP_RESPONSE_SECRET_KEY, len) == 0) { i++; t = &tokens[i]; current_token = &response[t->start]; len = t->end - t->start; creds->secret_access_key = flb_sds_create_len(current_token, len); if (!creds->secret_access_key) { flb_errno(); goto error; } continue; } if (strncmp(current_token, AWS_HTTP_RESPONSE_TOKEN, len) == 0) { i++; t = &tokens[i]; current_token = &response[t->start]; len = t->end - t->start; creds->session_token = flb_sds_create_len(current_token, len); if (!creds->session_token) { flb_errno(); goto error; } continue; } if (strncmp(current_token, AWS_HTTP_RESPONSE_EXPIRATION, len) == 0) { i++; t = &tokens[i]; current_token = &response[t->start]; len = t->end - t->start; tmp = flb_sds_create_len(current_token, len); if (!tmp) { flb_errno(); goto error; } *expiration = flb_aws_cred_expiration(tmp); flb_sds_destroy(tmp); if (*expiration < 0) { flb_warn("[aws_credentials] '%s' was invalid or " "could not be parsed. Disabling auto-refresh of " "credentials.", AWS_HTTP_RESPONSE_EXPIRATION); } } } i++; } if (creds->access_key_id == NULL) { flb_error("[aws_credentials] Missing %s field in http" "credentials response", AWS_HTTP_RESPONSE_ACCESS_KEY); goto error; } if (creds->secret_access_key == NULL) { flb_error("[aws_credentials] Missing %s field in http" "credentials response", AWS_HTTP_RESPONSE_SECRET_KEY); goto error; } if (creds->session_token == NULL) { flb_error("[aws_credentials] Missing %s field in http" "credentials response", AWS_HTTP_RESPONSE_TOKEN); goto error; } flb_free(tokens); return creds; error: flb_aws_credentials_destroy(creds); flb_free(tokens); return NULL; }
1
14,915
Based on the docs, credential process can return a key `SessionToken`
fluent-fluent-bit
c
@@ -9,8 +9,12 @@ export default Ember.Mixin.create({ if (birthDate) { if (birthDate.getFullYear === undefined) { - birthDate = moment(birthDate, 'l').toDate(); + birthDate = moment(birthDate, 'LLL').toDate(); } + + birthDate.setHours(0, 0, 0, 0); + today.setHours(0, 0, 0, 0); + if (birthDate.getFullYear !== undefined) { years = today.getFullYear() - birthDate.getFullYear(); if (today.getMonth() < birthDate.getMonth()
1
import Ember from 'ember'; import moment from 'moment'; export default Ember.Mixin.create({ convertDOBToText(birthDate, shortFormat, omitDays) { let today = new Date(); let years = 0; let months = 0; let days = 0; if (birthDate) { if (birthDate.getFullYear === undefined) { birthDate = moment(birthDate, 'l').toDate(); } if (birthDate.getFullYear !== undefined) { years = today.getFullYear() - birthDate.getFullYear(); if (today.getMonth() < birthDate.getMonth() || (today.getMonth() === birthDate.getMonth() && today.getDate() < birthDate.getDate())) { years--; } } if (birthDate.getMonth) { months = today.getMonth() - birthDate.getMonth(); days = today.getDate() - birthDate.getDate(); if (months <= 0) { if (days < 0) { months += 11; } else if (months < 0) { months += 12; } } else { if (days < 0) { months = months - 1; } } } if (birthDate.getDate) { days = today.getUTCDate() - birthDate.getUTCDate(); if (days < 0) { days += 30; } } } let formatString = ''; let options = { years, days, months }; let i18n = this.get('i18n'); if (shortFormat) { if (years > 0) { formatString = i18n.t('dates.short', options); } else { formatString = i18n.t('dates.shortOmitYears', options); } } else if (omitDays) { if (years > 1) { formatString = i18n.t('dates.longOmitDaysPlural', options); } else if (years === 1) { formatString = i18n.t('dates.longOmitDays', options); } else { formatString = i18n.t('dates.longOmitDaysYears', options); } } else { if (years > 1) { formatString = i18n.t('dates.longPlural', options); } else if (years === 1) { formatString = i18n.t('dates.long', options); } else { formatString = i18n.t('dates.longOmitYears', options); } } return formatString; } });
1
13,938
I think this if should be removed (see issue)
HospitalRun-hospitalrun-frontend
js
@@ -19,6 +19,7 @@ import ( log "github.com/sirupsen/logrus" + "github.com/projectcalico/felix/ipsets" "github.com/projectcalico/libcalico-go/lib/set" )
1
// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ipsets import ( "strings" log "github.com/sirupsen/logrus" "github.com/projectcalico/libcalico-go/lib/set" ) type CallBackFunc func(ipSetId string) // IPSets manages a whole plane of IP sets, i.e. all the IPv4 sets, or all the IPv6 IP sets. type IPSets struct { IPVersionConfig *IPVersionConfig ipSetIDToIPSet map[string]*ipSet logCxt *log.Entry callbackOnUpdate CallBackFunc } func NewIPSets(ipVersionConfig *IPVersionConfig) *IPSets { return &IPSets{ IPVersionConfig: ipVersionConfig, ipSetIDToIPSet: map[string]*ipSet{}, logCxt: log.WithFields(log.Fields{ "family": ipVersionConfig.Family, }), } } func (s *IPSets) SetCallback(callback CallBackFunc) { s.callbackOnUpdate = callback } // AddOrReplaceIPSet is responsible for the creation (or replacement) of an IP set in the store func (s *IPSets) AddOrReplaceIPSet(setMetadata IPSetMetadata, members []string) { log.WithFields(log.Fields{ "metadata": setMetadata, "numMembers": len(members), }).Info("Adding IP set to cache") s.logCxt.WithFields(log.Fields{ "setID": setMetadata.SetID, "setType": setMetadata.Type, }).Info("Creating IP set") filteredMembers := s.filterMembers(members) // Create the IP set struct and stores it by id setID := setMetadata.SetID ipSet := &ipSet{ IPSetMetadata: setMetadata, Members: filteredMembers, } s.ipSetIDToIPSet[setID] = ipSet s.callbackOnUpdate(setID) } // RemoveIPSet is responsible for the removal of an IP set from the store func (s *IPSets) RemoveIPSet(setID string) { s.logCxt.WithField("setID", setID).Info("Removing IP set") delete(s.ipSetIDToIPSet, setID) s.callbackOnUpdate(setID) } // AddMembers adds a range of new members to an existing IP set in the store func (s *IPSets) AddMembers(setID string, newMembers []string) { if len(newMembers) == 0 { return } ipSet := s.ipSetIDToIPSet[setID] filteredMembers := s.filterMembers(newMembers) if filteredMembers.Len() == 0 { return } s.logCxt.WithFields(log.Fields{ "setID": setID, "filteredMembers": filteredMembers, }).Debug("Adding new members to IP set") filteredMembers.Iter(func(m interface{}) error { ipSet.Members.Add(m) return nil }) s.callbackOnUpdate(setID) } // RemoveMembers removes a range of members from an existing IP set in the store func (s *IPSets) RemoveMembers(setID string, removedMembers []string) { if len(removedMembers) == 0 { return } ipSet := s.ipSetIDToIPSet[setID] filteredMembers := s.filterMembers(removedMembers) if filteredMembers.Len() == 0 { return } s.logCxt.WithFields(log.Fields{ "setID": setID, "filteredMembers": filteredMembers, }).Debug("Removing members from IP set") filteredMembers.Iter(func(m interface{}) error { ipSet.Members.Discard(m) return nil }) s.callbackOnUpdate(setID) } // GetIPSetMembers returns all of the members for a given IP set func (s *IPSets) GetIPSetMembers(setID string) []string { var retVal []string ipSet := s.ipSetIDToIPSet[setID] if ipSet == nil { return nil } ipSet.Members.Iter(func(item interface{}) 
error { member := item.(string) retVal = append(retVal, member) return nil }) // Note: It is very important that nil is returned if there is no ip in an ipset // so that policy rules related to this ipset won't be populated. return retVal } // filterMembers filters out any members which are not of the correct // ip family for the IPSet func (s *IPSets) filterMembers(members []string) set.Set { filtered := set.New() wantIPV6 := s.IPVersionConfig.Family == IPFamilyV6 for _, member := range members { isIPV6 := strings.Contains(member, ":") if wantIPV6 != isIPV6 { continue } filtered.Add(member) } return filtered } func (s *IPSets) GetIPFamily() IPFamily { return s.IPVersionConfig.Family } // The following functions are no-ops on Windows. func (s *IPSets) QueueResync() { } func (m *IPSets) GetTypeOf(setID string) (IPSetType, error) { panic("Not implemented") } func (m *IPSets) GetMembers(setID string) (set.Set, error) { // GetMembers is only called from XDPState, and XDPState does not coexist with // config.BPFEnabled. panic("Not implemented") } func (m *IPSets) ApplyUpdates() { } func (m *IPSets) ApplyDeletions() { } func (s *IPSets) SetFilter(ipSetNames set.Set) { // Not needed for Windows. }
1
19,772
I would avoid importing the `felix/ipsets` package, because this package is the Windows equivalent and should stay at the same level as `felix/ipsets`. We could add Linux-specific dependencies into `felix/ipsets` later, and that would break the Windows build.
projectcalico-felix
c
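The reviewer above objects to the Windows `ipsets` package importing the Linux-side `felix/ipsets` package. One way to satisfy that, sketched below under assumptions: move whatever definition the patch needed into a small shared leaf package that both platform-specific packages can import. The package name `ipsetscommon` and its contents are illustrative, not Felix's actual layout:

```go
// Hypothetical leaf package shared by the Linux felix/ipsets package and the
// Windows felix/dataplane/windows/ipsets package, so that neither has to
// import the other.
package ipsetscommon

// CallBackFunc is invoked whenever an IP set is created, updated or removed.
type CallBackFunc func(ipSetID string)
```

Both platform packages could then depend on `ipsetscommon` without the Windows build pulling in any Linux-only dependencies that `felix/ipsets` might acquire later.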
@@ -19,6 +19,7 @@ # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Generate the html documentation based on the asciidoc files.""" +from typing import List, Tuple import re import os
1
#!/usr/bin/env python3 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]> # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Generate the html documentation based on the asciidoc files.""" import re import os import os.path import sys import subprocess import glob import shutil import tempfile import argparse import io sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir)) from scripts import utils class AsciiDoc: """Abstraction of an asciidoc subprocess.""" FILES = ['faq', 'changelog', 'contributing', 'quickstart', 'userscripts'] def __init__(self, asciidoc, website): self._cmd = None self._asciidoc = asciidoc self._website = website self._homedir = None self._themedir = None self._tempdir = None self._failed = False def prepare(self): """Get the asciidoc command and create the homedir to use.""" self._cmd = self._get_asciidoc_cmd() self._homedir = tempfile.mkdtemp() self._themedir = os.path.join( self._homedir, '.asciidoc', 'themes', 'qute') self._tempdir = os.path.join(self._homedir, 'tmp') os.makedirs(self._tempdir) os.makedirs(self._themedir) def cleanup(self): """Clean up the temporary home directory for asciidoc.""" if self._homedir is not None and not self._failed: shutil.rmtree(self._homedir) def build(self): """Build either the website or the docs.""" if self._website: self._build_website() else: self._build_docs() self._copy_images() def _build_docs(self): """Render .asciidoc files to .html sites.""" files = [('doc/{}.asciidoc'.format(f), 'qutebrowser/html/doc/{}.html'.format(f)) for f in self.FILES] for src in glob.glob('doc/help/*.asciidoc'): name, _ext = os.path.splitext(os.path.basename(src)) dst = 'qutebrowser/html/doc/{}.html'.format(name) files.append((src, dst)) # patch image links to use local copy replacements = [ ("https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-big.png", "qute://help/img/cheatsheet-big.png"), ("https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-small.png", "qute://help/img/cheatsheet-small.png") ] asciidoc_args = ['-a', 'source-highlighter=pygments'] for src, dst in files: src_basename = os.path.basename(src) modified_src = os.path.join(self._tempdir, src_basename) with open(modified_src, 'w', encoding='utf-8') as modified_f, \ open(src, 'r', encoding='utf-8') as f: for line in f: for orig, repl in replacements: line = line.replace(orig, repl) modified_f.write(line) self.call(modified_src, dst, *asciidoc_args) def _copy_images(self): """Copy image files to qutebrowser/html/doc.""" print("Copying files...") dst_path = os.path.join('qutebrowser', 'html', 'doc', 'img') try: os.mkdir(dst_path) except FileExistsError: pass for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']: src = os.path.join('doc', 'img', filename) dst = os.path.join(dst_path, filename) shutil.copy(src, dst) def 
_build_website_file(self, root, filename): """Build a single website file.""" src = os.path.join(root, filename) src_basename = os.path.basename(src) parts = [self._website[0]] dirname = os.path.dirname(src) if dirname: parts.append(os.path.relpath(os.path.dirname(src))) parts.append( os.extsep.join((os.path.splitext(src_basename)[0], 'html'))) dst = os.path.join(*parts) os.makedirs(os.path.dirname(dst), exist_ok=True) modified_src = os.path.join(self._tempdir, src_basename) shutil.copy('www/header.asciidoc', modified_src) outfp = io.StringIO() with open(modified_src, 'r', encoding='utf-8') as header_file: header = header_file.read() header += "\n\n" with open(src, 'r', encoding='utf-8') as infp: outfp.write("\n\n") hidden = False found_title = False title = "" last_line = "" for line in infp: line = line.rstrip() if line == '// QUTE_WEB_HIDE': assert not hidden hidden = True elif line == '// QUTE_WEB_HIDE_END': assert hidden hidden = False elif line == "The Compiler <[email protected]>": continue elif re.fullmatch(r':\w+:.*', line): # asciidoc field continue if not found_title: if re.fullmatch(r'=+', line): line = line.replace('=', '-') found_title = True title = last_line + " | qutebrowser\n" title += "=" * (len(title) - 1) elif re.fullmatch(r'= .+', line): line = '==' + line[1:] found_title = True title = last_line + " | qutebrowser\n" title += "=" * (len(title) - 1) if not hidden: outfp.write(line.replace(".asciidoc[", ".html[") + '\n') last_line = line current_lines = outfp.getvalue() outfp.close() with open(modified_src, 'w+', encoding='utf-8') as final_version: final_version.write(title + "\n\n" + header + current_lines) asciidoc_args = ['--theme=qute', '-a toc', '-a toc-placement=manual', '-a', 'source-highlighter=pygments'] self.call(modified_src, dst, *asciidoc_args) def _build_website(self): """Prepare and build the website.""" theme_file = os.path.abspath(os.path.join('www', 'qute.css')) shutil.copy(theme_file, self._themedir) outdir = self._website[0] for root, _dirs, files in os.walk(os.getcwd()): for filename in files: basename, ext = os.path.splitext(filename) if (ext != '.asciidoc' or basename in ['header', 'OpenSans-License']): continue self._build_website_file(root, filename) copy = {'icons': 'icons', 'doc/img': 'doc/img', 'www/media': 'media/'} for src, dest in copy.items(): full_dest = os.path.join(outdir, dest) try: shutil.rmtree(full_dest) except FileNotFoundError: pass shutil.copytree(src, full_dest) for dst, link_name in [ ('README.html', 'index.html'), (os.path.join('doc', 'quickstart.html'), 'quickstart.html')]: try: os.symlink(dst, os.path.join(outdir, link_name)) except FileExistsError: pass def _get_asciidoc_cmd(self): """Try to find out what commandline to use to invoke asciidoc.""" if self._asciidoc is not None: return self._asciidoc try: subprocess.run(['asciidoc'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) except OSError: pass else: return ['asciidoc'] try: subprocess.run(['asciidoc.py'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) except OSError: pass else: return ['asciidoc.py'] raise FileNotFoundError def call(self, src, dst, *args): """Call asciidoc for the given files. Args: src: The source .asciidoc file. dst: The destination .html file, or None to auto-guess. *args: Additional arguments passed to asciidoc. 
""" print("Calling asciidoc for {}...".format(os.path.basename(src))) cmdline = self._cmd[:] if dst is not None: cmdline += ['--out-file', dst] cmdline += args cmdline.append(src) try: env = os.environ.copy() env['HOME'] = self._homedir subprocess.run(cmdline, check=True, env=env) except (subprocess.CalledProcessError, OSError) as e: self._failed = True utils.print_col(str(e), 'red') print("Keeping modified sources in {}.".format(self._homedir)) sys.exit(1) def parse_args(): """Parse command-line arguments.""" parser = argparse.ArgumentParser() parser.add_argument('--website', help="Build website into a given " "directory.", nargs=1) parser.add_argument('--asciidoc', help="Full path to python and " "asciidoc.py. If not given, it's searched in PATH.", nargs=2, required=False, metavar=('PYTHON', 'ASCIIDOC')) return parser.parse_args() def run(**kwargs): """Regenerate documentation.""" try: os.mkdir('qutebrowser/html/doc') except FileExistsError: pass asciidoc = AsciiDoc(**kwargs) try: asciidoc.prepare() except FileNotFoundError: utils.print_col("Could not find asciidoc! Please install it, or use " "the --asciidoc argument to point this script to the " "correct python/asciidoc.py location!", 'red') sys.exit(1) try: asciidoc.build() finally: asciidoc.cleanup() def main(colors=False): """Generate html files for the online documentation.""" utils.change_cwd() utils.use_color = colors args = parse_args() run(asciidoc=args.asciidoc, website=args.website) if __name__ == '__main__': main(colors=True)
1
24,308
nitpick: Please move this down to the other imports, as it's a Python stdlib import.
qutebrowser-qutebrowser
py
@@ -13,7 +13,8 @@ // limitations under the License. // Package blob provides an easy and portable way to interact with blobs -// within a storage location, hereafter called a "bucket". +// within a storage location, hereafter called a "bucket". See +// https://gocloud.dev/howto/blob/ for getting started guides. // // It supports operations like reading and writing blobs (using standard // interfaces from the io package), deleting blobs, and listing blobs in a
1
// Copyright 2018 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package blob provides an easy and portable way to interact with blobs // within a storage location, hereafter called a "bucket". // // It supports operations like reading and writing blobs (using standard // interfaces from the io package), deleting blobs, and listing blobs in a // bucket. // // Subpackages contain distinct implementations of blob for various providers, // including Cloud and on-prem solutions. For example, "fileblob" supports // blobs backed by a filesystem. Your application should import one of these // provider-specific subpackages and use its exported function(s) to create a // *Bucket; do not use the NewBucket function in this package. For example: // // bucket, err := fileblob.OpenBucket("path/to/dir", nil) // if err != nil { // return fmt.Errorf("could not open bucket: %v", err) // } // buf, err := bucket.ReadAll(context.Background(), "myfile.txt") // ... // // Then, write your application code using the *Bucket type. You can easily // reconfigure your initialization code to choose a different provider. // You can develop your application locally using fileblob, or deploy it to // multiple Cloud providers. You may find http://github.com/google/wire useful // for managing your initialization code. // // Alternatively, you can construct a *Bucket via a URL and OpenBucket. // See https://godoc.org/gocloud.dev#hdr-URLs for more information. // // // Errors // // The errors returned from this package can be inspected in several ways: // // The Code function from gocloud.dev/gcerrors will return an error code, also // defined in that package, when invoked on an error. // // The Bucket.ErrorAs method can retrieve the driver error underlying the returned // error. // // // OpenCensus Integration // // OpenCensus supports tracing and metric collection for multiple languages and // backend providers. See https://opencensus.io. // // This API collects OpenCensus traces and metrics for the following methods: // - Attributes // - Copy // - Delete // - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll // are included because they call NewRangeReader.) // - NewWriter, from creation until the call to Close. // All trace and metric names begin with the package import path. // The traces add the method name. // For example, "gocloud.dev/blob/Attributes". // The metrics are "completed_calls", a count of completed method calls by provider, // method and status (error code); and "latency", a distribution of method latency // by provider and method. // For example, "gocloud.dev/blob/latency". // // It also collects the following metrics: // - gocloud.dev/blob/bytes_read: the total number of bytes read, by provider. // - gocloud.dev/blob/bytes_written: the total number of bytes written, by provider. // // To enable trace collection in your application, see "Configure Exporter" at // https://opencensus.io/quickstart/go/tracing. 
// To enable metric collection in your application, see "Exporting stats" at // https://opencensus.io/quickstart/go/metrics. package blob // import "gocloud.dev/blob" import ( "bytes" "context" "crypto/md5" "fmt" "hash" "io" "io/ioutil" "log" "mime" "net/http" "net/url" "runtime" "strings" "sync" "time" "unicode/utf8" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "gocloud.dev/blob/driver" "gocloud.dev/gcerrors" "gocloud.dev/internal/gcerr" "gocloud.dev/internal/oc" "gocloud.dev/internal/openurl" ) // Reader reads bytes from a blob. // It implements io.ReadCloser, and must be closed after // reads are finished. type Reader struct { b driver.Bucket r driver.Reader end func(error) // called at Close to finish trace and metric collection provider string // for metric collection closed bool } // Read implements io.Reader (https://golang.org/pkg/io/#Reader). func (r *Reader) Read(p []byte) (int, error) { n, err := r.r.Read(p) stats.RecordWithTags(context.Background(), []tag.Mutator{tag.Upsert(oc.ProviderKey, r.provider)}, bytesReadMeasure.M(int64(n))) return n, wrapError(r.b, err) } // Close implements io.Closer (https://golang.org/pkg/io/#Closer). func (r *Reader) Close() error { r.closed = true err := wrapError(r.b, r.r.Close()) r.end(err) return err } // ContentType returns the MIME type of the blob. func (r *Reader) ContentType() string { return r.r.Attributes().ContentType } // ModTime returns the time the blob was last modified. func (r *Reader) ModTime() time.Time { return r.r.Attributes().ModTime } // Size returns the size of the blob content in bytes. func (r *Reader) Size() int64 { return r.r.Attributes().Size } // As converts i to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information, the "As" // examples in this package for examples, and the provider-specific package // documentation for the specific types supported for that provider. func (r *Reader) As(i interface{}) bool { return r.r.As(i) } // Attributes contains attributes about a blob. type Attributes struct { // CacheControl specifies caching attributes that providers may use // when serving the blob. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control CacheControl string // ContentDisposition specifies whether the blob content is expected to be // displayed inline or as an attachment. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition ContentDisposition string // ContentEncoding specifies the encoding used for the blob's content, if any. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding ContentEncoding string // ContentLanguage specifies the language used in the blob's content, if any. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language ContentLanguage string // ContentType is the MIME type of the blob. It will not be empty. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type ContentType string // Metadata holds key/value pairs associated with the blob. // Keys are guaranteed to be in lowercase, even if the backend provider // has case-sensitive keys (although note that Metadata written via // this package will always be lowercased). If there are duplicate // case-insensitive keys (e.g., "foo" and "FOO"), only one value // will be kept, and it is undefined which one. Metadata map[string]string // ModTime is the time the blob was last modified. ModTime time.Time // Size is the size of the blob's content in bytes. 
Size int64 // MD5 is an MD5 hash of the blob contents or nil if not available. MD5 []byte asFunc func(interface{}) bool } // As converts i to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information, the "As" // examples in this package for examples, and the provider-specific package // documentation for the specific types supported for that provider. func (a *Attributes) As(i interface{}) bool { if a.asFunc == nil { return false } return a.asFunc(i) } // Writer writes bytes to a blob. // // It implements io.WriteCloser (https://golang.org/pkg/io/#Closer), and must be // closed after all writes are done. type Writer struct { b driver.Bucket w driver.Writer end func(error) // called at Close to finish trace and metric collection cancel func() // cancels the ctx provided to NewTypedWriter if contentMD5 verification fails contentMD5 []byte md5hash hash.Hash provider string // for metric collection closed bool // These fields exist only when w is not yet created. // // A ctx is stored in the Writer since we need to pass it into NewTypedWriter // when we finish detecting the content type of the blob and create the // underlying driver.Writer. This step happens inside Write or Close and // neither of them take a context.Context as an argument. The ctx is set // to nil after we have passed it to NewTypedWriter. ctx context.Context key string opts *driver.WriterOptions buf *bytes.Buffer } // sniffLen is the byte size of Writer.buf used to detect content-type. const sniffLen = 512 // Write implements the io.Writer interface (https://golang.org/pkg/io/#Writer). // // Writes may happen asynchronously, so the returned error can be nil // even if the actual write eventually fails. The write is only guaranteed to // have succeeded if Close returns no error. func (w *Writer) Write(p []byte) (n int, err error) { if len(w.contentMD5) > 0 { if _, err := w.md5hash.Write(p); err != nil { return 0, err } } if w.w != nil { return w.write(p) } // If w is not yet created due to no content-type being passed in, try to sniff // the MIME type based on at most 512 bytes of the blob content of p. // Detect the content-type directly if the first chunk is at least 512 bytes. if w.buf.Len() == 0 && len(p) >= sniffLen { return w.open(p) } // Store p in w.buf and detect the content-type when the size of content in // w.buf is at least 512 bytes. w.buf.Write(p) if w.buf.Len() >= sniffLen { return w.open(w.buf.Bytes()) } return len(p), nil } // Close closes the blob writer. The write operation is not guaranteed to have succeeded until // Close returns with no error. // Close may return an error if the context provided to create the Writer is // canceled or reaches its deadline. func (w *Writer) Close() (err error) { w.closed = true defer func() { w.end(err) }() if len(w.contentMD5) > 0 { // Verify the MD5 hash of what was written matches the ContentMD5 provided // by the user. md5sum := w.md5hash.Sum(nil) if !bytes.Equal(md5sum, w.contentMD5) { // No match! Return an error, but first cancel the context and call the // driver's Close function to ensure the write is aborted. 
w.cancel() if w.w != nil { _ = w.w.Close() } return gcerr.Newf(gcerr.FailedPrecondition, nil, "blob: the WriterOptions.ContentMD5 you specified (%X) did not match what was written (%X)", w.contentMD5, md5sum) } } defer w.cancel() if w.w != nil { return wrapError(w.b, w.w.Close()) } if _, err := w.open(w.buf.Bytes()); err != nil { return err } return wrapError(w.b, w.w.Close()) } // open tries to detect the MIME type of p and write it to the blob. // The error it returns is wrapped. func (w *Writer) open(p []byte) (int, error) { ct := http.DetectContentType(p) var err error if w.w, err = w.b.NewTypedWriter(w.ctx, w.key, ct, w.opts); err != nil { return 0, wrapError(w.b, err) } w.buf = nil w.ctx = nil w.key = "" w.opts = nil return w.write(p) } func (w *Writer) write(p []byte) (int, error) { n, err := w.w.Write(p) stats.RecordWithTags(context.Background(), []tag.Mutator{tag.Upsert(oc.ProviderKey, w.provider)}, bytesWrittenMeasure.M(int64(n))) return n, wrapError(w.b, err) } // ListOptions sets options for listing blobs via Bucket.List. type ListOptions struct { // Prefix indicates that only blobs with a key starting with this prefix // should be returned. Prefix string // Delimiter sets the delimiter used to define a hierarchical namespace, // like a filesystem with "directories". It is highly recommended that you // use "" or "/" as the Delimiter. Other values should work through this API, // but provider UIs generally assume "/". // // An empty delimiter means that the bucket is treated as a single flat // namespace. // // A non-empty delimiter means that any result with the delimiter in its key // after Prefix is stripped will be returned with ListObject.IsDir = true, // ListObject.Key truncated after the delimiter, and zero values for other // ListObject fields. These results represent "directories". Multiple results // in a "directory" are returned as a single result. Delimiter string // BeforeList is a callback that will be called before each call to the // the underlying provider's list functionality. // asFunc converts its argument to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information. BeforeList func(asFunc func(interface{}) bool) error } // ListIterator iterates over List results. type ListIterator struct { b *Bucket opts *driver.ListOptions page *driver.ListPage nextIdx int } // Next returns a *ListObject for the next blob. It returns (nil, io.EOF) if // there are no more. func (i *ListIterator) Next(ctx context.Context) (*ListObject, error) { if i.page != nil { // We've already got a page of results. if i.nextIdx < len(i.page.Objects) { // Next object is in the page; return it. dobj := i.page.Objects[i.nextIdx] i.nextIdx++ return &ListObject{ Key: dobj.Key, ModTime: dobj.ModTime, Size: dobj.Size, MD5: dobj.MD5, IsDir: dobj.IsDir, asFunc: dobj.AsFunc, }, nil } if len(i.page.NextPageToken) == 0 { // Done with current page, and there are no more; return io.EOF. return nil, io.EOF } // We need to load the next page. i.opts.PageToken = i.page.NextPageToken } i.b.mu.RLock() defer i.b.mu.RUnlock() if i.b.closed { return nil, errClosed } // Loading a new page. p, err := i.b.b.ListPaged(ctx, i.opts) if err != nil { return nil, wrapError(i.b.b, err) } i.page = p i.nextIdx = 0 return i.Next(ctx) } // ListObject represents a single blob returned from List. type ListObject struct { // Key is the key for this blob. Key string // ModTime is the time the blob was last modified. ModTime time.Time // Size is the size of the blob's content in bytes. 
Size int64 // MD5 is an MD5 hash of the blob contents or nil if not available. MD5 []byte // IsDir indicates that this result represents a "directory" in the // hierarchical namespace, ending in ListOptions.Delimiter. Key can be // passed as ListOptions.Prefix to list items in the "directory". // Fields other than Key and IsDir will not be set if IsDir is true. IsDir bool asFunc func(interface{}) bool } // As converts i to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information, the "As" // examples in this package for examples, and the provider-specific package // documentation for the specific types supported for that provider. func (o *ListObject) As(i interface{}) bool { if o.asFunc == nil { return false } return o.asFunc(i) } // Bucket provides an easy and portable way to interact with blobs // within a "bucket", including read, write, and list operations. // To create a Bucket, use constructors found in provider-specific // subpackages. type Bucket struct { b driver.Bucket tracer *oc.Tracer // mu protects the closed variable. // Read locks are kept to prevent closing until a call finishes. mu sync.RWMutex closed bool } const pkgName = "gocloud.dev/blob" var ( latencyMeasure = oc.LatencyMeasure(pkgName) bytesReadMeasure = stats.Int64(pkgName+"/bytes_read", "Total bytes read", stats.UnitBytes) bytesWrittenMeasure = stats.Int64(pkgName+"/bytes_written", "Total bytes written", stats.UnitBytes) // OpenCensusViews are predefined views for OpenCensus metrics. // The views include counts and latency distributions for API method calls, // and total bytes read and written. // See the example at https://godoc.org/go.opencensus.io/stats/view for usage. OpenCensusViews = append( oc.Views(pkgName, latencyMeasure), &view.View{ Name: pkgName + "/bytes_read", Measure: bytesReadMeasure, Description: "Sum of bytes read from the provider service.", TagKeys: []tag.Key{oc.ProviderKey}, Aggregation: view.Sum(), }, &view.View{ Name: pkgName + "/bytes_written", Measure: bytesWrittenMeasure, Description: "Sum of bytes written to the provider service.", TagKeys: []tag.Key{oc.ProviderKey}, Aggregation: view.Sum(), }) ) // NewBucket is intended for use by provider implementations. var NewBucket = newBucket // newBucket creates a new *Bucket based on a specific driver implementation. // End users should use subpackages to construct a *Bucket instead of this // function; see the package documentation for details. func newBucket(b driver.Bucket) *Bucket { return &Bucket{ b: b, tracer: &oc.Tracer{ Package: pkgName, Provider: oc.ProviderName(b), LatencyMeasure: latencyMeasure, }, } } // As converts i to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information, the "As" // examples in this package for examples, and the provider-specific package // documentation for the specific types supported for that provider. func (b *Bucket) As(i interface{}) bool { if i == nil { return false } return b.b.As(i) } // ErrorAs converts err to provider-specific types. // ErrorAs panics if i is nil or not a pointer. // ErrorAs returns false if err == nil. // See https://godoc.org/gocloud.dev#hdr-As for background information. func (b *Bucket) ErrorAs(err error, i interface{}) bool { return gcerr.ErrorAs(err, i, b.b.ErrorAs) } // ReadAll is a shortcut for creating a Reader via NewReader with nil // ReaderOptions, and reading the entire blob. 
func (b *Bucket) ReadAll(ctx context.Context, key string) (_ []byte, err error) { b.mu.RLock() defer b.mu.RUnlock() if b.closed { return nil, errClosed } r, err := b.NewReader(ctx, key, nil) if err != nil { return nil, err } defer r.Close() return ioutil.ReadAll(r) } // List returns a ListIterator that can be used to iterate over blobs in a // bucket, in lexicographical order of UTF-8 encoded keys. The underlying // implementation fetches results in pages. // // A nil ListOptions is treated the same as the zero value. // // List is not guaranteed to include all recently-written blobs; // some providers are only eventually consistent. func (b *Bucket) List(opts *ListOptions) *ListIterator { if opts == nil { opts = &ListOptions{} } dopts := &driver.ListOptions{ Prefix: opts.Prefix, Delimiter: opts.Delimiter, BeforeList: opts.BeforeList, } return &ListIterator{b: b, opts: dopts} } // Exists returns true if a blob exists at key, false if it does not exist, or // an error. // It is a shortcut for calling Attributes and checking if it returns an error // with code gcerrors.NotFound. func (b *Bucket) Exists(ctx context.Context, key string) (bool, error) { _, err := b.Attributes(ctx, key) if err == nil { return true, nil } if gcerrors.Code(err) == gcerrors.NotFound { return false, nil } return false, err } // Attributes returns attributes for the blob stored at key. // // If the blob does not exist, Attributes returns an error for which // gcerrors.Code will return gcerrors.NotFound. func (b *Bucket) Attributes(ctx context.Context, key string) (_ Attributes, err error) { if !utf8.ValidString(key) { return Attributes{}, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: Attributes key must be a valid UTF-8 string: %q", key) } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return Attributes{}, errClosed } ctx = b.tracer.Start(ctx, "Attributes") defer func() { b.tracer.End(ctx, err) }() a, err := b.b.Attributes(ctx, key) if err != nil { return Attributes{}, wrapError(b.b, err) } var md map[string]string if len(a.Metadata) > 0 { // Providers are inconsistent, but at least some treat keys // as case-insensitive. To make the behavior consistent, we // force-lowercase them when writing and reading. md = make(map[string]string, len(a.Metadata)) for k, v := range a.Metadata { md[strings.ToLower(k)] = v } } return Attributes{ CacheControl: a.CacheControl, ContentDisposition: a.ContentDisposition, ContentEncoding: a.ContentEncoding, ContentLanguage: a.ContentLanguage, ContentType: a.ContentType, Metadata: md, ModTime: a.ModTime, Size: a.Size, MD5: a.MD5, asFunc: a.AsFunc, }, nil } // NewReader is a shortcut for NewRangedReader with offset=0 and length=-1. func (b *Bucket) NewReader(ctx context.Context, key string, opts *ReaderOptions) (*Reader, error) { return b.newRangeReader(ctx, key, 0, -1, opts) } // NewRangeReader returns a Reader to read content from the blob stored at key. // It reads at most length bytes starting at offset (>= 0). // If length is negative, it will read till the end of the blob. // // If the blob does not exist, NewRangeReader returns an error for which // gcerrors.Code will return gcerrors.NotFound. Exists is a lighter-weight way // to check for existence. // // A nil ReaderOptions is treated the same as the zero value. // // The caller must call Close on the returned Reader when done reading. 
func (b *Bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *ReaderOptions) (_ *Reader, err error) { return b.newRangeReader(ctx, key, offset, length, opts) } func (b *Bucket) newRangeReader(ctx context.Context, key string, offset, length int64, opts *ReaderOptions) (_ *Reader, err error) { b.mu.RLock() defer b.mu.RUnlock() if b.closed { return nil, errClosed } if offset < 0 { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: NewRangeReader offset must be non-negative (%d)", offset) } if !utf8.ValidString(key) { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: NewRangeReader key must be a valid UTF-8 string: %q", key) } if opts == nil { opts = &ReaderOptions{} } dopts := &driver.ReaderOptions{} tctx := b.tracer.Start(ctx, "NewRangeReader") defer func() { // If err == nil, we handed the end closure off to the returned *Writer; it // will be called when the Writer is Closed. if err != nil { b.tracer.End(tctx, err) } }() dr, err := b.b.NewRangeReader(ctx, key, offset, length, dopts) if err != nil { return nil, wrapError(b.b, err) } end := func(err error) { b.tracer.End(tctx, err) } r := &Reader{b: b.b, r: dr, end: end, provider: b.tracer.Provider} _, file, lineno, ok := runtime.Caller(2) runtime.SetFinalizer(r, func(r *Reader) { if !r.closed { var caller string if ok { caller = fmt.Sprintf(" (%s:%d)", file, lineno) } log.Printf("A blob.Reader reading from %q was never closed%s", key, caller) } }) return r, nil } // WriteAll is a shortcut for creating a Writer via NewWriter and writing p. // // If opts.ContentMD5 is not set, WriteAll will compute the MD5 of p and use it // as the ContentMD5 option for the Writer it creates. func (b *Bucket) WriteAll(ctx context.Context, key string, p []byte, opts *WriterOptions) (err error) { realOpts := new(WriterOptions) if opts != nil { *realOpts = *opts } if len(realOpts.ContentMD5) == 0 { sum := md5.Sum(p) realOpts.ContentMD5 = sum[:] } w, err := b.NewWriter(ctx, key, realOpts) if err != nil { return err } if _, err := w.Write(p); err != nil { _ = w.Close() return err } return w.Close() } // NewWriter returns a Writer that writes to the blob stored at key. // A nil WriterOptions is treated the same as the zero value. // // If a blob with this key already exists, it will be replaced. // The blob being written is not guaranteed to be readable until Close // has been called; until then, any previous blob will still be readable. // Even after Close is called, newly written blobs are not guaranteed to be // returned from List; some providers are only eventually consistent. // // The returned Writer will store ctx for later use in Write and/or Close. // To abort a write, cancel ctx; otherwise, it must remain open until // Close is called. // // The caller must call Close on the returned Writer, even if the write is // aborted. func (b *Bucket) NewWriter(ctx context.Context, key string, opts *WriterOptions) (_ *Writer, err error) { if !utf8.ValidString(key) { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: NewWriter key must be a valid UTF-8 string: %q", key) } if opts == nil { opts = &WriterOptions{} } dopts := &driver.WriterOptions{ CacheControl: opts.CacheControl, ContentDisposition: opts.ContentDisposition, ContentEncoding: opts.ContentEncoding, ContentLanguage: opts.ContentLanguage, ContentMD5: opts.ContentMD5, BufferSize: opts.BufferSize, BeforeWrite: opts.BeforeWrite, } if len(opts.Metadata) > 0 { // Providers are inconsistent, but at least some treat keys // as case-insensitive. 
To make the behavior consistent, we // force-lowercase them when writing and reading. md := make(map[string]string, len(opts.Metadata)) for k, v := range opts.Metadata { if k == "" { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: WriterOptions.Metadata keys may not be empty strings") } if !utf8.ValidString(k) { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: WriterOptions.Metadata keys must be valid UTF-8 strings: %q", k) } if !utf8.ValidString(v) { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: WriterOptions.Metadata values must be valid UTF-8 strings: %q", v) } lowerK := strings.ToLower(k) if _, found := md[lowerK]; found { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: WriterOptions.Metadata has a duplicate case-insensitive metadata key: %q", lowerK) } md[lowerK] = v } dopts.Metadata = md } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return nil, errClosed } ctx, cancel := context.WithCancel(ctx) tctx := b.tracer.Start(ctx, "NewWriter") end := func(err error) { b.tracer.End(tctx, err) } defer func() { if err != nil { end(err) } }() w := &Writer{ b: b.b, end: end, cancel: cancel, key: key, opts: dopts, buf: bytes.NewBuffer([]byte{}), contentMD5: opts.ContentMD5, md5hash: md5.New(), provider: b.tracer.Provider, } if opts.ContentType != "" { t, p, err := mime.ParseMediaType(opts.ContentType) if err != nil { cancel() return nil, err } ct := mime.FormatMediaType(t, p) dw, err := b.b.NewTypedWriter(ctx, key, ct, dopts) if err != nil { cancel() return nil, wrapError(b.b, err) } w.w = dw } else { // Save the fields needed to called NewTypedWriter later, once we've gotten // sniffLen bytes. w.ctx = ctx w.key = key w.opts = dopts w.buf = bytes.NewBuffer([]byte{}) } _, file, lineno, ok := runtime.Caller(1) runtime.SetFinalizer(w, func(w *Writer) { if !w.closed { var caller string if ok { caller = fmt.Sprintf(" (%s:%d)", file, lineno) } log.Printf("A blob.Writer writing to %q was never closed%s", key, caller) } }) return w, nil } // Copy the blob stored at srcKey to dstKey. // A nil CopyOptions is treated the same as the zero value. // // If the source blob does not exist, Copy returns an error for which // gcerrors.Code will return gcerrors.NotFound. // // If the destination blob already exists, it is overwritten. func (b *Bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *CopyOptions) (err error) { if !utf8.ValidString(srcKey) { return gcerr.Newf(gcerr.InvalidArgument, nil, "blob: Copy srcKey must be a valid UTF-8 string: %q", srcKey) } if !utf8.ValidString(dstKey) { return gcerr.Newf(gcerr.InvalidArgument, nil, "blob: Copy dstKey must be a valid UTF-8 string: %q", dstKey) } if opts == nil { opts = &CopyOptions{} } dopts := &driver.CopyOptions{ BeforeCopy: opts.BeforeCopy, } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return errClosed } ctx = b.tracer.Start(ctx, "Copy") defer func() { b.tracer.End(ctx, err) }() return wrapError(b.b, b.b.Copy(ctx, dstKey, srcKey, dopts)) } // Delete deletes the blob stored at key. // // If the blob does not exist, Delete returns an error for which // gcerrors.Code will return gcerrors.NotFound. 
func (b *Bucket) Delete(ctx context.Context, key string) (err error) { if !utf8.ValidString(key) { return gcerr.Newf(gcerr.InvalidArgument, nil, "blob: Delete key must be a valid UTF-8 string: %q", key) } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return errClosed } ctx = b.tracer.Start(ctx, "Delete") defer func() { b.tracer.End(ctx, err) }() return wrapError(b.b, b.b.Delete(ctx, key)) } // SignedURL returns a URL that can be used to GET the blob for the duration // specified in opts.Expiry. // // A nil SignedURLOptions is treated the same as the zero value. // // It is valid to call SignedURL for a key that does not exist. // // If the provider implementation does not support this functionality, SignedURL // will return an error for which gcerrors.Code will return gcerrors.Unimplemented. func (b *Bucket) SignedURL(ctx context.Context, key string, opts *SignedURLOptions) (string, error) { if !utf8.ValidString(key) { return "", gcerr.Newf(gcerr.InvalidArgument, nil, "blob: SignedURL key must be a valid UTF-8 string: %q", key) } if opts == nil { opts = &SignedURLOptions{} } if opts.Expiry < 0 { return "", gcerr.Newf(gcerr.InvalidArgument, nil, "blob: SignedURLOptions.Expiry must be >= 0 (%v)", opts.Expiry) } if opts.Expiry == 0 { opts.Expiry = DefaultSignedURLExpiry } dopts := driver.SignedURLOptions{ Expiry: opts.Expiry, } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return "", errClosed } url, err := b.b.SignedURL(ctx, key, &dopts) return url, wrapError(b.b, err) } // Close releases any resources used for the bucket. func (b *Bucket) Close() error { b.mu.Lock() prev := b.closed b.closed = true b.mu.Unlock() if prev { return errClosed } return b.b.Close() } // DefaultSignedURLExpiry is the default duration for SignedURLOptions.Expiry. const DefaultSignedURLExpiry = 1 * time.Hour // SignedURLOptions sets options for SignedURL. type SignedURLOptions struct { // Expiry sets how long the returned URL is valid for. // Defaults to DefaultSignedURLExpiry. Expiry time.Duration } // ReaderOptions sets options for NewReader and NewRangedReader. // It is provided for future extensibility. type ReaderOptions struct{} // WriterOptions sets options for NewWriter. type WriterOptions struct { // BufferSize changes the default size in bytes of the chunks that // Writer will upload in a single request; larger blobs will be split into // multiple requests. // // This option may be ignored by some provider implementations. // // If 0, the provider implementation will choose a reasonable default. // // If the Writer is used to do many small writes concurrently, using a // smaller BufferSize may reduce memory usage. BufferSize int // CacheControl specifies caching attributes that providers may use // when serving the blob. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control CacheControl string // ContentDisposition specifies whether the blob content is expected to be // displayed inline or as an attachment. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition ContentDisposition string // ContentEncoding specifies the encoding used for the blob's content, if any. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding ContentEncoding string // ContentLanguage specifies the language used in the blob's content, if any. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language ContentLanguage string // ContentType specifies the MIME type of the blob being written. 
If not set, // it will be inferred from the content using the algorithm described at // http://mimesniff.spec.whatwg.org/. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type ContentType string // ContentMD5 is used as a message integrity check. // If len(ContentMD5) > 0, the MD5 hash of the bytes written must match // ContentMD5, or Close will return an error without completing the write. // https://tools.ietf.org/html/rfc1864 ContentMD5 []byte // Metadata holds key/value strings to be associated with the blob, or nil. // Keys may not be empty, and are lowercased before being written. // Duplicate case-insensitive keys (e.g., "foo" and "FOO") will result in // an error. Metadata map[string]string // BeforeWrite is a callback that will be called exactly once, before // any data is written (unless NewWriter returns an error, in which case // it will not be called at all). Note that this is not necessarily during // or after the first Write call, as providers may buffer bytes before // sending an upload request. // // asFunc converts its argument to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information. BeforeWrite func(asFunc func(interface{}) bool) error } // CopyOptions sets options for Copy. type CopyOptions struct { // BeforeCopy is a callback that will be called before the copy is // initiated. // // asFunc converts its argument to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information. BeforeCopy func(asFunc func(interface{}) bool) error } // BucketURLOpener represents types that can open buckets based on a URL. // The opener must not modify the URL argument. OpenBucketURL must be safe to // call from multiple goroutines. // // This interface is generally implemented by types in driver packages. type BucketURLOpener interface { OpenBucketURL(ctx context.Context, u *url.URL) (*Bucket, error) } // URLMux is a URL opener multiplexer. It matches the scheme of the URLs // against a set of registered schemes and calls the opener that matches the // URL's scheme. // See https://godoc.org/gocloud.dev#hdr-URLs for more information. // // The zero value is a multiplexer with no registered schemes. type URLMux struct { schemes openurl.SchemeMap } // BucketSchemes returns a sorted slice of the registered Bucket schemes. func (mux *URLMux) BucketSchemes() []string { return mux.schemes.Schemes() } // ValidBucketScheme returns true iff scheme has been registered for Buckets. func (mux *URLMux) ValidBucketScheme(scheme string) bool { return mux.schemes.ValidScheme(scheme) } // RegisterBucket registers the opener with the given scheme. If an opener // already exists for the scheme, RegisterBucket panics. func (mux *URLMux) RegisterBucket(scheme string, opener BucketURLOpener) { mux.schemes.Register("blob", "Bucket", scheme, opener) } // OpenBucket calls OpenBucketURL with the URL parsed from urlstr. // OpenBucket is safe to call from multiple goroutines. func (mux *URLMux) OpenBucket(ctx context.Context, urlstr string) (*Bucket, error) { opener, u, err := mux.schemes.FromString("Bucket", urlstr) if err != nil { return nil, err } return opener.(BucketURLOpener).OpenBucketURL(ctx, u) } // OpenBucketURL dispatches the URL to the opener that is registered with the // URL's scheme. OpenBucketURL is safe to call from multiple goroutines. 
func (mux *URLMux) OpenBucketURL(ctx context.Context, u *url.URL) (*Bucket, error) { opener, err := mux.schemes.FromURL("Bucket", u) if err != nil { return nil, err } return opener.(BucketURLOpener).OpenBucketURL(ctx, u) } var defaultURLMux = new(URLMux) // DefaultURLMux returns the URLMux used by OpenBucket. // // Driver packages can use this to register their BucketURLOpener on the mux. func DefaultURLMux() *URLMux { return defaultURLMux } // OpenBucket opens the bucket identified by the URL given. // See the URLOpener documentation in provider-specific subpackages for // details on supported URL formats, and https://godoc.org/gocloud.dev#hdr-URLs // for more information. func OpenBucket(ctx context.Context, urlstr string) (*Bucket, error) { return defaultURLMux.OpenBucket(ctx, urlstr) } func wrapError(b driver.Bucket, err error) error { if err == nil { return nil } if gcerr.DoNotWrap(err) { return err } return gcerr.New(b.ErrorCode(err), err, 2, "blob") } var errClosed = gcerr.Newf(gcerr.FailedPrecondition, nil, "blob: Bucket has been closed")
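As a quick, self-contained illustration of the API surface in the file above, here is a minimal usage sketch. Only calls that appear in that source are used (blob.OpenBucket, Bucket.WriteAll, Bucket.ReadAll, Bucket.Close, gcerrors.Code); the fileblob import, the file:// URL and the key names are illustrative assumptions rather than anything taken from this row.

package main

import (
	"context"
	"fmt"
	"log"

	"gocloud.dev/blob"
	_ "gocloud.dev/blob/fileblob" // assumed: registers the file:// scheme with the default URLMux
	"gocloud.dev/gcerrors"
)

func main() {
	ctx := context.Background()

	// Open a bucket via URL (placeholder path; the directory is assumed to exist).
	bucket, err := blob.OpenBucket(ctx, "file:///tmp/demo-bucket")
	if err != nil {
		log.Fatalf("could not open bucket: %v", err)
	}
	defer bucket.Close()

	// WriteAll computes an MD5 of the payload and passes it as ContentMD5,
	// so the write is integrity-checked when the writer is closed.
	if err := bucket.WriteAll(ctx, "greeting.txt", []byte("hello, blob"), nil); err != nil {
		log.Fatalf("write failed: %v", err)
	}

	// ReadAll is a shortcut for NewReader plus reading the whole blob.
	data, err := bucket.ReadAll(ctx, "greeting.txt")
	if err != nil {
		log.Fatalf("read failed: %v", err)
	}
	fmt.Printf("read %d bytes: %s\n", len(data), data)

	// Errors can be inspected portably with gcerrors.Code.
	if _, err := bucket.ReadAll(ctx, "missing.txt"); gcerrors.Code(err) == gcerrors.NotFound {
		fmt.Println("missing blob reported as NotFound")
	}
}

Swapping fileblob for another provider subpackage would only change the import and the URL scheme, which is the portability point the package documentation above makes.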
1
16,878
"for getting started guides" reads a bit weird, like it's missing punctuation. Maybe just "for a tutorial"?
google-go-cloud
go
@@ -50,9 +50,9 @@ class Formats extends AbstractBase * Constructor * * @param bool $enabled is this tab enabled? - * @param bool $urc use recaptcha? + * @param bool $uc use captcha? */ - public function __construct($enabled = true, $urc = false) + public function __construct($enabled = true, $uc = false) { $this->enabled = $enabled; }
1
<?php /** * Digital Content Formats tab * * PHP version 7 * * Copyright (C) Villanova University 2010. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * @category VuFind * @package RecordTabs * @author Demian Katz <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org/wiki/development:plugins:record_tabs Wiki */ namespace VuFind\RecordTab; /** * Digital Content Formats * * @category VuFind * @package RecordTabs * @author Demian Katz <[email protected]> * @author Brent Palmer <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org/wiki/development:plugins:record_tabs Wiki */ class Formats extends AbstractBase { /** * Is this tab enabled? * * @var bool */ protected $enabled; /** * Constructor * * @param bool $enabled is this tab enabled? * @param bool $urc use recaptcha? */ public function __construct($enabled = true, $urc = false) { $this->enabled = $enabled; } /** * Is this tab active? * * @return bool */ public function isActive() { return $this->enabled; } /** * Get the on-screen description for this tab. * * @return string */ public function getDescription() { return 'Formats'; } }
1
28,997
This looks like a dead parameter -- maybe a copy and paste error. If it's truly unused, maybe you can open a separate PR to simply delete it, and then it's one less detail to worry about here.
vufind-org-vufind
php
@@ -42,7 +42,11 @@ func (m *VerticaRowReader) GetNextRow() ([]values.Value, error) { row := make([]values.Value, len(m.columns)) for i, col := range m.columns { switch col := col.(type) { - case bool, int, uint, int64, uint64, float64, string: + case int: + row[i] = values.NewInt(int64(col)) + case uint: + row[i] = values.NewUInt(uint64(col)) + case bool, int64, uint64, float64, string: row[i] = values.New(col) case time.Time: row[i] = values.NewTime(values.ConvertTime(col))
1
package sql import ( "database/sql" "fmt" "time" "github.com/influxdata/flux" "github.com/influxdata/flux/codes" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/internal/errors" "github.com/influxdata/flux/values" ) type VerticaRowReader struct { Cursor *sql.Rows columns []interface{} columnTypes []flux.ColType columnNames []string } func (m *VerticaRowReader) Next() bool { next := m.Cursor.Next() if next { columnNames, err := m.Cursor.Columns() if err != nil { return false } m.columns = make([]interface{}, len(columnNames)) columnPointers := make([]interface{}, len(columnNames)) for i := 0; i < len(columnNames); i++ { columnPointers[i] = &m.columns[i] } if err := m.Cursor.Scan(columnPointers...); err != nil { return false } } return next } func (m *VerticaRowReader) GetNextRow() ([]values.Value, error) { row := make([]values.Value, len(m.columns)) for i, col := range m.columns { switch col := col.(type) { case bool, int, uint, int64, uint64, float64, string: row[i] = values.New(col) case time.Time: row[i] = values.NewTime(values.ConvertTime(col)) case []uint8: switch m.columnTypes[i] { case flux.TInt: newInt, err := UInt8ToInt64(col) if err != nil { return nil, err } row[i] = values.NewInt(newInt) case flux.TFloat: newFloat, err := UInt8ToFloat(col) if err != nil { return nil, err } row[i] = values.NewFloat(newFloat) case flux.TTime: t, err := time.Parse(layout, string(col)) if err != nil { fmt.Print(err) } row[i] = values.NewTime(values.ConvertTime(t)) default: row[i] = values.NewString(string(col)) } case nil: row[i] = values.NewNull(flux.SemanticType(m.columnTypes[i])) default: execute.PanicUnknownType(flux.TInvalid) } } return row, nil } func (m *VerticaRowReader) InitColumnNames(n []string) { m.columnNames = n } func (m *VerticaRowReader) InitColumnTypes(types []*sql.ColumnType) { stringTypes := make([]flux.ColType, len(types)) for i := 0; i < len(types); i++ { switch types[i].DatabaseTypeName() { case "INT", "INTEGER", "BIGINT", "SMALLINT", "TINYINT", "INT2", "INT4", "INT8", "SERIAL2", "SERIAL4", "SERIAL8": stringTypes[i] = flux.TInt case "FLOAT", "FLOAT4", "FLOAT8": stringTypes[i] = flux.TFloat case "DATE", "TIME", "TIMESTAMP": stringTypes[i] = flux.TTime case "BOOL": stringTypes[i] = flux.TBool case "TEXT", "VARCHAR", "VARBINARY": stringTypes[i] = flux.TString default: stringTypes[i] = flux.TString } } m.columnTypes = stringTypes } func (m *VerticaRowReader) ColumnNames() []string { return m.columnNames } func (m *VerticaRowReader) ColumnTypes() []flux.ColType { return m.columnTypes } func (m *VerticaRowReader) SetColumns(i []interface{}) { m.columns = i } func (m *VerticaRowReader) Close() error { if err := m.Cursor.Err(); err != nil { return err } return m.Cursor.Close() } func NewVerticaRowReader(r *sql.Rows) (execute.RowReader, error) { reader := &VerticaRowReader{ Cursor: r, } cols, err := r.Columns() if err != nil { return nil, err } reader.InitColumnNames(cols) types, err := r.ColumnTypes() if err != nil { return nil, err } reader.InitColumnTypes(types) return reader, nil } // VerticaTranslateColumn translates flux colTypes into their corresponding Vertica column type func VerticaColumnTranslateFunc() translationFunc { c := map[string]string{ flux.TFloat.String(): "FLOAT", flux.TInt.String(): "INTEGER", flux.TUInt.String(): "INTEGER", flux.TString.String(): "VARCHAR", flux.TTime.String(): "TIMESTAMP", flux.TBool.String(): "BOOL", } return func(f flux.ColType, colName string) (string, error) { s, found := c[f.String()] if !found { return "", 
errors.Newf(codes.Internal, "Vertica does not support column type %s", f.String()) } return colName + " " + s, nil } }
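The file above exposes NewVerticaRowReader together with the Next, GetNextRow and Close methods it is read with. Below is a hedged sketch of how that reader is typically driven over an already-executed query; the flux sql import path and the surrounding wiring are assumptions not shown in this row.

// Package verticaexample sketches how the row reader above might be driven.
package verticaexample

import (
	"database/sql"
	"fmt"

	fluxsql "github.com/influxdata/flux/stdlib/sql" // assumed import path for the package shown above
)

// dumpRows iterates an already-executed query through the Vertica row reader
// and prints each row of flux values.
func dumpRows(rows *sql.Rows) error {
	reader, err := fluxsql.NewVerticaRowReader(rows)
	if err != nil {
		return err
	}
	defer reader.Close()

	cols, err := rows.Columns()
	if err != nil {
		return err
	}
	for reader.Next() {
		row, err := reader.GetNextRow()
		if err != nil {
			return err
		}
		for i, v := range row {
			fmt.Printf("%s=%v ", cols[i], v)
		}
		fmt.Println()
	}
	return nil
}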
1
17,509
Nit: Should we add support for `int` and `uint` in the `values.New()` function? That would allow us to handle all of these types in one case. Maybe there's a good reason why we don't do that already, but I'm not sure what it is.
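A rough sketch of the idea behind this comment: widen the machine-sized int and uint cases once, then fall back to the single values.New constructor already used in the file. Because the internals of values.New are not shown in this row, the sketch is written as a hypothetical call-site helper rather than a change to values.New itself; values.New, values.NewInt and values.NewUInt are the constructors that do appear above.

package valuesexample

import "github.com/influxdata/flux/values"

// newValue widens int and uint before delegating to the existing constructors,
// so a reader's type switch needs only one arm for the common scalar types.
func newValue(col interface{}) values.Value {
	switch c := col.(type) {
	case int:
		return values.NewInt(int64(c))
	case uint:
		return values.NewUInt(uint64(c))
	default:
		// bool, int64, uint64, float64 and string are handled by values.New
		// in the file above; other types remain the caller's responsibility.
		return values.New(c)
	}
}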
influxdata-flux
go
@@ -14,7 +14,7 @@ import net.sourceforge.pmd.RuleViolation; /** * A {@link RuleViolation} implementation that is immutable, and therefore cache friendly */ -public final class CachedRuleViolation implements RuleViolation { +public class CachedRuleViolation implements RuleViolation { private final CachedRuleMapper mapper;
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.cache; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import net.sourceforge.pmd.Rule; import net.sourceforge.pmd.RuleViolation; /** * A {@link RuleViolation} implementation that is immutable, and therefore cache friendly */ public final class CachedRuleViolation implements RuleViolation { private final CachedRuleMapper mapper; private final String description; private final String fileName; private final String ruleClassName; private final int beginLine; private final int beginColumn; private final int endLine; private final int endColumn; private final String packageName; private final String className; private final String methodName; private final String variableName; private CachedRuleViolation(final CachedRuleMapper mapper, final String description, final String fileName, final String ruleClassName, final int beginLine, final int beginColumn, final int endLine, final int endColumn, final String packageName, final String className, final String methodName, final String variableName) { this.mapper = mapper; this.description = description; this.fileName = fileName; this.ruleClassName = ruleClassName; this.beginLine = beginLine; this.beginColumn = beginColumn; this.endLine = endLine; this.endColumn = endColumn; this.packageName = packageName; this.className = className; this.methodName = methodName; this.variableName = variableName; } @Override public Rule getRule() { // The mapper may be initialized after cache is loaded, so use it lazily return mapper.getRuleForClass(ruleClassName); } @Override public String getDescription() { return description; } @Override public boolean isSuppressed() { return false; // By definition, if cached, it was not suppressed } @Override public String getFilename() { return fileName; } @Override public int getBeginLine() { return beginLine; } @Override public int getBeginColumn() { return beginColumn; } @Override public int getEndLine() { return endLine; } @Override public int getEndColumn() { return endColumn; } @Override public String getPackageName() { return packageName; } @Override public String getClassName() { return className; } @Override public String getMethodName() { return methodName; } @Override public String getVariableName() { return variableName; } /** * Helper method to load a {@link CachedRuleViolation} from an input stream. * * @param stream The stream from which to load the violation. * @param fileName The name of the file on which this rule was reported. * @param mapper The mapper to be used to obtain rule instances from the active rulesets. * @return The loaded rule violation. 
* @throws IOException */ /* package */ static CachedRuleViolation loadFromStream(final DataInputStream stream, final String fileName, final CachedRuleMapper mapper) throws IOException { final String description = stream.readUTF(); final String ruleClassName = stream.readUTF(); final int beginLine = stream.readInt(); final int beginColumn = stream.readInt(); final int endLine = stream.readInt(); final int endColumn = stream.readInt(); final String packageName = stream.readUTF(); final String className = stream.readUTF(); final String methodName = stream.readUTF(); final String variableName = stream.readUTF(); return new CachedRuleViolation(mapper, description, fileName, ruleClassName, beginLine, beginColumn, endLine, endColumn, packageName, className, methodName, variableName); } /** * Helper method to store a {@link RuleViolation} in an output stream to be later * retrieved as a {@link CachedRuleViolation} * * @param stream The stream on which to store the violation. * @param violation The rule violation to cache. * @throws IOException */ /* package */ static void storeToStream(final DataOutputStream stream, final RuleViolation violation) throws IOException { stream.writeUTF(getValueOrEmpty(violation.getDescription())); stream.writeUTF(getValueOrEmpty(violation.getRule().getRuleClass())); stream.writeInt(violation.getBeginLine()); stream.writeInt(violation.getBeginColumn()); stream.writeInt(violation.getEndLine()); stream.writeInt(violation.getEndColumn()); stream.writeUTF(getValueOrEmpty(violation.getPackageName())); stream.writeUTF(getValueOrEmpty(violation.getClassName())); stream.writeUTF(getValueOrEmpty(violation.getMethodName())); stream.writeUTF(getValueOrEmpty(violation.getVariableName())); } private static String getValueOrEmpty(final String value) { return value == null ? "" : value; } }
1
13,447
If you are not dealing with the cache now, please revert these changes. On their own they make little sense.
pmd-pmd
java
@@ -56,7 +56,7 @@ public abstract class NodeGenerator extends Generator { throw new AssertionError(f("Wanted to regenerate a method with signature %s in %s, but it wasn't there.", callable.getSignature(), containingClassOrInterface.getNameAsString())); }); } - + private void addMethod( ClassOrInterfaceDeclaration containingClassOrInterface, CallableDeclaration<?> callable,
1
package com.github.javaparser.generator; import com.github.javaparser.ast.CompilationUnit; import com.github.javaparser.ast.body.CallableDeclaration; import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration; import com.github.javaparser.metamodel.BaseNodeMetaModel; import com.github.javaparser.metamodel.JavaParserMetaModel; import com.github.javaparser.utils.Log; import com.github.javaparser.utils.SourceRoot; import java.io.IOException; import java.util.List; import static com.github.javaparser.utils.CodeGenerationUtils.f; /** * Makes it easier to generate code in the core AST nodes. The generateNode method will get every node type passed to * it, ready for modification. */ public abstract class NodeGenerator extends Generator { protected NodeGenerator(SourceRoot sourceRoot) { super(sourceRoot); } public final void generate() throws Exception { Log.info("Running %s", getClass().getSimpleName()); for (BaseNodeMetaModel nodeMetaModel : JavaParserMetaModel.getNodeMetaModels()) { CompilationUnit nodeCu = sourceRoot.parse(nodeMetaModel.getPackageName(), nodeMetaModel.getTypeName() + ".java"); ClassOrInterfaceDeclaration nodeCoid = nodeCu.getClassByName(nodeMetaModel.getTypeName()).orElseThrow(() -> new IOException("Can't find class")); generateNode(nodeMetaModel, nodeCu, nodeCoid); } after(); } protected void after() throws Exception { } protected abstract void generateNode(BaseNodeMetaModel nodeMetaModel, CompilationUnit nodeCu, ClassOrInterfaceDeclaration nodeCoid); /** * Utility method that looks for a method or constructor with an identical signature as "callable" and replaces it * with callable. If not found, adds callable. When the new callable has no javadoc, any old javadoc will be kept. */ protected void addOrReplaceWhenSameSignature(ClassOrInterfaceDeclaration containingClassOrInterface, CallableDeclaration<?> callable) { addMethod(containingClassOrInterface, callable, () -> containingClassOrInterface.addMember(callable)); } /** * Utility method that looks for a method or constructor with an identical signature as "callable" and replaces it * with callable. If not found, fails. When the new callable has no javadoc, any old javadoc will be kept. */ protected void replaceWhenSameSignature(ClassOrInterfaceDeclaration containingClassOrInterface, CallableDeclaration<?> callable) { addMethod(containingClassOrInterface, callable, () -> { throw new AssertionError(f("Wanted to regenerate a method with signature %s in %s, but it wasn't there.", callable.getSignature(), containingClassOrInterface.getNameAsString())); }); } private void addMethod( ClassOrInterfaceDeclaration containingClassOrInterface, CallableDeclaration<?> callable, Runnable onNoExistingMethod) { List<CallableDeclaration<?>> existingCallables = containingClassOrInterface.getCallablesWithSignature(callable.getSignature()); if (existingCallables.isEmpty()) { onNoExistingMethod.run(); return; } if (existingCallables.size() > 1) { throw new AssertionError(f("Wanted to regenerate a method with signature %s in %s, but found more than one.", callable.getSignature(), containingClassOrInterface.getNameAsString())); } final CallableDeclaration<?> existingCallable = existingCallables.get(0); callable.setJavadocComment(callable.getJavadocComment().orElse(existingCallable.getJavadocComment().orElse(null))); containingClassOrInterface.getMembers().replace(existingCallable, callable); } }
1
11,044
Wearing my extra-douche-bag hat, I would say no spaces on a blank line. Maybe at some point we could have some automated process remove these things. For now I would not bother changing it.
javaparser-javaparser
java
@@ -0,0 +1,7 @@ +package de.danoeh.antennapod.core.event; + +public class ShowRemainTimeUpdateEvent { + public ShowRemainTimeUpdateEvent() { + + } +}
1
1
18,377
I think it would be better to use an `ItemUpdatedEvent`, as is done for the "prefer streaming" preference. We already have a ton of events that all list fragments need to handle and that just do the same thing everywhere. I think we could even remove some of the existing events in the future.
AntennaPod-AntennaPod
java
@@ -0,0 +1,17 @@ +class AddSlugToProducts < ActiveRecord::Migration + def change + add_column :products, :slug, :string, null: true + + products = select_all("select id, name from products") + products.each do |product| + update(<<-SQL) + UPDATE products + SET slug='#{product["name"].parameterize}' + WHERE id=#{product["id"]} + SQL + end + + change_column_null :products, :slug, false + add_index :products, :slug, unique: true + end +end
1
1
10,750
I think we have to manually write a down for this migration.
thoughtbot-upcase
rb
@@ -454,14 +454,13 @@ class _InternalFrame(object): assert isinstance(sdf, spark.DataFrame) if index_map is None: - # Here is when Koalas DataFrame is created directly from Spark DataFrame. - assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in sdf.schema.names), \ + assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in sdf.columns), \ "Index columns should not appear in columns of the Spark DataFrame. Avoid " \ - "index colum names [%s]." % SPARK_INDEX_NAME_PATTERN + "index column names [%s]." % SPARK_INDEX_NAME_PATTERN # Create default index. - index_map = [(SPARK_INDEX_NAME_FORMAT(0), None)] - sdf = _InternalFrame.attach_default_index(sdf) + sdf, index_column = _InternalFrame.attach_default_index(sdf) + index_map = [(index_column, None)] if NATURAL_ORDER_COLUMN_NAME not in sdf.columns: sdf = sdf.withColumn(NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id())
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ An internal immutable DataFrame with some metadata to manage indexes. """ import re from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING from itertools import accumulate import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like from pyspark import sql as spark from pyspark._globals import _NoValue, _NoValueType from pyspark.sql import functions as F, Window from pyspark.sql.functions import PandasUDFType, pandas_udf from pyspark.sql.types import BooleanType, DataType, StructField, StructType, LongType try: from pyspark.sql.types import to_arrow_type except ImportError: from pyspark.sql.pandas.types import to_arrow_type from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. if TYPE_CHECKING: # This is required in old Python 3.5 to prevent circular reference. from databricks.koalas.series import Series from databricks.koalas.config import get_option from databricks.koalas.typedef import infer_pd_series_spark_type, spark_type_to_pandas_dtype from databricks.koalas.utils import (column_index_level, default_session, lazy_property, name_like_string, scol_for) # A function to turn given numbers to Spark columns that represent Koalas index. SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format # A pattern to check if the name of a Spark column is a Koalas index name or not. SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__") NATURAL_ORDER_COLUMN_NAME = '__natural_order__' HIDDEN_COLUMNS = set([NATURAL_ORDER_COLUMN_NAME]) IndexMap = Tuple[str, Optional[Tuple[str, ...]]] class _InternalFrame(object): """ The internal immutable DataFrame which manages Spark DataFrame and column names and index information. .. note:: this is an internal class. It is not supposed to be exposed to users and users should not directly access to it. The internal immutable DataFrame represents the index information for a DataFrame it belongs to. For instance, if we have a Koalas DataFrame as below, Pandas DataFrame does not store the index as columns. >>> kdf = ks.DataFrame({ ... 'A': [1, 2, 3, 4], ... 'B': [5, 6, 7, 8], ... 'C': [9, 10, 11, 12], ... 'D': [13, 14, 15, 16], ... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E']) >>> kdf # doctest: +NORMALIZE_WHITESPACE A B C D E 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 However, all columns including index column are also stored in Spark DataFrame internally as below. >>> kdf._internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ In order to fill this gap, the current metadata is used by mapping Spark's internal column to Koalas' index. 
See the method below: * `sdf` represents the internal Spark DataFrame * `data_columns` represents non-indexing columns * `index_columns` represents internal index columns * `columns` represents all columns * `index_names` represents the external index name * `index_map` is zipped pairs of `index_columns` and `index_names` * `spark_df` represents Spark DataFrame derived by the metadata * `pandas_df` represents pandas DataFrame derived by the metadata >>> internal = kdf._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+---+---+---+---+---+-----------------+ |__index_level_0__| A| B| C| D| E|__natural_order__| +-----------------+---+---+---+---+---+-----------------+ | 0| 1| 5| 9| 13| 17|...| | 1| 2| 6| 10| 14| 18|...| | 2| 3| 7| 11| 15| 19|...| | 3| 4| 8| 12| 16| 20|...| +-----------------+---+---+---+---+---+-----------------+ >>> internal.data_columns ['A', 'B', 'C', 'D', 'E'] >>> internal.index_columns ['__index_level_0__'] >>> internal.columns ['__index_level_0__', 'A', 'B', 'C', 'D', 'E'] >>> internal.index_names [None] >>> internal.index_map [('__index_level_0__', None)] >>> internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal.pandas_df A B C D E 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 In case that index is set to one of the existing column as below: >>> kdf1 = kdf.set_index("A") >>> kdf1 # doctest: +NORMALIZE_WHITESPACE B C D E A 1 5 9 13 17 2 6 10 14 18 3 7 11 15 19 4 8 12 16 20 >>> kdf1._internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal = kdf1._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+---+---+---+---+---+-----------------+ |__index_level_0__| A| B| C| D| E|__natural_order__| +-----------------+---+---+---+---+---+-----------------+ | 0| 1| 5| 9| 13| 17|...| | 1| 2| 6| 10| 14| 18|...| | 2| 3| 7| 11| 15| 19|...| | 3| 4| 8| 12| 16| 20|...| +-----------------+---+---+---+---+---+-----------------+ >>> internal.data_columns ['B', 'C', 'D', 'E'] >>> internal.index_columns ['A'] >>> internal.columns ['A', 'B', 'C', 'D', 'E'] >>> internal.index_names [('A',)] >>> internal.index_map [('A', ('A',))] >>> internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B C D E A 1 5 9 13 17 2 6 10 14 18 3 7 11 15 19 4 8 12 16 20 The `spark_df` will drop the index columns: >>> internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+ | B| C| D| E| +---+---+---+---+ | 5| 9| 13| 17| | 6| 10| 14| 18| | 7| 11| 15| 19| | 8| 12| 16| 20| +---+---+---+---+ but if `drop=False`, the columns will still remain in `spark_df`: >>> kdf.set_index("A", 
drop=False)._internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ In case that index becomes a multi index as below: >>> kdf2 = kdf.set_index("A", append=True) >>> kdf2 # doctest: +NORMALIZE_WHITESPACE B C D E A 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 >>> kdf2._internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal = kdf2._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+---+---+---+---+---+-----------------+ |__index_level_0__| A| B| C| D| E|__natural_order__| +-----------------+---+---+---+---+---+-----------------+ | 0| 1| 5| 9| 13| 17|...| | 1| 2| 6| 10| 14| 18|...| | 2| 3| 7| 11| 15| 19|...| | 3| 4| 8| 12| 16| 20|...| +-----------------+---+---+---+---+---+-----------------+ >>> internal.data_columns ['B', 'C', 'D', 'E'] >>> internal.index_columns ['__index_level_0__', 'A'] >>> internal.columns ['__index_level_0__', 'A', 'B', 'C', 'D', 'E'] >>> internal.index_names [None, ('A',)] >>> internal.index_map [('__index_level_0__', None), ('A', ('A',))] >>> internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B C D E A 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 For multi-level columns, it also holds column_index >>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ... ('Y', 'C'), ('Y', 'D')]) >>> kdf3 = ks.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16], ... [17, 18, 19, 20]], columns = columns) >>> kdf3 # doctest: +NORMALIZE_WHITESPACE X Y A B C D 0 1 2 3 4 1 5 6 7 8 2 9 10 11 12 3 13 14 15 16 4 17 18 19 20 >>> internal = kdf3._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+------+------+------+------+-----------------+ |__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__| +-----------------+------+------+------+------+-----------------+ | 0| 1| 2| 3| 4|...| | 1| 5| 6| 7| 8|...| | 2| 9| 10| 11| 12|...| | 3| 13| 14| 15| 16|...| | 4| 17| 18| 19| 20|...| +-----------------+------+------+------+------+-----------------+ >>> internal.data_columns ['(X, A)', '(X, B)', '(Y, C)', '(Y, D)'] >>> internal.column_index [('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')] For series, it also holds scol to represent the column. 
>>> kseries = kdf1.B >>> kseries A 1 5 2 6 3 7 4 8 Name: B, dtype: int64 >>> internal = kseries._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+---+---+---+---+---+-----------------+ |__index_level_0__| A| B| C| D| E|__natural_order__| +-----------------+---+---+---+---+---+-----------------+ | 0| 1| 5| 9| 13| 17|...| | 1| 2| 6| 10| 14| 18|...| | 2| 3| 7| 11| 15| 19|...| | 3| 4| 8| 12| 16| 20|...| +-----------------+---+---+---+---+---+-----------------+ >>> internal.scol Column<b'B'> >>> internal.data_columns ['B'] >>> internal.index_columns ['A'] >>> internal.columns ['A', 'B'] >>> internal.index_names [('A',)] >>> internal.index_map [('A', ('A',))] >>> internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+ | A| B| +---+---+ | 1| 5| | 2| 6| | 3| 7| | 4| 8| +---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B A 1 5 2 6 3 7 4 8 """ def __init__(self, sdf: spark.DataFrame, index_map: Optional[List[IndexMap]], column_index: Optional[List[Tuple[str, ...]]] = None, column_scols: Optional[List[spark.Column]] = None, column_index_names: Optional[List[str]] = None, scol: Optional[spark.Column] = None) -> None: """ Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and index fields and names. :param sdf: Spark DataFrame to be managed. :param index_map: list of string pair Each pair holds the index field name which exists in Spark fields, and the index name. :param column_index: list of tuples with the same length The multi-level values in the tuples. :param column_scols: list of Spark Column Spark Columns to appear as columns. If scol is not None, this argument is ignored, otherwise if this is None, calculated from sdf. :param column_index_names: Names for each of the index levels. :param scol: Spark Column to be managed. See the examples below to refer what each parameter means. >>> column_index = pd.MultiIndex.from_tuples( ... [('a', 'x'), ('a', 'y'), ('b', 'z')], names=["column_index_a", "column_index_b"]) >>> row_index = pd.MultiIndex.from_tuples( ... [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')], ... names=["row_index_a", "row_index_b"]) >>> kdf = ks.DataFrame( ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_index) >>> kdf.set_index(('a', 'x'), append=True, inplace=True) >>> kdf # doctest: +NORMALIZE_WHITESPACE column_index_a a b column_index_b y z row_index_a row_index_b (a, x) foo bar 1 2 3 4 5 6 zoo bar 7 8 9 >>> internal = kdf[('a', 'y')]._internal >>> internal._sdf.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------+-----------+------+------+------+... |row_index_a|row_index_b|(a, x)|(a, y)|(b, z)|... +-----------+-----------+------+------+------+... | foo| bar| 1| 2| 3|... | foo| bar| 4| 5| 6|... | zoo| bar| 7| 8| 9|... +-----------+-----------+------+------+------+... >>> internal._index_map # doctest: +NORMALIZE_WHITESPACE [('row_index_a', ('row_index_a',)), ('row_index_b', ('row_index_b',)), ('(a, x)', ('a', 'x'))] >>> internal._column_index [('a', 'y')] >>> internal._column_scols [Column<b'(a, y)'>] >>> list(internal._column_index_names) ['column_index_a', 'column_index_b'] >>> internal._scol Column<b'(a, y)'> """ assert isinstance(sdf, spark.DataFrame) if index_map is None: # Here is when Koalas DataFrame is created directly from Spark DataFrame. assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in sdf.schema.names), \ "Index columns should not appear in columns of the Spark DataFrame. 
Avoid " \ "index colum names [%s]." % SPARK_INDEX_NAME_PATTERN # Create default index. index_map = [(SPARK_INDEX_NAME_FORMAT(0), None)] sdf = _InternalFrame.attach_default_index(sdf) if NATURAL_ORDER_COLUMN_NAME not in sdf.columns: sdf = sdf.withColumn(NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()) assert all(isinstance(index_field, str) and (index_name is None or (isinstance(index_name, tuple) and all(isinstance(name, str) for name in index_name))) for index_field, index_name in index_map), index_map assert scol is None or isinstance(scol, spark.Column) assert column_scols is None or all(isinstance(scol, spark.Column) for scol in column_scols) self._sdf = sdf # type: spark.DataFrame self._index_map = index_map # type: List[IndexMap] self._scol = scol # type: Optional[spark.Column] if scol is not None: self._column_scols = [scol] elif column_scols is None: index_columns = set(index_column for index_column, _ in self._index_map) self._column_scols = [scol_for(sdf, col) for col in sdf.columns if col not in index_columns and col not in HIDDEN_COLUMNS] else: self._column_scols = column_scols if scol is not None: assert column_index is not None and len(column_index) == 1, column_index assert all(idx is None or (isinstance(idx, tuple) and len(idx) > 0) for idx in column_index), column_index self._column_index = column_index elif column_index is None: self._column_index = [(sdf.select(scol).columns[0],) for scol in self._column_scols] else: assert len(column_index) == len(self._column_scols), \ (len(column_index), len(self._column_scols)) assert all(isinstance(i, tuple) for i in column_index), column_index assert len(set(len(i) for i in column_index)) <= 1, column_index self._column_index = column_index if column_index_names is not None and not is_list_like(column_index_names): raise ValueError('Column_index_names should be list-like or None for a MultiIndex') if isinstance(column_index_names, list): if all(name is None for name in column_index_names): self._column_index_names = None else: self._column_index_names = column_index_names else: self._column_index_names = column_index_names @staticmethod def attach_default_index(sdf, default_index_type=None): """ This method attaches a default index to Spark DataFrame. Spark does not have the index notion so corresponding column should be generated. There are several types of default index can be configured by `compute.default_index_type`. """ if default_index_type is None: default_index_type = get_option("compute.default_index_type") scols = [scol_for(sdf, column) for column in sdf.columns] if default_index_type == "sequence": sequential_index = F.row_number().over( Window.orderBy(F.monotonically_increasing_id())) - 1 return sdf.select(sequential_index.alias(SPARK_INDEX_NAME_FORMAT(0)), *scols) elif default_index_type == "distributed-sequence": # 1. Calculates counts per each partition ID. `counts` here is, for instance, # { # 1: 83, # 6: 83, # 3: 83, # ... # } sdf = sdf.withColumn("__spark_partition_id", F.spark_partition_id()) counts = map(lambda x: (x["key"], x["count"]), sdf.groupby(sdf['__spark_partition_id'].alias("key")).count().collect()) # 2. Calculates cumulative sum in an order of partition id. # Note that it does not matter if partition id guarantees its order or not. # We just need a one-by-one sequential id. # sort by partition key. sorted_counts = sorted(counts, key=lambda x: x[0]) # get cumulative sum in an order of partition key. 
cumulative_counts = [0] + list(accumulate(map(lambda count: count[1], sorted_counts))) # zip it with partition key. sums = dict(zip(map(lambda count: count[0], sorted_counts), cumulative_counts)) # 3. Attach offset for each partition. @pandas_udf(LongType(), PandasUDFType.SCALAR) def offset(id): current_partition_offset = sums[id.iloc[0]] return pd.Series(current_partition_offset).repeat(len(id)) sdf = sdf.withColumn('__offset__', offset('__spark_partition_id')) # 4. Calculate row_number in each partition. w = Window.partitionBy('__spark_partition_id').orderBy(F.monotonically_increasing_id()) row_number = F.row_number().over(w) sdf = sdf.withColumn('__row_number__', row_number) # 5. Calcuate the index. return sdf.select( F.expr('__offset__ + __row_number__ - 1').alias(SPARK_INDEX_NAME_FORMAT(0)), *scols) elif default_index_type == "distributed": return sdf.select( F.monotonically_increasing_id().alias(SPARK_INDEX_NAME_FORMAT(0)), *scols) else: raise ValueError("'compute.default_index_type' should be one of 'sequence'," " 'distributed-sequence' and 'distributed'") @lazy_property def _column_index_to_name(self) -> Dict[Tuple[str, ...], str]: return dict(zip(self.column_index, self.data_columns)) def column_name_for(self, column_name_or_index: Union[str, Tuple[str, ...]]) -> str: """ Return the actual Spark column name for the given column name or index. """ if column_name_or_index in self._column_index_to_name: return self._column_index_to_name[column_name_or_index] else: if not isinstance(column_name_or_index, str): raise KeyError(name_like_string(column_name_or_index)) return column_name_or_index @lazy_property def _column_index_to_scol(self) -> Dict[Tuple[str, ...], spark.Column]: return dict(zip(self.column_index, self.column_scols)) def scol_for(self, column_name_or_index: Union[str, Tuple[str, ...]]): """ Return Spark Column for the given column name or index. """ if column_name_or_index in self._column_index_to_scol: return self._column_index_to_scol[column_name_or_index] else: return scol_for(self._sdf, self.column_name_for(column_name_or_index)) def spark_type_for(self, column_name_or_index: Union[str, Tuple[str, ...]]) -> DataType: """ Return DataType for the given column name or index. """ return self._sdf.select(self.scol_for(column_name_or_index)).schema[0].dataType @property def sdf(self) -> spark.DataFrame: """ Return the managed Spark DataFrame. """ return self._sdf @lazy_property def data_columns(self) -> List[str]: """ Return the managed column field names. """ return self.sdf.select(self.column_scols).columns @property def column_scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed data columns. """ return self._column_scols @lazy_property def index_columns(self) -> List[str]: """ Return the managed index field names. """ return [index_column for index_column, _ in self._index_map] @lazy_property def index_scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed index columns. """ return [self.scol_for(column) for column in self.index_columns] @lazy_property def columns(self) -> List[str]: """ Return all the field names including index field names. """ index_columns = set(self.index_columns) return self.index_columns + [column for column in self.data_columns if column not in index_columns] @lazy_property def scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed columns including index columns. 
""" return [self.scol_for(column) for column in self.columns] @property def index_map(self) -> List[IndexMap]: """ Return the managed index information. """ assert len(self._index_map) > 0 return self._index_map @lazy_property def index_names(self) -> List[Optional[Tuple[str, ...]]]: """ Return the managed index names. """ return [index_name for _, index_name in self.index_map] @property def scol(self) -> Optional[spark.Column]: """ Return the managed Spark Column. """ return self._scol @property def column_index(self) -> List[Tuple[str, ...]]: """ Return the managed column index. """ return self._column_index @lazy_property def column_index_level(self) -> int: """ Return the level of the column index. """ return column_index_level(self._column_index) @property def column_index_names(self) -> Optional[List[str]]: """ Return names of the index levels. """ return self._column_index_names @lazy_property def spark_internal_df(self) -> spark.DataFrame: """ Return as Spark DataFrame. This contains index columns as well and should be only used for internal purposes. """ index_columns = set(self.index_columns) data_columns = [] for i, (column, idx) in enumerate(zip(self.data_columns, self.column_index)): if column not in index_columns: scol = self.scol_for(idx) name = str(i) if idx is None else name_like_string(idx) if column != name: scol = scol.alias(name) data_columns.append(scol) return self._sdf.select(self.index_scols + data_columns) @lazy_property def spark_df(self) -> spark.DataFrame: """ Return as Spark DataFrame. """ data_columns = [] for i, (column, idx) in enumerate(zip(self.data_columns, self.column_index)): scol = self.scol_for(idx) name = str(i) if idx is None else name_like_string(idx) if column != name: scol = scol.alias(name) data_columns.append(scol) return self._sdf.select(data_columns) @lazy_property def pandas_df(self): """ Return as pandas DataFrame. """ sdf = self.spark_internal_df pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: pdf = pdf.astype({field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}) index_columns = self.index_columns if len(index_columns) > 0: append = False for index_field in index_columns: drop = index_field not in self.data_columns pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[[col if col in index_columns else str(i) if idx is None else name_like_string(idx) for i, (col, idx) in enumerate(zip(self.data_columns, self.column_index))]] if self.column_index_level > 1: pdf.columns = pd.MultiIndex.from_tuples(self._column_index) else: pdf.columns = [None if idx is None else idx[0] for idx in self._column_index] if self._column_index_names is not None: pdf.columns.names = self._column_index_names index_names = self.index_names if len(index_names) > 0: pdf.index.names = [name if name is None or len(name) > 1 else name[0] for name in index_names] return pdf def with_new_columns(self, scols_or_ksers: List[Union[spark.Column, 'Series']], column_index: Optional[List[Tuple[str, ...]]] = None, keep_order: bool = True) -> '_InternalFrame': """ Copy the immutable DataFrame with the updates by the specified Spark Columns or Series. :param scols_or_ksers: the new Spark Columns or Series. :param column_index: the new column index. If None, the its column_index is used when the corresponding `scols_or_ksers` is Series, otherwise the original one is used. :return: the copied immutable DataFrame. 
""" from databricks.koalas.series import Series if column_index is None: if all(isinstance(scol_or_kser, Series) for scol_or_kser in scols_or_ksers): column_index = [kser._internal.column_index[0] for kser in scols_or_ksers] else: assert len(scols_or_ksers) == len(self.column_index), \ (len(scols_or_ksers), len(self.column_index)) column_index = [] for scol_or_kser, idx in zip(scols_or_ksers, self.column_index): if isinstance(scol_or_kser, Series): column_index.append(scol_or_kser._internal.column_index[0]) else: column_index.append(idx) else: assert len(scols_or_ksers) == len(column_index), \ (len(scols_or_ksers), len(column_index)) column_scols = [] for scol_or_kser, idx in zip(scols_or_ksers, column_index): if isinstance(scol_or_kser, Series): scol = scol_or_kser._internal.scol else: scol = scol_or_kser column_scols.append(scol.alias(name_like_string(idx))) # type: ignore hidden_columns = [] if keep_order: hidden_columns.append(NATURAL_ORDER_COLUMN_NAME) sdf = self._sdf.select(self.index_scols + column_scols + hidden_columns) return self.copy( sdf=sdf, column_index=column_index, column_scols=[scol_for(sdf, name_like_string(idx)) for idx in column_index], scol=None) def with_filter(self, pred: Union[spark.Column, 'Series']): """ Copy the immutable DataFrame with the updates by the predicate. :param pred: the predicate to filter. :return: the copied immutable DataFrame. """ from databricks.koalas.series import Series if isinstance(pred, Series): assert isinstance(pred.spark_type, BooleanType), pred.spark_type pred = pred._scol else: spark_type = self._sdf.select(pred).schema[0].dataType assert isinstance(spark_type, BooleanType), spark_type return self.copy(sdf=self._sdf.drop(NATURAL_ORDER_COLUMN_NAME).filter(pred)) def copy(self, sdf: Union[spark.DataFrame, _NoValueType] = _NoValue, index_map: Union[List[IndexMap], _NoValueType] = _NoValue, column_index: Union[List[Tuple[str, ...]], _NoValueType] = _NoValue, column_scols: Union[List[spark.Column], _NoValueType] = _NoValue, column_index_names: Optional[Union[List[str], _NoValueType]] = _NoValue, scol: Union[spark.Column, _NoValueType] = _NoValue) -> '_InternalFrame': """ Copy the immutable DataFrame. :param sdf: the new Spark DataFrame. If None, then the original one is used. :param index_map: the new index information. If None, then the original one is used. :param column_index: the new column index. :param column_scols: the new Spark Columns. If None, then the original ones are used. :param column_index_names: the new names of the index levels. :param scol: the new Spark Column. If None, then the original one is used. :return: the copied immutable DataFrame. """ if sdf is _NoValue: sdf = self._sdf if index_map is _NoValue: index_map = self._index_map if column_index is _NoValue: column_index = self._column_index if column_scols is _NoValue: column_scols = self._column_scols if column_index_names is _NoValue: column_index_names = self._column_index_names if scol is _NoValue: scol = self._scol return _InternalFrame(sdf, index_map=index_map, column_index=column_index, column_scols=column_scols, column_index_names=column_index_names, scol=scol) @staticmethod def from_pandas(pdf: pd.DataFrame) -> '_InternalFrame': """ Create an immutable DataFrame from pandas DataFrame. 
:param pdf: :class:`pd.DataFrame` :return: the created immutable DataFrame """ columns = pdf.columns data_columns = [name_like_string(col) for col in columns] if isinstance(columns, pd.MultiIndex): column_index = columns.tolist() else: column_index = None column_index_names = columns.names index = pdf.index index_map = [] # type: List[IndexMap] if isinstance(index, pd.MultiIndex): if index.names is None: index_map = [(SPARK_INDEX_NAME_FORMAT(i), None) for i in range(len(index.levels))] else: index_map = [ (SPARK_INDEX_NAME_FORMAT(i) if name is None else name_like_string(name), name if name is None or isinstance(name, tuple) else (name,)) for i, name in enumerate(index.names)] else: name = index.name index_map = [(name_like_string(name) if name is not None else SPARK_INDEX_NAME_FORMAT(0), name if name is None or isinstance(name, tuple) else (name,))] index_columns = [index_column for index_column, _ in index_map] reset_index = pdf.reset_index() reset_index.columns = index_columns + data_columns schema = StructType([StructField(name_like_string(name), infer_pd_series_spark_type(col), nullable=bool(col.isnull().any())) for name, col in reset_index.iteritems()]) for name, col in reset_index.iteritems(): dt = col.dtype if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt): continue reset_index[name] = col.replace({np.nan: None}) sdf = default_session().createDataFrame(reset_index, schema=schema) return _InternalFrame(sdf=sdf, index_map=index_map, column_index=column_index, column_scols=[scol_for(sdf, col) for col in data_columns], column_index_names=column_index_names)
1
13,991
I don't think we need this check anymore, but I'd leave it as an assertion for now.
databricks-koalas
py
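
The oldf in this record walks through the "distributed-sequence" default index: count rows per Spark partition, sort the counts by partition id, turn them into cumulative offsets, then add each row's in-partition row number. The sketch below replays just that offset arithmetic in plain Python with made-up partition counts; the dict literal and the global_index helper are illustrative only and are not part of the Koalas API.

# Minimal pure-Python sketch of the offset computation behind the
# "distributed-sequence" default index: per-partition row counts become
# cumulative offsets, and a row's global index is its partition offset plus
# its (0-based) row number within the partition. Counts are hypothetical.
from itertools import accumulate

counts = {6: 3, 1: 2, 3: 4}                     # partition id -> row count (made up)
sorted_counts = sorted(counts.items())          # order by partition id
cumulative = [0] + list(accumulate(c for _, c in sorted_counts))
offsets = {pid: cumulative[i] for i, (pid, _) in enumerate(sorted_counts)}

def global_index(partition_id, row_number):
    # row_number is 0-based within its partition
    return offsets[partition_id] + row_number

print(offsets)               # {1: 0, 3: 2, 6: 6}
print(global_index(3, 1))    # 3
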
@@ -203,4 +203,9 @@ class Cart < ActiveRecord::Base 0.0 end end + + # may be replaced with paper-trail or similar at some point + def version + self.updated_at.to_i + end end
1
require 'csv' class Cart < ActiveRecord::Base include PropMixin include ProposalDelegate has_one :approval_group has_many :user_roles, through: :approval_group has_many :api_tokens, through: :approvals has_many :comments, as: :commentable has_many :properties, as: :hasproperties #TODO: validates_uniqueness_of :name ORIGINS = %w(navigator ncr gsa18f) def rejections self.approvals.rejected end def awaiting_approvals self.approvals.pending end def awaiting_approvers self.approvers.merge(self.awaiting_approvals) end def ordered_approvals self.approvals.ordered end def ordered_awaiting_approvals self.ordered_approvals.pending end # users with outstanding cart_notification_emails def currently_awaiting_approvers if self.parallel? self.awaiting_approvers else # linear. Assumes the cart is open self.approvers.merge(self.ordered_awaiting_approvals).limit(1) end end def approved_approvals self.approvals.approved end def all_approvals_received? self.approvals.where.not(status: 'approved').empty? end def self.initialize_cart params cart = self.existing_or_new_cart params cart.initialize_approval_group params cart.setup_proposal(params) self.copy_existing_approvals_to(cart, name) cart end def self.existing_or_new_cart(params) name = params['cartName'].presence || params['cartNumber'].to_s pending_cart = Cart.pending.find_by(name: name) if pending_cart cart = reset_existing_cart(pending_cart) else #There is no existing cart or the existing cart is already approved cart = self.new(name: name, external_id: params['cartNumber']) end cart end def initialize_approval_group(params) if params['approvalGroup'] self.approval_group = ApprovalGroup.find_by_name(params['approvalGroup']) if self.approval_group.nil? raise ApprovalGroupError.new('Approval Group Not Found') end end end def determine_flow(params) params['flow'].presence || self.approval_group.try(:flow) || 'parallel' end def setup_proposal(params) flow = self.determine_flow(params) self.create_proposal!(flow: flow, status: 'pending') end def import_initial_comments(comments) self.comments.create!(user_id: self.proposal.requester_id, comment_text: comments.strip) end def create_approvals(emails) emails.each do |email| self.add_approver(email) end end def process_approvals_without_approval_group(params) if params['approvalGroup'].present? raise ApprovalGroupError.new('Approval Group already exists') end approver_emails = params['toAddress'].select(&:present?) self.create_approvals(approver_emails) requester_email = params['fromAddress'] if requester_email self.add_requester(requester_email) end end def create_approval_from_user_role(user_role) case user_role.role when 'approver' Approval.create!( position: user_role.position, proposal_id: self.proposal_id, user_id: user_role.user_id ) when 'observer' Observation.create!( proposal_id: self.proposal_id, user_id: user_role.user_id ) when 'requester' self.set_requester(user_role.user) else raise "Unknown role #{user_role.inspect}" end end def process_approvals_from_approval_group approval_group.user_roles.each do |user_role| self.create_approval_from_user_role(user_role) end end def copy_existing_approval(approval) new_approval = self.approvals.create!(user_id: approval.user_id, role: approval.role) Dispatcher.new.email_approver(new_approval) end def self.reset_existing_cart(cart) cart.approvals.map(&:destroy) cart.approval_group = nil cart end def self.copy_existing_approvals_to(new_cart, cart_name) previous_cart = Cart.where(name: cart_name).last if previous_cart && previous_cart.rejected? 
previous_cart.approvals.each do |approval| new_cart.copy_existing_approval(approval) end end end def gsa_advantage? self.client == 'gsa_advantage' end def parallel? self.flow == 'parallel' end def linear? self.flow == 'linear' end # Some fields aren't meant for the clients' eyes EXCLUDE_FIELDS_FROM_DISPLAY = ['origin', 'contractingVehicle', 'location', 'configType'] # The following methods are an interface which should be matched by client # models def fields_for_display self.properties_with_names.reject{ |key,value,label| EXCLUDE_FIELDS_FROM_DISPLAY.include? key}.map{ |key,value,label| [label, value] } end def client self.getProp('origin') || 'gsa_advantage' end # @todo - the method name (e.g. :external_id) should live on a "client" # model def public_identifier self.external_id end def total_price if self.client == 'gsa18f' self.getProp('cost_per_unit').to_f * self.getProp('quantity').to_f else 0.0 end end end
1
12,772
Since this may be the case, I'm wondering if we should call the param `updated_at_i` or something so that we don't run into a problem distinguishing them down the road?
18F-C2
rb
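
The patch in this record derives a poor-man's version number from updated_at, and the review comment above is about keeping that value distinguishable from other parameters later. As a rough illustration only, the same idea in Python looks like the sketch below; the class and attribute names are hypothetical and are not part of the 18F-C2 codebase.

# Hypothetical sketch: a version derived from the record's last-update time,
# analogous to the Ruby `version` method added in the patch above.
from datetime import datetime, timezone

class Cart:
    def __init__(self, updated_at: datetime):
        self.updated_at = updated_at

    @property
    def version(self) -> int:
        # integer epoch seconds, like Ruby's updated_at.to_i
        return int(self.updated_at.timestamp())

cart = Cart(datetime(2014, 7, 1, 12, 0, tzinfo=timezone.utc))
print(cart.version)  # 1404216000
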
@@ -215,6 +215,10 @@ namespace ScenarioMeasurement } TraceEventSession.Merge(files.ToArray(), traceFileName); + if (guiApp) + { + appExe = Path.Join(workingDir, appExe); + } string commandLine = $"\"{appExe}\""; if (!String.IsNullOrEmpty(appArgs)) {
1
using Microsoft.Diagnostics.Tracing.Parsers; using Microsoft.Diagnostics.Tracing.Session; using Reporting; using System; using System.Collections.Generic; using System.IO; using System.Linq; namespace ScenarioMeasurement { enum MetricType { TimeToMain, GenericStartup, ProcessTime, WPF } class Startup { /// <summary> /// /// </summary> /// <param name="appExe">Full path to test executable</param> /// <param name="metricType">Type of interval measurement</param> /// <param name="scenarioName">Scenario name for reporting</param> /// <param name="processWillExit">true: process exits on its own. False: process does not exit, send close.</param> /// <param name="timeout">Max wait for process to exit</param> /// <param name="measurementDelay">Allowed time for startup window</param> /// <param name="iterations">Number of measured iterations</param> /// <param name="appArgs">optional arguments to test executable</param> /// <param name="logFileName">optional log file. Default is appExe.startup.log</param> /// <param name="workingDir">optional working directory</param> /// <param name="warmup">enables/disables warmup iteration</param> /// <param name="traceFileName">trace file name</param> /// <param name="guiApp">true: app under test is a GUI app, false: console</param> /// <param name="skipProfileIteration">true: skip full results iteration</param> /// <param name="reportJsonPath">path to save report json</param> /// <param name="iterationSetup">command to set up before each iteration</param> /// <param name="setupArgs">arguments of iterationSetup</param> /// <param name="iterationCleanup">command to clean up after each iteration</param> /// <param name="cleanupArgs">arguments of iterationCleanup</param> /// <param name="traceDirectory">Directory to put files in (defaults to current directory)</param> /// <param name="environmentVariables">Environment variables set for test processes (example: var1=value1;var2=value2)</param> /// <returns></returns> static int Main(string appExe, MetricType metricType, string scenarioName, string traceFileName, bool processWillExit = false, int iterations = 5, string iterationSetup = "", string setupArgs = "", string iterationCleanup = "", string cleanupArgs = "", int timeout = 60, int measurementDelay = 15, string appArgs = "", string logFileName = "", string workingDir = "", bool warmup = true, bool guiApp = true, bool skipProfileIteration = false, string reportJsonPath = "", string traceDirectory = null, string environmentVariables = null ) { Logger logger = new Logger(String.IsNullOrEmpty(logFileName) ? 
$"{appExe}.startup.log" : logFileName); static void checkArg(string arg, string name) { if (String.IsNullOrEmpty(arg)) throw new ArgumentException(name); }; checkArg(scenarioName, nameof(scenarioName)); checkArg(appExe, nameof(appExe)); checkArg(traceFileName, nameof(traceFileName)); if (String.IsNullOrEmpty(traceDirectory)) { traceDirectory = Environment.CurrentDirectory; } else { if(!Directory.Exists(traceDirectory)) { Directory.CreateDirectory(traceDirectory); } } Dictionary<string, string> envVariables = null; if (!String.IsNullOrEmpty(environmentVariables)) { envVariables = ParseStringToDictionary(environmentVariables); } bool failed = false; logger.Log($"Running {appExe} (args: \"{appArgs}\")"); logger.Log($"Working Directory: {workingDir}"); var procHelper = new ProcessHelper(logger) { ProcessWillExit = processWillExit, Timeout = timeout, MeasurementDelay = measurementDelay, Executable = appExe, Arguments = appArgs, WorkingDirectory = workingDir, GuiApp = guiApp, EnvironmentVariables = envVariables }; // create iteration setup process helper logger.Log($"Iteration set up: {iterationSetup} (args: {setupArgs})"); ProcessHelper setupProcHelper = null; if (!String.IsNullOrEmpty(iterationSetup)) { setupProcHelper = CreateProcHelper(iterationSetup, setupArgs, logger); } // create iteration cleanup process helper logger.Log($"Iteration clean up: {iterationCleanup} (args: {cleanupArgs})"); ProcessHelper cleanupProcHelper = null; if (!String.IsNullOrEmpty(iterationCleanup)) { cleanupProcHelper = CreateProcHelper(iterationCleanup, cleanupArgs, logger); } Util.Init(); if (warmup) { logger.Log("=============== Warm up ================"); procHelper.Run(); } string kernelTraceFile = Path.ChangeExtension(traceFileName, "perflabkernel.etl"); string userTraceFile = Path.ChangeExtension(traceFileName, "perflabuser.etl"); traceFileName = Path.Join(traceDirectory, traceFileName); kernelTraceFile = Path.Join(traceDirectory, kernelTraceFile); userTraceFile = Path.Join(traceDirectory, userTraceFile); IParser parser = null; switch (metricType) { case MetricType.TimeToMain: parser = new TimeToMainParser(); break; case MetricType.GenericStartup: parser = new GenericStartupParser(); break; case MetricType.ProcessTime: parser = new ProcessTimeParser(); break; //case MetricType.WPF: // parser = new WPFParser(); // break; } var pids = new List<int>(); using (var kernel = new TraceEventSession(KernelTraceEventParser.KernelSessionName, kernelTraceFile)) { parser.EnableKernelProvider(kernel); using (var user = new TraceEventSession("StartupSession", userTraceFile)) { parser.EnableUserProviders(user); for (int i = 0; i < iterations; i++) { logger.Log($"=============== Iteration {i} ================ "); // set up iteration if (setupProcHelper != null) { var setupResult = setupProcHelper.Run().result; if (setupResult != ProcessHelper.Result.Success) { logger.Log($"Failed to set up. Result: {setupResult}"); failed = true; break; } } // run iteration var runResult = procHelper.Run(); if (runResult.result == ProcessHelper.Result.Success) { pids.Add(runResult.pid); } else { logger.Log($"Failed to run. Result: {runResult.result}"); failed = true; break; } // clean up iteration if (cleanupProcHelper != null) { var cleanupResult = cleanupProcHelper.Run().result; if (cleanupResult != ProcessHelper.Result.Success) { logger.Log($"Failed to clean up. 
Result: {cleanupResult}"); failed = true; break; } } } } } if (!failed) { logger.Log("Parsing.."); var files = new List<string> { kernelTraceFile }; if (File.Exists(userTraceFile)) { files.Add(userTraceFile); } TraceEventSession.Merge(files.ToArray(), traceFileName); string commandLine = $"\"{appExe}\""; if (!String.IsNullOrEmpty(appArgs)) { commandLine = commandLine + " " + appArgs; } var counters = parser.Parse(traceFileName, Path.GetFileNameWithoutExtension(appExe), pids, commandLine); WriteResultTable(counters, logger); var reporter = Reporter.CreateReporter(); if (reporter != null) { var test = new Test(); test.Categories.Add("Startup"); test.Name = scenarioName; test.AddCounter(counters); reporter.AddTest(test); if (!String.IsNullOrEmpty(reportJsonPath)) { File.WriteAllText(reportJsonPath, reporter.GetJson()); } } } File.Delete(kernelTraceFile); File.Delete(userTraceFile); if (!skipProfileIteration) { string profileTraceFileName = $"{Path.GetFileNameWithoutExtension(traceFileName)}_profile.etl"; string profileKernelTraceFile = Path.ChangeExtension(profileTraceFileName, ".kernel.etl"); string profileUserTraceFile = Path.ChangeExtension(profileTraceFileName, ".user.etl"); profileTraceFileName = Path.Join(traceDirectory, profileTraceFileName); profileKernelTraceFile = Path.Join(traceDirectory, profileKernelTraceFile); profileUserTraceFile = Path.Join(traceDirectory, profileUserTraceFile); logger.Log($"=============== Profile Iteration ================ "); ProfileParser profiler = new ProfileParser(parser); using (var kernel = new TraceEventSession(KernelTraceEventParser.KernelSessionName, profileKernelTraceFile)) { profiler.EnableKernelProvider(kernel); using (var user = new TraceEventSession("ProfileSession", profileUserTraceFile)) { profiler.EnableUserProviders(user); // setup iteration if (setupProcHelper != null) { var setupResult = setupProcHelper.Run().result; if (setupResult != ProcessHelper.Result.Success) { logger.Log($"Failed to set up. Result: {setupResult}"); failed = true; } } var result = procHelper.Run().result; if (result != ProcessHelper.Result.Success) { logger.Log($"Failed to run. Result: {result}"); failed = true; } // cleanup iteration if (cleanupProcHelper != null) { var cleanupResult = cleanupProcHelper.Run().result; if (cleanupResult != ProcessHelper.Result.Success) { logger.Log($"Failed to clean up. Result: {cleanupResult}"); failed = true; } } } } if (!failed) { logger.Log("Merging profile.."); TraceEventSession.Merge(new[] { profileKernelTraceFile, profileUserTraceFile }, profileTraceFileName); } File.Delete(profileKernelTraceFile); File.Delete(profileUserTraceFile); } return (failed ? 
-1 : 0); } private static ProcessHelper CreateProcHelper(string command, string args, Logger logger) { var procHelper = new ProcessHelper(logger) { ProcessWillExit = true, Executable = command, Arguments = args, Timeout = 300 }; return procHelper; } private static void WriteResultTable(IEnumerable<Counter> counters, Logger logger) { logger.Log($"{"Metric",-15}|{"Average",-15}|{"Min",-15}|{"Max",-15}"); logger.Log($"---------------|---------------|---------------|---------------"); foreach (var counter in counters) { string average = $"{counter.Results.Average():F3} {counter.MetricName}"; string max = $"{counter.Results.Max():F3} {counter.MetricName}"; string min = $"{counter.Results.Min():F3} {counter.MetricName}"; logger.Log($"{counter.Name,-15}|{average,-15}|{min,-15}|{max,-15}"); } } private static Dictionary<string, string> ParseStringToDictionary(string s) { var dict = new Dictionary<string, string>(); foreach (string substring in s.Split(';', StringSplitOptions.RemoveEmptyEntries)) { var pair = substring.Split('='); dict.Add(pair[0], pair[1]); } return dict; } } }
1
10,471
I'm wondering why we need to join the paths here; it seems evt.commandLine only takes whatever appExe is.
dotnet-performance
.cs
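
The patch in this record prefixes appExe with the working directory for GUI apps before building the quoted command line, which is what the reviewer is questioning. The snippet below is only a small Python illustration of the general idea of resolving a relative executable name against a working directory; the paths are made up, and this uses Python's os.path.join rather than the .NET Path.Join from the patch.

# Illustrative only: resolve a relative executable name against a working
# directory, then quote it for a command line. Paths are hypothetical.
import os.path

working_dir = "/tmp/scenarios/emptywpf"   # hypothetical working directory
app_exe = "emptywpf.exe"                  # relative executable name

full_exe = os.path.join(working_dir, app_exe)
command_line = f'"{full_exe}"'
print(command_line)   # "/tmp/scenarios/emptywpf/emptywpf.exe"
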
@@ -329,8 +329,14 @@ public class ConfigCenterClient { public void refreshConfig(String configcenter, boolean wait) { CountDownLatch latch = new CountDownLatch(1); + String encodeServiceName = ""; + try { + encodeServiceName = URLEncoder.encode(StringUtils.deleteWhitespace(serviceName), "UTF-8"); + } catch (UnsupportedEncodingException e) { + LOGGER.error("encode error: {}",e.getMessage()); + } + String path = URIConst.ITEMS + "?dimensionsInfo=" + encodeServiceName; clientMgr.findThreadBindClientPool().runOnContext(client -> { - String path = URIConst.ITEMS + "?dimensionsInfo=" + StringUtils.deleteWhitespace(serviceName); IpPort ipPort = NetUtils.parseIpPortFromURI(configcenter); HttpClientRequest request = client.get(ipPort.getPort(), ipPort.getHostOrIp(), path, rsp -> { if (rsp.statusCode() == HttpResponseStatus.OK.code()) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.config.client; import java.io.IOException; import java.io.InputStream; import java.io.UnsupportedEncodingException; import java.net.URI; import java.net.URISyntaxException; import java.net.URLEncoder; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.ServiceLoader; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.apache.commons.lang.StringUtils; import org.apache.servicecomb.config.archaius.sources.ConfigCenterConfigurationSourceImpl; import org.apache.servicecomb.foundation.auth.AuthHeaderProvider; import org.apache.servicecomb.foundation.auth.SignRequest; import org.apache.servicecomb.foundation.common.event.EventManager; import org.apache.servicecomb.foundation.common.net.IpPort; import org.apache.servicecomb.foundation.common.net.NetUtils; import org.apache.servicecomb.foundation.common.utils.JsonUtils; import org.apache.servicecomb.foundation.ssl.SSLCustom; import org.apache.servicecomb.foundation.ssl.SSLOption; import org.apache.servicecomb.foundation.ssl.SSLOptionFactory; import org.apache.servicecomb.foundation.vertx.AddressResolverConfig; import org.apache.servicecomb.foundation.vertx.VertxTLSBuilder; import org.apache.servicecomb.foundation.vertx.VertxUtils; import org.apache.servicecomb.foundation.vertx.client.ClientPoolManager; import org.apache.servicecomb.foundation.vertx.client.ClientVerticle; import org.apache.servicecomb.foundation.vertx.client.http.HttpClientPoolFactory; import org.apache.servicecomb.foundation.vertx.client.http.HttpClientWithContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.fasterxml.jackson.core.type.TypeReference; import io.netty.handler.codec.http.HttpResponseStatus; import io.vertx.core.DeploymentOptions; import io.vertx.core.Vertx; import io.vertx.core.VertxOptions; import io.vertx.core.http.CaseInsensitiveHeaders; import io.vertx.core.http.HttpClientOptions; import io.vertx.core.http.HttpClientRequest; import io.vertx.core.http.WebSocket; import io.vertx.core.http.impl.FrameType; import io.vertx.core.http.impl.ws.WebSocketFrameImpl; import io.vertx.core.net.ProxyOptions; /** * Created by on 2016/5/17. 
*/ public class ConfigCenterClient { private static final Logger LOGGER = LoggerFactory.getLogger(ConfigCenterClient.class); private static final ConfigCenterConfig CONFIG_CENTER_CONFIG = ConfigCenterConfig.INSTANCE; private static final String SSL_KEY = "cc.consumer"; private static final ScheduledExecutorService EXECUTOR = Executors.newScheduledThreadPool(1); private static final long HEARTBEAT_INTERVAL = 30000; private static final long BOOTUP_WAIT_TIME = 10; private ScheduledExecutorService heartbeatTask = null; private int refreshMode = CONFIG_CENTER_CONFIG.getRefreshMode(); private int refreshInterval = CONFIG_CENTER_CONFIG.getRefreshInterval(); private int firstRefreshInterval = CONFIG_CENTER_CONFIG.getFirstRefreshInterval(); private int refreshPort = CONFIG_CENTER_CONFIG.getRefreshPort(); private String tenantName = CONFIG_CENTER_CONFIG.getTenantName(); private String serviceName = CONFIG_CENTER_CONFIG.getServiceName(); private String environment = CONFIG_CENTER_CONFIG.getEnvironment(); private MemberDiscovery memberDiscovery = new MemberDiscovery(CONFIG_CENTER_CONFIG.getServerUri()); private ConfigCenterConfigurationSourceImpl.UpdateHandler updateHandler; private static ClientPoolManager<HttpClientWithContext> clientMgr; private boolean isWatching = false; private static final ServiceLoader<AuthHeaderProvider> authHeaderProviders = ServiceLoader.load(AuthHeaderProvider.class); public ConfigCenterClient(ConfigCenterConfigurationSourceImpl.UpdateHandler updateHandler) { this.updateHandler = updateHandler; } public void connectServer() { if (refreshMode != 0 && refreshMode != 1) { LOGGER.error("refreshMode must be 0 or 1."); return; } ParseConfigUtils parseConfigUtils = new ParseConfigUtils(updateHandler); try { deployConfigClient(); } catch (InterruptedException e) { throw new IllegalStateException(e); } refreshMembers(memberDiscovery); ConfigRefresh refreshTask = new ConfigRefresh(parseConfigUtils, memberDiscovery); refreshTask.run(true); EXECUTOR.scheduleWithFixedDelay(refreshTask, firstRefreshInterval, refreshInterval, TimeUnit.MILLISECONDS); } private void refreshMembers(MemberDiscovery memberDiscovery) { if (CONFIG_CENTER_CONFIG.getAutoDiscoveryEnabled()) { String configCenter = memberDiscovery.getConfigServer(); IpPort ipPort = NetUtils.parseIpPortFromURI(configCenter); clientMgr.findThreadBindClientPool().runOnContext(client -> { HttpClientRequest request = client.get(ipPort.getPort(), ipPort.getHostOrIp(), URIConst.MEMBERS, rsp -> { if (rsp.statusCode() == HttpResponseStatus.OK.code()) { rsp.bodyHandler(buf -> { memberDiscovery.refreshMembers(buf.toJsonObject()); }); } }); SignRequest signReq = createSignRequest(request.method().toString(), configCenter + URIConst.MEMBERS, new HashMap<>(), null); if (ConfigCenterConfig.INSTANCE.getToken() != null) { request.headers().add("X-Auth-Token", ConfigCenterConfig.INSTANCE.getToken()); } authHeaderProviders.forEach(provider -> request.headers().addAll(provider.getSignAuthHeaders(signReq))); request.exceptionHandler(e -> { LOGGER.error("Fetch member from {} failed. 
Error message is [{}].", configCenter, e.getMessage()); }); request.end(); }); } } private void deployConfigClient() throws InterruptedException { VertxOptions vertxOptions = new VertxOptions(); vertxOptions.setAddressResolverOptions(AddressResolverConfig.getAddressResover(SSL_KEY, ConfigCenterConfig.INSTANCE.getConcurrentCompositeConfiguration())); Vertx vertx = VertxUtils.getOrCreateVertxByName("config-center", vertxOptions); HttpClientOptions httpClientOptions = createHttpClientOptions(); clientMgr = new ClientPoolManager<>(vertx, new HttpClientPoolFactory(httpClientOptions)); DeploymentOptions deployOptions = VertxUtils.createClientDeployOptions(clientMgr, 1); VertxUtils.blockDeploy(vertx, ClientVerticle.class, deployOptions); } private HttpClientOptions createHttpClientOptions() { HttpClientOptions httpClientOptions = new HttpClientOptions(); if (ConfigCenterConfig.INSTANCE.isProxyEnable()) { ProxyOptions proxy = new ProxyOptions() .setHost(ConfigCenterConfig.INSTANCE.getProxyHost()) .setPort(ConfigCenterConfig.INSTANCE.getProxyPort()) .setUsername(ConfigCenterConfig.INSTANCE.getProxyUsername()) .setPassword(ConfigCenterConfig.INSTANCE.getProxyPasswd()); httpClientOptions.setProxyOptions(proxy); } httpClientOptions.setConnectTimeout(CONFIG_CENTER_CONFIG.getConnectionTimeout()); if (this.memberDiscovery.getConfigServer().toLowerCase().startsWith("https")) { LOGGER.debug("config center client performs requests over TLS"); SSLOptionFactory factory = SSLOptionFactory.createSSLOptionFactory(SSL_KEY, ConfigCenterConfig.INSTANCE.getConcurrentCompositeConfiguration()); SSLOption sslOption; if (factory == null) { sslOption = SSLOption.buildFromYaml(SSL_KEY, ConfigCenterConfig.INSTANCE.getConcurrentCompositeConfiguration()); } else { sslOption = factory.createSSLOption(); } SSLCustom sslCustom = SSLCustom.createSSLCustom(sslOption.getSslCustomClass()); VertxTLSBuilder.buildHttpClientOptions(sslOption, sslCustom, httpClientOptions); } return httpClientOptions; } class ConfigRefresh implements Runnable { private ParseConfigUtils parseConfigUtils; private MemberDiscovery memberdis; ConfigRefresh(ParseConfigUtils parseConfigUtils, MemberDiscovery memberdis) { this.parseConfigUtils = parseConfigUtils; this.memberdis = memberdis; } public void run(boolean wait) { // this will be single threaded, so we don't care about concurrent // staffs try { String configCenter = memberdis.getConfigServer(); if (refreshMode == 1) { refreshConfig(configCenter, wait); } else if (!isWatching) { // 重新监听时需要先加载,避免在断开期间丢失变更 refreshConfig(configCenter, wait); doWatch(configCenter); } } catch (Exception e) { LOGGER.error("client refresh thread exception", e); } } // 具体动作 @Override public void run() { run(false); } // create watch and wait for done public void doWatch(String configCenter) throws UnsupportedEncodingException, InterruptedException { CountDownLatch waiter = new CountDownLatch(1); IpPort ipPort = NetUtils.parseIpPortFromURI(configCenter); String url = URIConst.REFRESH_ITEMS + "?dimensionsInfo=" + StringUtils.deleteWhitespace(URLEncoder.encode(serviceName, "UTF-8")); Map<String, String> headers = new HashMap<>(); headers.put("x-domain-name", tenantName); if (ConfigCenterConfig.INSTANCE.getToken() != null) { headers.put("X-Auth-Token", ConfigCenterConfig.INSTANCE.getToken()); } headers.put("x-environment", environment); HttpClientWithContext vertxHttpClient = clientMgr.findThreadBindClientPool(); vertxHttpClient.runOnContext(client -> { Map<String, String> authHeaders = new HashMap<>(); 
authHeaderProviders.forEach(provider -> authHeaders.putAll(provider.getSignAuthHeaders( createSignRequest(null, configCenter + url, headers, null)))); client.websocket(refreshPort, ipPort.getHostOrIp(), url, new CaseInsensitiveHeaders().addAll(headers) .addAll(authHeaders), ws -> { ws.exceptionHandler(e -> { LOGGER.error("watch config read fail", e); stopHeartBeatThread(); isWatching = false; }); ws.closeHandler(v -> { LOGGER.warn("watching config connection is closed accidentally"); stopHeartBeatThread(); isWatching = false; }); ws.handler(action -> { LOGGER.info("watching config recieved {}", action); Map<String, Object> mAction = action.toJsonObject().getMap(); if ("CREATE".equals(mAction.get("action"))) { refreshConfig(configCenter, false); } else if ("MEMBER_CHANGE".equals(mAction.get("action"))) { refreshMembers(memberdis); } else { parseConfigUtils.refreshConfigItemsIncremental(mAction); } }); startHeartBeatThread(ws); isWatching = true; waiter.countDown(); }, e -> { LOGGER.error("watcher connect to config center {} refresh port {} failed. Error message is [{}]", configCenter, refreshPort, e.getMessage()); waiter.countDown(); }); }); waiter.await(); } private void startHeartBeatThread(WebSocket ws) { heartbeatTask = Executors.newScheduledThreadPool(1); heartbeatTask.scheduleWithFixedDelay(() -> sendHeartbeat(ws), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS); } private void stopHeartBeatThread() { if (heartbeatTask != null) { heartbeatTask.shutdownNow(); } } private void sendHeartbeat(WebSocket ws) { try { ws.writeFrame(new WebSocketFrameImpl(FrameType.PING)); EventManager.post(new ConnSuccEvent()); } catch (IllegalStateException e) { EventManager.post(new ConnFailEvent("heartbeat fail, " + e.getMessage())); LOGGER.error("heartbeat fail", e); } } public void refreshConfig(String configcenter, boolean wait) { CountDownLatch latch = new CountDownLatch(1); clientMgr.findThreadBindClientPool().runOnContext(client -> { String path = URIConst.ITEMS + "?dimensionsInfo=" + StringUtils.deleteWhitespace(serviceName); IpPort ipPort = NetUtils.parseIpPortFromURI(configcenter); HttpClientRequest request = client.get(ipPort.getPort(), ipPort.getHostOrIp(), path, rsp -> { if (rsp.statusCode() == HttpResponseStatus.OK.code()) { rsp.bodyHandler(buf -> { try { parseConfigUtils .refreshConfigItems(JsonUtils.OBJ_MAPPER.readValue(buf.toString(), new TypeReference<LinkedHashMap<String, Map<String, String>>>() { })); EventManager.post(new ConnSuccEvent()); } catch (IOException e) { EventManager.post(new ConnFailEvent("config refresh result parse fail " + e.getMessage())); LOGGER.error("Config refresh from {} failed. 
Error message is [{}].", configcenter, e.getMessage()); } latch.countDown(); }); } else { rsp.bodyHandler(buf -> { LOGGER.error("Server error message is [{}].", buf); latch.countDown(); }); EventManager.post(new ConnFailEvent("fetch config fail")); LOGGER.error("Config refresh from {} failed.", configcenter); } }); Map<String, String> headers = new HashMap<>(); headers.put("x-domain-name", tenantName); if (ConfigCenterConfig.INSTANCE.getToken() != null) { headers.put("X-Auth-Token", ConfigCenterConfig.INSTANCE.getToken()); } headers.put("x-environment", environment); request.headers().addAll(headers); authHeaderProviders.forEach(provider -> request.headers() .addAll(provider.getSignAuthHeaders(createSignRequest(request.method().toString(), configcenter + path, headers, null)))); request.exceptionHandler(e -> { EventManager.post(new ConnFailEvent("fetch config fail")); LOGGER.error("Config refresh from {} failed. Error message is [{}].", configcenter, e.getMessage()); latch.countDown(); }); request.end(); }); if (wait) { LOGGER.info("Refreshing remote config..."); try { latch.await(BOOTUP_WAIT_TIME, TimeUnit.SECONDS); } catch (InterruptedException e) { LOGGER.warn(e.getMessage()); } LOGGER.info("Refreshing remote config is done."); } } } public static SignRequest createSignRequest(String method, String endpoint, Map<String, String> headers, InputStream content) { SignRequest signReq = new SignRequest(); try { signReq.setEndpoint(new URI(endpoint)); } catch (URISyntaxException e) { LOGGER.warn("set uri failed, uri is {}, message: {}", endpoint, e.getMessage()); } Map<String, String[]> queryParams = new HashMap<>(); if (endpoint.contains("?")) { String parameters = endpoint.substring(endpoint.indexOf("?") + 1); if (null != parameters && !"".equals(parameters)) { String[] parameterarray = parameters.split("&"); for (String p : parameterarray) { String key = p.split("=")[0]; String value = p.split("=")[1]; if (!queryParams.containsKey(key)) { queryParams.put(key, new String[] {value}); } else { List<String> vals = new ArrayList<>(Arrays.asList(queryParams.get(key))); vals.add(value); queryParams.put(key, vals.toArray(new String[vals.size()])); } } } } signReq.setQueryParams(queryParams); signReq.setHeaders(headers); signReq.setHttpMethod(method); signReq.setContent(content); return signReq; } }
1
9,613
1. The code is not formatted. 2. If encoding failed, should we still continue? 3. "UTF-8" can be changed to java.nio.charset.StandardCharsets.UTF_8.name().
apache-servicecomb-java-chassis
java
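
The reviewer's third point is about spelling the charset as a named constant instead of the "UTF-8" string literal, and the patch itself strips whitespace from the service name before percent-encoding it into the dimensionsInfo query parameter. The sketch below is a rough Python analogue of that encode step, not the Java code itself; the service name and the items path are hypothetical stand-ins.

# Rough Python analogue of the encoding done in the patch above: delete
# whitespace from the service name, percent-encode it as UTF-8, and append it
# as the dimensionsInfo query parameter. Values below are made up.
from urllib.parse import quote

service_name = "demo server @ config center"            # hypothetical
items_uri = "/configuration/items"                      # stand-in for URIConst.ITEMS

encoded = quote("".join(service_name.split()), safe="", encoding="utf-8")
path = items_uri + "?dimensionsInfo=" + encoded
print(path)   # /configuration/items?dimensionsInfo=demoserver%40configcenter
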
@@ -74,6 +74,7 @@ from typing import Any, Callable, Iterator, List, Optional, Pattern, Tuple import astroid import astroid.exceptions from astroid import bases, nodes +from astroid.brain import brain_dataclasses from pylint.checkers import BaseChecker, utils from pylint.checkers.utils import (
1
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2009 James Lingard <[email protected]> # Copyright (c) 2012-2014 Google, Inc. # Copyright (c) 2014-2020 Claudiu Popa <[email protected]> # Copyright (c) 2014 David Shea <[email protected]> # Copyright (c) 2014 Steven Myint <[email protected]> # Copyright (c) 2014 Holger Peters <[email protected]> # Copyright (c) 2014 Arun Persaud <[email protected]> # Copyright (c) 2015 Anentropic <[email protected]> # Copyright (c) 2015 Dmitry Pribysh <[email protected]> # Copyright (c) 2015 Rene Zhang <[email protected]> # Copyright (c) 2015 Radu Ciorba <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2016, 2019 Ashley Whetter <[email protected]> # Copyright (c) 2016 Alexander Todorov <[email protected]> # Copyright (c) 2016 Jürgen Hermann <[email protected]> # Copyright (c) 2016 Jakub Wilk <[email protected]> # Copyright (c) 2016 Filipe Brandenburger <[email protected]> # Copyright (c) 2017, 2021 Ville Skyttä <[email protected]> # Copyright (c) 2017-2018, 2020 hippo91 <[email protected]> # Copyright (c) 2017 Łukasz Rogalski <[email protected]> # Copyright (c) 2017 Derek Gustafson <[email protected]> # Copyright (c) 2018-2019, 2021 Nick Drozd <[email protected]> # Copyright (c) 2018 Pablo Galindo <[email protected]> # Copyright (c) 2018 Jim Robertson <[email protected]> # Copyright (c) 2018 Lucas Cimon <[email protected]> # Copyright (c) 2018 Mike Frysinger <[email protected]> # Copyright (c) 2018 Ben Green <[email protected]> # Copyright (c) 2018 Konstantin <[email protected]> # Copyright (c) 2018 Justin Li <[email protected]> # Copyright (c) 2018 Bryce Guinta <[email protected]> # Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]> # Copyright (c) 2019 Andy Palmer <[email protected]> # Copyright (c) 2019 mattlbeck <[email protected]> # Copyright (c) 2019 Martin Vielsmaier <[email protected]> # Copyright (c) 2019 Santiago Castro <[email protected]> # Copyright (c) 2019 yory8 <[email protected]> # Copyright (c) 2019 Federico Bond <[email protected]> # Copyright (c) 2019 Pascal Corpet <[email protected]> # Copyright (c) 2020 Peter Kolbus <[email protected]> # Copyright (c) 2020 Julien Palard <[email protected]> # Copyright (c) 2020 Ram Rachum <[email protected]> # Copyright (c) 2020 Anthony Sottile <[email protected]> # Copyright (c) 2020 Anubhav <[email protected]> # Copyright (c) 2021 Marc Mueller <[email protected]> # Copyright (c) 2021 Tushar Sadhwani <[email protected]> # Copyright (c) 2021 Daniël van Noord <[email protected]> # Copyright (c) 2021 David Liu <[email protected]> # Copyright (c) 2021 doranid <[email protected]> # Copyright (c) 2021 Yu Shao, Pang <[email protected]> # Copyright (c) 2021 Andrew Haigh <[email protected]> # Copyright (c) 2021 Jens H. 
Nielsen <[email protected]> # Copyright (c) 2021 Ikraduya Edian <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE """try to find more bugs in the code using astroid inference capabilities """ import fnmatch import heapq import itertools import operator import re import shlex import sys import types from collections import deque from collections.abc import Sequence from functools import singledispatch from typing import Any, Callable, Iterator, List, Optional, Pattern, Tuple import astroid import astroid.exceptions from astroid import bases, nodes from pylint.checkers import BaseChecker, utils from pylint.checkers.utils import ( check_messages, decorated_with, decorated_with_property, has_known_bases, is_builtin_object, is_classdef_type, is_comprehension, is_inside_abstract_class, is_iterable, is_mapping, is_overload_stub, is_postponed_evaluation_enabled, is_super, node_ignores_exception, safe_infer, supports_delitem, supports_getitem, supports_membership_test, supports_setitem, ) from pylint.interfaces import INFERENCE, IAstroidChecker from pylint.utils import get_global_option STR_FORMAT = {"builtins.str.format"} ASYNCIO_COROUTINE = "asyncio.coroutines.coroutine" BUILTIN_TUPLE = "builtins.tuple" TYPE_ANNOTATION_NODES_TYPES = ( nodes.AnnAssign, nodes.Arguments, nodes.FunctionDef, ) def _unflatten(iterable): for index, elem in enumerate(iterable): if isinstance(elem, Sequence) and not isinstance(elem, str): yield from _unflatten(elem) elif elem and not index: # We're interested only in the first element. yield elem def _flatten_container(iterable): # Flatten nested containers into a single iterable for item in iterable: if isinstance(item, (list, tuple, types.GeneratorType)): yield from _flatten_container(item) else: yield item def _is_owner_ignored(owner, attrname, ignored_classes, ignored_modules): """Check if the given owner should be ignored This will verify if the owner's module is in *ignored_modules* or the owner's module fully qualified name is in *ignored_modules* or if the *ignored_modules* contains a pattern which catches the fully qualified name of the module. Also, similar checks are done for the owner itself, if its name matches any name from the *ignored_classes* or if its qualified name can be found in *ignored_classes*. """ ignored_modules = set(ignored_modules) module_name = owner.root().name module_qname = owner.root().qname() for ignore in ignored_modules: # Try to match the module name / fully qualified name directly if module_qname in ignored_modules or module_name in ignored_modules: return True # Try to see if the ignores pattern match against the module name. if fnmatch.fnmatch(module_qname, ignore): return True # Otherwise we might have a root module name being ignored, # and the qualified owner has more levels of depth. parts = deque(module_name.split(".")) current_module = "" while parts: part = parts.popleft() if not current_module: current_module = part else: current_module += f".{part}" if current_module in ignored_modules: return True # Match against ignored classes. 
ignored_classes = set(ignored_classes) qname = owner.qname() if hasattr(owner, "qname") else "" return any(ignore in (attrname, qname) for ignore in ignored_classes) @singledispatch def _node_names(node): if not hasattr(node, "locals"): return [] return node.locals.keys() @_node_names.register(nodes.ClassDef) @_node_names.register(astroid.Instance) def _(node): values = itertools.chain(node.instance_attrs.keys(), node.locals.keys()) try: mro = node.mro()[1:] except (NotImplementedError, TypeError, astroid.MroError): mro = node.ancestors() other_values = [value for cls in mro for value in _node_names(cls)] return itertools.chain(values, other_values) def _string_distance(seq1, seq2): seq2_length = len(seq2) row = list(range(1, seq2_length + 1)) + [0] for seq1_index, seq1_char in enumerate(seq1): last_row = row row = [0] * seq2_length + [seq1_index + 1] for seq2_index, seq2_char in enumerate(seq2): row[seq2_index] = min( last_row[seq2_index] + 1, row[seq2_index - 1] + 1, last_row[seq2_index - 1] + (seq1_char != seq2_char), ) return row[seq2_length - 1] def _similar_names(owner, attrname, distance_threshold, max_choices): """Given an owner and a name, try to find similar names The similar names are searched given a distance metric and only a given number of choices will be returned. """ possible_names = [] names = _node_names(owner) for name in names: if name == attrname: continue distance = _string_distance(attrname, name) if distance <= distance_threshold: possible_names.append((name, distance)) # Now get back the values with a minimum, up to the given # limit or choices. picked = [ name for (name, _) in heapq.nsmallest( max_choices, possible_names, key=operator.itemgetter(1) ) ] return sorted(picked) def _missing_member_hint(owner, attrname, distance_threshold, max_choices): names = _similar_names(owner, attrname, distance_threshold, max_choices) if not names: # No similar name. return "" names = [repr(name) for name in names] if len(names) == 1: names = ", ".join(names) else: names = f"one of {', '.join(names[:-1])} or {names[-1]}" return f"; maybe {names}?" MSGS = { "E1101": ( "%s %r has no %r member%s", "no-member", "Used when a variable is accessed for an unexistent member.", {"old_names": [("E1103", "maybe-no-member")]}, ), "I1101": ( "%s %r has no %r member%s, but source is unavailable. Consider " "adding this module to extension-pkg-allow-list if you want " "to perform analysis based on run-time introspection of living objects.", "c-extension-no-member", "Used when a variable is accessed for non-existent member of C " "extension. 
Due to unavailability of source static analysis is impossible, " "but it may be performed by introspecting living objects in run-time.", ), "E1102": ( "%s is not callable", "not-callable", "Used when an object being called has been inferred to a non " "callable object.", ), "E1111": ( "Assigning result of a function call, where the function has no return", "assignment-from-no-return", "Used when an assignment is done on a function call but the " "inferred function doesn't return anything.", ), "E1120": ( "No value for argument %s in %s call", "no-value-for-parameter", "Used when a function call passes too few arguments.", ), "E1121": ( "Too many positional arguments for %s call", "too-many-function-args", "Used when a function call passes too many positional arguments.", ), "E1123": ( "Unexpected keyword argument %r in %s call", "unexpected-keyword-arg", "Used when a function call passes a keyword argument that " "doesn't correspond to one of the function's parameter names.", ), "E1124": ( "Argument %r passed by position and keyword in %s call", "redundant-keyword-arg", "Used when a function call would result in assigning multiple " "values to a function parameter, one value from a positional " "argument and one from a keyword argument.", ), "E1125": ( "Missing mandatory keyword argument %r in %s call", "missing-kwoa", ( "Used when a function call does not pass a mandatory" " keyword-only argument." ), ), "E1126": ( "Sequence index is not an int, slice, or instance with __index__", "invalid-sequence-index", "Used when a sequence type is indexed with an invalid type. " "Valid types are ints, slices, and objects with an __index__ " "method.", ), "E1127": ( "Slice index is not an int, None, or instance with __index__", "invalid-slice-index", "Used when a slice index is not an integer, None, or an object " "with an __index__ method.", ), "E1128": ( "Assigning result of a function call, where the function returns None", "assignment-from-none", "Used when an assignment is done on a function call but the " "inferred function returns nothing but None.", {"old_names": [("W1111", "old-assignment-from-none")]}, ), "E1129": ( "Context manager '%s' doesn't implement __enter__ and __exit__.", "not-context-manager", "Used when an instance in a with statement doesn't implement " "the context manager protocol(__enter__/__exit__).", ), "E1130": ( "%s", "invalid-unary-operand-type", "Emitted when a unary operand is used on an object which does not " "support this type of operation.", ), "E1131": ( "%s", "unsupported-binary-operation", "Emitted when a binary arithmetic operation between two " "operands is not supported.", ), "E1132": ( "Got multiple values for keyword argument %r in function call", "repeated-keyword", "Emitted when a function call got multiple values for a keyword.", ), "E1135": ( "Value '%s' doesn't support membership test", "unsupported-membership-test", "Emitted when an instance in membership test expression doesn't " "implement membership protocol (__contains__/__iter__/__getitem__).", ), "E1136": ( "Value '%s' is unsubscriptable", "unsubscriptable-object", "Emitted when a subscripted value doesn't support subscription " "(i.e. doesn't define __getitem__ method or __class_getitem__ for a class).", ), "E1137": ( "%r does not support item assignment", "unsupported-assignment-operation", "Emitted when an object does not support item assignment " "(i.e. 
doesn't define __setitem__ method).", ), "E1138": ( "%r does not support item deletion", "unsupported-delete-operation", "Emitted when an object does not support item deletion " "(i.e. doesn't define __delitem__ method).", ), "E1139": ( "Invalid metaclass %r used", "invalid-metaclass", "Emitted whenever we can detect that a class is using, " "as a metaclass, something which might be invalid for using as " "a metaclass.", ), "E1140": ( "Dict key is unhashable", "unhashable-dict-key", "Emitted when a dict key is not hashable " "(i.e. doesn't define __hash__ method).", ), "E1141": ( "Unpacking a dictionary in iteration without calling .items()", "dict-iter-missing-items", "Emitted when trying to iterate through a dict without calling .items()", ), "E1142": ( "'await' should be used within an async function", "await-outside-async", "Emitted when await is used outside an async function.", ), "W1113": ( "Keyword argument before variable positional arguments list " "in the definition of %s function", "keyword-arg-before-vararg", "When defining a keyword argument before variable positional arguments, one can " "end up in having multiple values passed for the aforementioned parameter in " "case the method is called with keyword arguments.", ), "W1114": ( "Positional arguments appear to be out of order", "arguments-out-of-order", "Emitted when the caller's argument names fully match the parameter " "names in the function signature but do not have the same order.", ), "W1115": ( "Non-string value assigned to __name__", "non-str-assignment-to-dunder-name", "Emitted when a non-string value is assigned to __name__", ), "W1116": ( "Second argument of isinstance is not a type", "isinstance-second-argument-not-valid-type", "Emitted when the second argument of an isinstance call is not a type.", ), } # builtin sequence types in Python 2 and 3. SEQUENCE_TYPES = { "str", "unicode", "list", "tuple", "bytearray", "xrange", "range", "bytes", "memoryview", } def _emit_no_member( node, owner, owner_name, mixin_class_rgx: Pattern[str], ignored_mixins=True, ignored_none=True, ): """Try to see if no-member should be emitted for the given owner. The following cases are ignored: * the owner is a function and it has decorators. * the owner is an instance and it has __getattr__, __getattribute__ implemented * the module is explicitly ignored from no-member checks * the owner is a class and the name can be found in its metaclass. * The access node is protected by an except handler, which handles AttributeError, Exception or bare except. * The node is guarded behind and `IF` or `IFExp` node """ # pylint: disable=too-many-return-statements if node_ignores_exception(node, AttributeError): return False if ignored_none and isinstance(owner, nodes.Const) and owner.value is None: return False if is_super(owner) or getattr(owner, "type", None) == "metaclass": return False if owner_name and ignored_mixins and mixin_class_rgx.match(owner_name): return False if isinstance(owner, nodes.FunctionDef) and ( owner.decorators or owner.is_abstract() ): return False if isinstance(owner, (astroid.Instance, nodes.ClassDef)): if owner.has_dynamic_getattr(): # Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not # invoked at this point. 
try: metaclass = owner.metaclass() except astroid.MroError: return False if metaclass: # Renamed in Python 3.10 to `EnumType` return metaclass.qname() in {"enum.EnumMeta", "enum.EnumType"} return False if not has_known_bases(owner): return False # Exclude typed annotations, since these might actually exist # at some point during the runtime of the program. if utils.is_attribute_typed_annotation(owner, node.attrname): return False if isinstance(owner, astroid.objects.Super): # Verify if we are dealing with an invalid Super object. # If it is invalid, then there's no point in checking that # it has the required attribute. Also, don't fail if the # MRO is invalid. try: owner.super_mro() except (astroid.MroError, astroid.SuperError): return False if not all(has_known_bases(base) for base in owner.type.mro()): return False if isinstance(owner, nodes.Module): try: owner.getattr("__getattr__") return False except astroid.NotFoundError: pass if owner_name and node.attrname.startswith("_" + owner_name): # Test if an attribute has been mangled ('private' attribute) unmangled_name = node.attrname.split("_" + owner_name)[-1] try: if owner.getattr(unmangled_name, context=None) is not None: return False except astroid.NotFoundError: return True if ( owner.parent and isinstance(owner.parent, nodes.ClassDef) and owner.parent.name == "EnumMeta" and owner_name == "__members__" and node.attrname in {"items", "values", "keys"} ): # Avoid false positive on Enum.__members__.{items(), values, keys} # See https://github.com/PyCQA/pylint/issues/4123 return False # Don't emit no-member if guarded behind `IF` or `IFExp` # * Walk up recursively until if statement is found. # * Check if condition can be inferred as `Const`, # would evaluate as `False`, # and wheater the node is part of the `body`. # * Continue checking until scope of node is reached. scope: nodes.NodeNG = node.scope() node_origin: nodes.NodeNG = node parent: nodes.NodeNG = node.parent while parent != scope: if isinstance(parent, (nodes.If, nodes.IfExp)): inferred = safe_infer(parent.test) if ( # pylint: disable=too-many-boolean-expressions isinstance(inferred, nodes.Const) and inferred.bool_value() is False and ( isinstance(parent, nodes.If) and node_origin in parent.body or isinstance(parent, nodes.IfExp) and node_origin == parent.body ) ): return False node_origin, parent = parent, parent.parent return True def _determine_callable(callable_obj): # Ordering is important, since BoundMethod is a subclass of UnboundMethod, # and Function inherits Lambda. parameters = 0 if hasattr(callable_obj, "implicit_parameters"): parameters = callable_obj.implicit_parameters() if isinstance(callable_obj, astroid.BoundMethod): # Bound methods have an extra implicit 'self' argument. return callable_obj, parameters, callable_obj.type if isinstance(callable_obj, astroid.UnboundMethod): return callable_obj, parameters, "unbound method" if isinstance(callable_obj, nodes.FunctionDef): return callable_obj, parameters, callable_obj.type if isinstance(callable_obj, nodes.Lambda): return callable_obj, parameters, "lambda" if isinstance(callable_obj, nodes.ClassDef): # Class instantiation, lookup __new__ instead. # If we only find object.__new__, we can safely check __init__ # instead. If __new__ belongs to builtins, then we look # again for __init__ in the locals, since we won't have # argument information for the builtin __new__ function. try: # Use the last definition of __new__. 
new = callable_obj.local_attr("__new__")[-1] except astroid.NotFoundError: new = None from_object = new and new.parent.scope().name == "object" from_builtins = new and new.root().name in sys.builtin_module_names if not new or from_object or from_builtins: try: # Use the last definition of __init__. callable_obj = callable_obj.local_attr("__init__")[-1] except astroid.NotFoundError as e: # do nothing, covered by no-init. raise ValueError from e else: callable_obj = new if not isinstance(callable_obj, nodes.FunctionDef): raise ValueError # both have an extra implicit 'cls'/'self' argument. return callable_obj, parameters, "constructor" raise ValueError def _has_parent_of_type(node, node_type, statement): """Check if the given node has a parent of the given type.""" parent = node.parent while not isinstance(parent, node_type) and statement.parent_of(parent): parent = parent.parent return isinstance(parent, node_type) def _no_context_variadic_keywords(node, scope): statement = node.statement(future=True) variadics = () if isinstance(scope, nodes.Lambda) and not isinstance(scope, nodes.FunctionDef): variadics = list(node.keywords or []) + node.kwargs elif isinstance(statement, (nodes.Return, nodes.Expr, nodes.Assign)) and isinstance( statement.value, nodes.Call ): call = statement.value variadics = list(call.keywords or []) + call.kwargs return _no_context_variadic(node, scope.args.kwarg, nodes.Keyword, variadics) def _no_context_variadic_positional(node, scope): variadics = node.starargs + node.kwargs return _no_context_variadic(node, scope.args.vararg, nodes.Starred, variadics) def _no_context_variadic(node, variadic_name, variadic_type, variadics): """Verify if the given call node has variadic nodes without context This is a workaround for handling cases of nested call functions which don't have the specific call context at hand. Variadic arguments (variable positional arguments and variable keyword arguments) are inferred, inherently wrong, by astroid as a Tuple, respectively a Dict with empty elements. This can lead pylint to believe that a function call receives too few arguments. """ scope = node.scope() is_in_lambda_scope = not isinstance(scope, nodes.FunctionDef) and isinstance( scope, nodes.Lambda ) statement = node.statement(future=True) for name in statement.nodes_of_class(nodes.Name): if name.name != variadic_name: continue inferred = safe_infer(name) if isinstance(inferred, (nodes.List, nodes.Tuple)): length = len(inferred.elts) elif isinstance(inferred, nodes.Dict): length = len(inferred.items) else: continue if is_in_lambda_scope and isinstance(inferred.parent, nodes.Arguments): # The statement of the variadic will be the assignment itself, # so we need to go the lambda instead inferred_statement = inferred.parent.parent else: inferred_statement = inferred.statement(future=True) if not length and isinstance(inferred_statement, nodes.Lambda): is_in_starred_context = _has_parent_of_type(node, variadic_type, statement) used_as_starred_argument = any( variadic.value == name or variadic.value.parent_of(name) for variadic in variadics ) if is_in_starred_context or used_as_starred_argument: return True return False def _is_invalid_metaclass(metaclass): try: mro = metaclass.mro() except NotImplementedError: # Cannot have a metaclass which is not a newstyle class. 
return True else: if not any(is_builtin_object(cls) and cls.name == "type" for cls in mro): return True return False def _infer_from_metaclass_constructor(cls, func: nodes.FunctionDef): """Try to infer what the given *func* constructor is building :param astroid.FunctionDef func: A metaclass constructor. Metaclass definitions can be functions, which should accept three arguments, the name of the class, the bases of the class and the attributes. The function could return anything, but usually it should be a proper metaclass. :param astroid.ClassDef cls: The class for which the *func* parameter should generate a metaclass. :returns: The class generated by the function or None, if we couldn't infer it. :rtype: astroid.ClassDef """ context = astroid.context.InferenceContext() class_bases = nodes.List() class_bases.postinit(elts=cls.bases) attrs = nodes.Dict() local_names = [(name, values[-1]) for name, values in cls.locals.items()] attrs.postinit(local_names) builder_args = nodes.Tuple() builder_args.postinit([cls.name, class_bases, attrs]) context.callcontext = astroid.context.CallContext(builder_args) try: inferred = next(func.infer_call_result(func, context), None) except astroid.InferenceError: return None return inferred or None def _is_c_extension(module_node): return ( not astroid.modutils.is_standard_module(module_node.name) and not module_node.fully_defined() ) def _is_invalid_isinstance_type(arg): # Return True if we are sure that arg is not a type inferred = utils.safe_infer(arg) if not inferred: # Cannot infer it so skip it. return False if isinstance(inferred, nodes.Tuple): return any(_is_invalid_isinstance_type(elt) for elt in inferred.elts) if isinstance(inferred, nodes.ClassDef): return False if isinstance(inferred, astroid.Instance) and inferred.qname() == BUILTIN_TUPLE: return False return True class TypeChecker(BaseChecker): """try to find bugs in the code using type inference""" __implements__ = (IAstroidChecker,) # configuration section name name = "typecheck" # messages msgs = MSGS priority = -1 # configuration options options = ( ( "ignore-on-opaque-inference", { "default": True, "type": "yn", "metavar": "<y or n>", "help": "This flag controls whether pylint should warn about " "no-member and similar checks whenever an opaque object " "is returned when inferring. The inference can return " "multiple potential results while evaluating a Python object, " "but some branches might not be evaluated, which results in " "partial inference. In that case, it might be useful to still emit " "no-member and other checks for the rest of the inferred objects.", }, ), ( "mixin-class-rgx", { "default": ".*[Mm]ixin", "type": "regexp", "metavar": "<regexp>", "help": "Regex pattern to define which classes are considered mixins " "ignore-mixin-members is set to 'yes'", }, ), ( "ignore-mixin-members", { "default": True, "type": "yn", "metavar": "<y or n>", "help": "Tells whether missing members accessed in mixin " "class should be ignored. 
A class is considered mixin if its name matches " "the mixin-class-rgx option.", }, ), ( "ignore-none", { "default": True, "type": "yn", "metavar": "<y or n>", "help": "Tells whether to warn about missing members when the owner " "of the attribute is inferred to be None.", }, ), ( "ignored-modules", { "default": (), "type": "csv", "metavar": "<module names>", "help": "List of module names for which member attributes " "should not be checked (useful for modules/projects " "where namespaces are manipulated during runtime and " "thus existing member attributes cannot be " "deduced by static analysis). It supports qualified " "module names, as well as Unix pattern matching.", }, ), # the defaults here are *stdlib* names that (almost) always # lead to false positives, since their idiomatic use is # 'too dynamic' for pylint to grok. ( "ignored-classes", { "default": ("optparse.Values", "thread._local", "_thread._local"), "type": "csv", "metavar": "<members names>", "help": "List of class names for which member attributes " "should not be checked (useful for classes with " "dynamically set attributes). This supports " "the use of qualified names.", }, ), ( "generated-members", { "default": (), "type": "string", "metavar": "<members names>", "help": "List of members which are set dynamically and \ missed by pylint inference system, and so shouldn't trigger E1101 when \ accessed. Python regular expressions are accepted.", }, ), ( "contextmanager-decorators", { "default": ["contextlib.contextmanager"], "type": "csv", "metavar": "<decorator names>", "help": "List of decorators that produce context managers, " "such as contextlib.contextmanager. Add to this list " "to register other decorators that produce valid " "context managers.", }, ), ( "missing-member-hint-distance", { "default": 1, "type": "int", "metavar": "<member hint edit distance>", "help": "The minimum edit distance a name should have in order " "to be considered a similar match for a missing member name.", }, ), ( "missing-member-max-choices", { "default": 1, "type": "int", "metavar": "<member hint max choices>", "help": "The total number of similar names that should be taken in " "consideration when showing a hint for a missing member.", }, ), ( "missing-member-hint", { "default": True, "type": "yn", "metavar": "<missing member hint>", "help": "Show a hint with possible names when a member name was not " "found. 
The aspect of finding the hint is based on edit distance.", }, ), ( "signature-mutators", { "default": [], "type": "csv", "metavar": "<decorator names>", "help": "List of decorators that change the signature of " "a decorated function.", }, ), ) def open(self) -> None: py_version = get_global_option(self, "py-version") self._py310_plus = py_version >= (3, 10) self._mixin_class_rgx = get_global_option(self, "mixin-class-rgx") @astroid.decorators.cachedproperty def _suggestion_mode(self): return get_global_option(self, "suggestion-mode", default=True) @astroid.decorators.cachedproperty def _compiled_generated_members(self) -> Tuple[Pattern, ...]: # do this lazily since config not fully initialized in __init__ # generated_members may contain regular expressions # (surrounded by quote `"` and followed by a comma `,`) # REQUEST,aq_parent,"[a-zA-Z]+_set{1,2}"' => # ('REQUEST', 'aq_parent', '[a-zA-Z]+_set{1,2}') generated_members = self.config.generated_members if isinstance(generated_members, str): gen = shlex.shlex(generated_members) gen.whitespace += "," gen.wordchars += r"[]-+\.*?()|" generated_members = tuple(tok.strip('"') for tok in gen) return tuple(re.compile(exp) for exp in generated_members) @check_messages("keyword-arg-before-vararg") def visit_functiondef(self, node: nodes.FunctionDef) -> None: # check for keyword arg before varargs if node.args.vararg and node.args.defaults: self.add_message("keyword-arg-before-vararg", node=node, args=(node.name)) visit_asyncfunctiondef = visit_functiondef @check_messages("invalid-metaclass") def visit_classdef(self, node: nodes.ClassDef) -> None: def _metaclass_name(metaclass): # pylint: disable=unidiomatic-typecheck if isinstance(metaclass, (nodes.ClassDef, nodes.FunctionDef)): return metaclass.name if type(metaclass) is bases.Instance: # Really do mean type, not isinstance, since subclasses of bases.Instance # like Const or Dict should use metaclass.as_string below. return str(metaclass) return metaclass.as_string() metaclass = node.declared_metaclass() if not metaclass: return if isinstance(metaclass, nodes.FunctionDef): # Try to infer the result. metaclass = _infer_from_metaclass_constructor(node, metaclass) if not metaclass: # Don't do anything if we cannot infer the result. return if isinstance(metaclass, nodes.ClassDef): if _is_invalid_metaclass(metaclass): self.add_message( "invalid-metaclass", node=node, args=(_metaclass_name(metaclass),) ) else: self.add_message( "invalid-metaclass", node=node, args=(_metaclass_name(metaclass),) ) def visit_assignattr(self, node: nodes.AssignAttr) -> None: if isinstance(node.assign_type(), nodes.AugAssign): self.visit_attribute(node) def visit_delattr(self, node: nodes.DelAttr) -> None: self.visit_attribute(node) @check_messages("no-member", "c-extension-no-member") def visit_attribute(self, node: nodes.Attribute) -> None: """check that the accessed attribute exists to avoid too much false positives for now, we'll consider the code as correct if a single of the inferred nodes has the accessed attribute. 
function/method, super call and metaclasses are ignored """ if any( pattern.match(name) for name in (node.attrname, node.as_string()) for pattern in self._compiled_generated_members ): return try: inferred = list(node.expr.infer()) except astroid.InferenceError: return # list of (node, nodename) which are missing the attribute missingattr = set() non_opaque_inference_results = [ owner for owner in inferred if owner is not astroid.Uninferable and not isinstance(owner, nodes.Unknown) ] if ( len(non_opaque_inference_results) != len(inferred) and self.config.ignore_on_opaque_inference ): # There is an ambiguity in the inference. Since we can't # make sure that we won't emit a false positive, we just stop # whenever the inference returns an opaque inference object. return for owner in non_opaque_inference_results: name = getattr(owner, "name", None) if _is_owner_ignored( owner, name, self.config.ignored_classes, self.config.ignored_modules ): continue qualname = f"{owner.pytype()}.{node.attrname}" if any( pattern.match(qualname) for pattern in self._compiled_generated_members ): return try: if not [ n for n in owner.getattr(node.attrname) if not isinstance(n.statement(future=True), nodes.AugAssign) ]: missingattr.add((owner, name)) continue except astroid.exceptions.StatementMissing: continue except AttributeError: continue except astroid.DuplicateBasesError: continue except astroid.NotFoundError: # This can't be moved before the actual .getattr call, # because there can be more values inferred and we are # stopping after the first one which has the attribute in question. # The problem is that if the first one has the attribute, # but we continue to the next values which doesn't have the # attribute, then we'll have a false positive. # So call this only after the call has been made. if not _emit_no_member( node, owner, name, self._mixin_class_rgx, ignored_mixins=self.config.ignore_mixin_members, ignored_none=self.config.ignore_none, ): continue missingattr.add((owner, name)) continue # stop on the first found break else: # we have not found any node with the attributes, display the # message for inferred nodes done = set() for owner, name in missingattr: if isinstance(owner, astroid.Instance): actual = owner._proxied else: actual = owner if actual in done: continue done.add(actual) msg, hint = self._get_nomember_msgid_hint(node, owner) self.add_message( msg, node=node, args=(owner.display_type(), name, node.attrname, hint), confidence=INFERENCE, ) def _get_nomember_msgid_hint(self, node, owner): suggestions_are_possible = self._suggestion_mode and isinstance( owner, nodes.Module ) if suggestions_are_possible and _is_c_extension(owner): msg = "c-extension-no-member" hint = "" else: msg = "no-member" if self.config.missing_member_hint: hint = _missing_member_hint( owner, node.attrname, self.config.missing_member_hint_distance, self.config.missing_member_max_choices, ) else: hint = "" return msg, hint @check_messages( "assignment-from-no-return", "assignment-from-none", "non-str-assignment-to-dunder-name", ) def visit_assign(self, node: nodes.Assign) -> None: """ Process assignments in the AST. 
""" self._check_assignment_from_function_call(node) self._check_dundername_is_string(node) def _check_assignment_from_function_call(self, node): """check that if assigning to a function call, the function is possibly returning something valuable """ if not isinstance(node.value, nodes.Call): return function_node = safe_infer(node.value.func) funcs = (nodes.FunctionDef, astroid.UnboundMethod, astroid.BoundMethod) if not isinstance(function_node, funcs): return # Unwrap to get the actual function object if isinstance(function_node, astroid.BoundMethod) and isinstance( function_node._proxied, astroid.UnboundMethod ): function_node = function_node._proxied._proxied # Make sure that it's a valid function that we can analyze. # Ordered from less expensive to more expensive checks. # pylint: disable=too-many-boolean-expressions if ( not function_node.is_function or isinstance(function_node, nodes.AsyncFunctionDef) or function_node.decorators or function_node.is_generator() or function_node.is_abstract(pass_is_abstract=False) or utils.is_error(function_node) or not function_node.root().fully_defined() ): return returns = list( function_node.nodes_of_class(nodes.Return, skip_klass=nodes.FunctionDef) ) if not returns: self.add_message("assignment-from-no-return", node=node) else: for rnode in returns: if not ( isinstance(rnode.value, nodes.Const) and rnode.value.value is None or rnode.value is None ): break else: self.add_message("assignment-from-none", node=node) def _check_dundername_is_string(self, node): """ Check a string is assigned to self.__name__ """ # Check the left hand side of the assignment is <something>.__name__ lhs = node.targets[0] if not isinstance(lhs, nodes.AssignAttr): return if not lhs.attrname == "__name__": return # If the right hand side is not a string rhs = node.value if isinstance(rhs, nodes.Const) and isinstance(rhs.value, str): return inferred = utils.safe_infer(rhs) if not inferred: return if not (isinstance(inferred, nodes.Const) and isinstance(inferred.value, str)): # Add the message self.add_message("non-str-assignment-to-dunder-name", node=node) def _check_uninferable_call(self, node): """ Check that the given uninferable Call node does not call an actual function. """ if not isinstance(node.func, nodes.Attribute): return # Look for properties. First, obtain # the lhs of the Attribute node and search the attribute # there. If that attribute is a property or a subclass of properties, # then most likely it's not callable. expr = node.func.expr klass = safe_infer(expr) if ( klass is None or klass is astroid.Uninferable or not isinstance(klass, astroid.Instance) ): return try: attrs = klass._proxied.getattr(node.func.attrname) except astroid.NotFoundError: return for attr in attrs: if attr is astroid.Uninferable: continue if not isinstance(attr, nodes.FunctionDef): continue # Decorated, see if it is decorated with a property. # Also, check the returns and see if they are callable. if decorated_with_property(attr): try: all_returns_are_callable = all( return_node.callable() or return_node is astroid.Uninferable for return_node in attr.infer_call_result(node) ) except astroid.InferenceError: continue if not all_returns_are_callable: self.add_message( "not-callable", node=node, args=node.func.as_string() ) break def _check_argument_order(self, node, call_site, called, called_param_names): """Match the supplied argument names against the function parameters. Warn if some argument names are not in the same order as they are in the function signature. 
""" # Check for called function being an object instance function # If so, ignore the initial 'self' argument in the signature try: is_classdef = isinstance(called.parent, nodes.ClassDef) if is_classdef and called_param_names[0] == "self": called_param_names = called_param_names[1:] except IndexError: return try: # extract argument names, if they have names calling_parg_names = [p.name for p in call_site.positional_arguments] # Additionally get names of keyword arguments to use in a full match # against parameters calling_kwarg_names = [ arg.name for arg in call_site.keyword_arguments.values() ] except AttributeError: # the type of arg does not provide a `.name`. In this case we # stop checking for out-of-order arguments because it is only relevant # for named variables. return # Don't check for ordering if there is an unmatched arg or param arg_set = set(calling_parg_names) | set(calling_kwarg_names) param_set = set(called_param_names) if arg_set != param_set: return # Warn based on the equality of argument ordering if calling_parg_names != called_param_names[: len(calling_parg_names)]: self.add_message("arguments-out-of-order", node=node, args=()) def _check_isinstance_args(self, node): if len(node.args) != 2: # isinstance called with wrong number of args return second_arg = node.args[1] if _is_invalid_isinstance_type(second_arg): self.add_message("isinstance-second-argument-not-valid-type", node=node) # pylint: disable=too-many-branches,too-many-locals @check_messages(*(list(MSGS.keys()))) def visit_call(self, node: nodes.Call) -> None: """check that called functions/methods are inferred to callable objects, and that the arguments passed to the function match the parameters in the inferred function's definition """ called = safe_infer(node.func) # only function, generator and object defining __call__ are allowed # Ignore instances of descriptors since astroid cannot properly handle them # yet if called and not called.callable(): if isinstance(called, astroid.Instance) and ( not has_known_bases(called) or ( called.parent is not None and isinstance(called.scope(), nodes.ClassDef) and "__get__" in called.locals ) ): # Don't emit if we can't make sure this object is callable. pass else: self.add_message("not-callable", node=node, args=node.func.as_string()) else: self._check_uninferable_call(node) try: called, implicit_args, callable_name = _determine_callable(called) except ValueError: # Any error occurred during determining the function type, most of # those errors are handled by different warnings. return if called.args.args is None: if called.name == "isinstance": # Verify whether second argument of isinstance is a valid type self._check_isinstance_args(node) # Built-in functions have no argument information. return if len(called.argnames()) != len(set(called.argnames())): # Duplicate parameter name (see duplicate-argument). We can't really # make sense of the function call in this case, so just return. return # Build the set of keyword arguments, checking for duplicate keywords, # and count the positional arguments. call_site = astroid.arguments.CallSite.from_call(node) # Warn about duplicated keyword arguments, such as `f=24, **{'f': 24}` for keyword in call_site.duplicated_keywords: self.add_message("repeated-keyword", node=node, args=(keyword,)) if call_site.has_invalid_arguments() or call_site.has_invalid_keywords(): # Can't make sense of this. return # Has the function signature changed in ways we cannot reliably detect? 
if hasattr(called, "decorators") and decorated_with( called, self.config.signature_mutators ): return num_positional_args = len(call_site.positional_arguments) keyword_args = list(call_site.keyword_arguments.keys()) overload_function = is_overload_stub(called) # Determine if we don't have a context for our call and we use variadics. node_scope = node.scope() if isinstance(node_scope, (nodes.Lambda, nodes.FunctionDef)): has_no_context_positional_variadic = _no_context_variadic_positional( node, node_scope ) has_no_context_keywords_variadic = _no_context_variadic_keywords( node, node_scope ) else: has_no_context_positional_variadic = ( has_no_context_keywords_variadic ) = False # These are coming from the functools.partial implementation in astroid already_filled_positionals = getattr(called, "filled_positionals", 0) already_filled_keywords = getattr(called, "filled_keywords", {}) keyword_args += list(already_filled_keywords) num_positional_args += implicit_args + already_filled_positionals # Analyze the list of formal parameters. args = list(itertools.chain(called.args.posonlyargs or (), called.args.args)) num_mandatory_parameters = len(args) - len(called.args.defaults) parameters: List[List[Any]] = [] parameter_name_to_index = {} for i, arg in enumerate(args): if isinstance(arg, nodes.Tuple): name = None # Don't store any parameter names within the tuple, since those # are not assignable from keyword arguments. else: assert isinstance(arg, nodes.AssignName) # This occurs with: # def f( (a), (b) ): pass name = arg.name parameter_name_to_index[name] = i if i >= num_mandatory_parameters: defval = called.args.defaults[i - num_mandatory_parameters] else: defval = None parameters.append([(name, defval), False]) kwparams = {} for i, arg in enumerate(called.args.kwonlyargs): if isinstance(arg, nodes.Keyword): name = arg.arg else: assert isinstance(arg, nodes.AssignName) name = arg.name kwparams[name] = [called.args.kw_defaults[i], False] self._check_argument_order( node, call_site, called, [p[0][0] for p in parameters] ) # 1. Match the positional arguments. for i in range(num_positional_args): if i < len(parameters): parameters[i][1] = True elif called.args.vararg is not None: # The remaining positional arguments get assigned to the *args # parameter. break elif not overload_function: # Too many positional arguments. self.add_message( "too-many-function-args", node=node, args=(callable_name,) ) break # 2. Match the keyword arguments. for keyword in keyword_args: if keyword in parameter_name_to_index: i = parameter_name_to_index[keyword] if parameters[i][1]: # Duplicate definition of function parameter. # Might be too hardcoded, but this can actually # happen when using str.format and `self` is passed # by keyword argument, as in `.format(self=self)`. # It's perfectly valid to so, so we're just skipping # it if that's the case. if not (keyword == "self" and called.qname() in STR_FORMAT): self.add_message( "redundant-keyword-arg", node=node, args=(keyword, callable_name), ) else: parameters[i][1] = True elif keyword in kwparams: if kwparams[keyword][1]: # Duplicate definition of function parameter. self.add_message( "redundant-keyword-arg", node=node, args=(keyword, callable_name), ) else: kwparams[keyword][1] = True elif called.args.kwarg is not None: # The keyword argument gets assigned to the **kwargs parameter. pass elif not overload_function: # Unexpected keyword argument. self.add_message( "unexpected-keyword-arg", node=node, args=(keyword, callable_name) ) # 3. Match the **kwargs, if any. 
if node.kwargs: for i, [(name, defval), assigned] in enumerate(parameters): # Assume that *kwargs provides values for all remaining # unassigned named parameters. if name is not None: parameters[i][1] = True else: # **kwargs can't assign to tuples. pass # Check that any parameters without a default have been assigned # values. for [(name, defval), assigned] in parameters: if (defval is None) and not assigned: display_name = "<tuple>" if name is None else repr(name) if not has_no_context_positional_variadic and not overload_function: self.add_message( "no-value-for-parameter", node=node, args=(display_name, callable_name), ) for name, val in kwparams.items(): defval, assigned = val if ( defval is None and not assigned and not has_no_context_keywords_variadic and not overload_function ): self.add_message("missing-kwoa", node=node, args=(name, callable_name)) def _check_invalid_sequence_index(self, subscript: nodes.Subscript): # Look for index operations where the parent is a sequence type. # If the types can be determined, only allow indices to be int, # slice or instances with __index__. parent_type = safe_infer(subscript.value) if not isinstance( parent_type, (nodes.ClassDef, astroid.Instance) ) or not has_known_bases(parent_type): return None # Determine what method on the parent this index will use # The parent of this node will be a Subscript, and the parent of that # node determines if the Subscript is a get, set, or delete operation. if subscript.ctx is astroid.Store: methodname = "__setitem__" elif subscript.ctx is astroid.Del: methodname = "__delitem__" else: methodname = "__getitem__" # Check if this instance's __getitem__, __setitem__, or __delitem__, as # appropriate to the statement, is implemented in a builtin sequence # type. This way we catch subclasses of sequence types but skip classes # that override __getitem__ and which may allow non-integer indices. try: methods = astroid.interpreter.dunder_lookup.lookup(parent_type, methodname) if methods is astroid.Uninferable: return None itemmethod = methods[0] except ( astroid.AttributeInferenceError, IndexError, ): return None if ( not isinstance(itemmethod, nodes.FunctionDef) or itemmethod.root().name != "builtins" or not itemmethod.parent or itemmethod.parent.name not in SEQUENCE_TYPES ): return None # For ExtSlice objects coming from visit_extslice, no further # inference is necessary, since if we got this far the ExtSlice # is an error. if isinstance(subscript.value, nodes.ExtSlice): index_type = subscript.value else: index_type = safe_infer(subscript.slice) if index_type is None or index_type is astroid.Uninferable: return None # Constants must be of type int if isinstance(index_type, nodes.Const): if isinstance(index_type.value, int): return None # Instance values must be int, slice, or have an __index__ method elif isinstance(index_type, astroid.Instance): if index_type.pytype() in {"builtins.int", "builtins.slice"}: return None try: index_type.getattr("__index__") return None except astroid.NotFoundError: pass elif isinstance(index_type, nodes.Slice): # A slice can be present # here after inferring the index node, which could # be a `slice(...)` call for instance. 
return self._check_invalid_slice_index(index_type) # Anything else is an error self.add_message("invalid-sequence-index", node=subscript) return None @check_messages("invalid-sequence-index") def visit_extslice(self, node: nodes.ExtSlice) -> None: if not node.parent or not hasattr(node.parent, "value"): return None # Check extended slice objects as if they were used as a sequence # index to check if the object being sliced can support them return self._check_invalid_sequence_index(node.parent) def _check_invalid_slice_index(self, node: nodes.Slice) -> None: # Check the type of each part of the slice invalid_slices_nodes: List[nodes.NodeNG] = [] for index in (node.lower, node.upper, node.step): if index is None: continue index_type = safe_infer(index) if index_type is None or index_type is astroid.Uninferable: continue # Constants must of type int or None if isinstance(index_type, nodes.Const): if isinstance(index_type.value, (int, type(None))): continue # Instance values must be of type int, None or an object # with __index__ elif isinstance(index_type, astroid.Instance): if index_type.pytype() in {"builtins.int", "builtins.NoneType"}: continue try: index_type.getattr("__index__") return except astroid.NotFoundError: pass invalid_slices_nodes.append(index) if not invalid_slices_nodes: return # Anything else is an error, unless the object that is indexed # is a custom object, which knows how to handle this kind of slices parent = node.parent if isinstance(parent, nodes.ExtSlice): parent = parent.parent if isinstance(parent, nodes.Subscript): inferred = safe_infer(parent.value) if inferred is None or inferred is astroid.Uninferable: # Don't know what this is return known_objects = ( nodes.List, nodes.Dict, nodes.Tuple, astroid.objects.FrozenSet, nodes.Set, ) if not isinstance(inferred, known_objects): # Might be an instance that knows how to handle this slice object return for snode in invalid_slices_nodes: self.add_message("invalid-slice-index", node=snode) @check_messages("not-context-manager") def visit_with(self, node: nodes.With) -> None: for ctx_mgr, _ in node.items: context = astroid.context.InferenceContext() inferred = safe_infer(ctx_mgr, context=context) if inferred is None or inferred is astroid.Uninferable: continue if isinstance(inferred, astroid.bases.Generator): # Check if we are dealing with a function decorated # with contextlib.contextmanager. if decorated_with( inferred.parent, self.config.contextmanager_decorators ): continue # If the parent of the generator is not the context manager itself, # that means that it could have been returned from another # function which was the real context manager. # The following approach is more of a hack rather than a real # solution: walk all the inferred statements for the # given *ctx_mgr* and if you find one function scope # which is decorated, consider it to be the real # manager and give up, otherwise emit not-context-manager. # See the test file for not_context_manager for a couple # of self explaining tests. 
# Retrieve node from all previusly visited nodes in the the inference history context_path_names: Iterator[Any] = filter( None, _unflatten(context.path) ) inferred_paths = _flatten_container( safe_infer(path) for path in context_path_names ) for inferred_path in inferred_paths: if not inferred_path: continue scope = inferred_path.scope() if not isinstance(scope, nodes.FunctionDef): continue if decorated_with(scope, self.config.contextmanager_decorators): break else: self.add_message( "not-context-manager", node=node, args=(inferred.name,) ) else: try: inferred.getattr("__enter__") inferred.getattr("__exit__") except astroid.NotFoundError: if isinstance(inferred, astroid.Instance): # If we do not know the bases of this class, # just skip it. if not has_known_bases(inferred): continue # Just ignore mixin classes. if self.config.ignore_mixin_members: if inferred.name[-5:].lower() == "mixin": continue self.add_message( "not-context-manager", node=node, args=(inferred.name,) ) @check_messages("invalid-unary-operand-type") def visit_unaryop(self, node: nodes.UnaryOp) -> None: """Detect TypeErrors for unary operands.""" for error in node.type_errors(): # Let the error customize its output. self.add_message("invalid-unary-operand-type", args=str(error), node=node) @check_messages("unsupported-binary-operation") def visit_binop(self, node: nodes.BinOp) -> None: if node.op == "|": self._detect_unsupported_alternative_union_syntax(node) def _detect_unsupported_alternative_union_syntax(self, node: nodes.BinOp) -> None: """Detect if unsupported alternative Union syntax (PEP 604) was used.""" if self._py310_plus: # 310+ supports the new syntax return if isinstance( node.parent, TYPE_ANNOTATION_NODES_TYPES ) and not is_postponed_evaluation_enabled(node): # Use in type annotations only allowed if # postponed evaluation is enabled. self._check_unsupported_alternative_union_syntax(node) if isinstance( node.parent, ( nodes.Assign, nodes.Call, nodes.Keyword, nodes.Dict, nodes.Tuple, nodes.Set, nodes.List, nodes.BinOp, ), ): # Check other contexts the syntax might appear, but are invalid. # Make sure to filter context if postponed evaluation is enabled # and parent is allowed node type. allowed_nested_syntax = False if is_postponed_evaluation_enabled(node): parent_node = node.parent while True: if isinstance(parent_node, TYPE_ANNOTATION_NODES_TYPES): allowed_nested_syntax = True break parent_node = parent_node.parent if isinstance(parent_node, nodes.Module): break if not allowed_nested_syntax: self._check_unsupported_alternative_union_syntax(node) def _check_unsupported_alternative_union_syntax(self, node: nodes.BinOp) -> None: """Check if left or right node is of type `type`.""" msg = "unsupported operand type(s) for |" for n in (node.left, node.right): n = astroid.helpers.object_type(n) if isinstance(n, nodes.ClassDef) and is_classdef_type(n): self.add_message("unsupported-binary-operation", args=msg, node=node) break @check_messages("unsupported-binary-operation") def _visit_binop(self, node: nodes.BinOp) -> None: """Detect TypeErrors for binary arithmetic operands.""" self._check_binop_errors(node) @check_messages("unsupported-binary-operation") def _visit_augassign(self, node: nodes.AugAssign) -> None: """Detect TypeErrors for augmented binary arithmetic operands.""" self._check_binop_errors(node) def _check_binop_errors(self, node): for error in node.type_errors(): # Let the error customize its output. 
if any( isinstance(obj, nodes.ClassDef) and not has_known_bases(obj) for obj in (error.left_type, error.right_type) ): continue self.add_message("unsupported-binary-operation", args=str(error), node=node) def _check_membership_test(self, node): if is_inside_abstract_class(node): return if is_comprehension(node): return inferred = safe_infer(node) if inferred is None or inferred is astroid.Uninferable: return if not supports_membership_test(inferred): self.add_message( "unsupported-membership-test", args=node.as_string(), node=node ) @check_messages("unsupported-membership-test") def visit_compare(self, node: nodes.Compare) -> None: if len(node.ops) != 1: return op, right = node.ops[0] if op in {"in", "not in"}: self._check_membership_test(right) @check_messages( "unsubscriptable-object", "unsupported-assignment-operation", "unsupported-delete-operation", "unhashable-dict-key", "invalid-sequence-index", "invalid-slice-index", ) def visit_subscript(self, node: nodes.Subscript) -> None: self._check_invalid_sequence_index(node) supported_protocol: Optional[Callable[[Any, Any], bool]] = None if isinstance(node.value, (nodes.ListComp, nodes.DictComp)): return if isinstance(node.value, nodes.Dict): # Assert dict key is hashable inferred = safe_infer(node.slice) if inferred and inferred != astroid.Uninferable: try: hash_fn = next(inferred.igetattr("__hash__")) except astroid.InferenceError: pass else: if getattr(hash_fn, "value", True) is None: self.add_message("unhashable-dict-key", node=node.value) if node.ctx == astroid.Load: supported_protocol = supports_getitem msg = "unsubscriptable-object" elif node.ctx == astroid.Store: supported_protocol = supports_setitem msg = "unsupported-assignment-operation" elif node.ctx == astroid.Del: supported_protocol = supports_delitem msg = "unsupported-delete-operation" if isinstance(node.value, nodes.SetComp): self.add_message(msg, args=node.value.as_string(), node=node.value) return if is_inside_abstract_class(node): return inferred = safe_infer(node.value) if inferred is None or inferred is astroid.Uninferable: return if getattr(inferred, "decorators", None): first_decorator = astroid.helpers.safe_infer(inferred.decorators.nodes[0]) if isinstance(first_decorator, nodes.ClassDef): inferred = first_decorator.instantiate_class() else: return # It would be better to handle function # decorators, but let's start slow. if supported_protocol and not supported_protocol(inferred, node): self.add_message(msg, args=node.value.as_string(), node=node.value) @check_messages("dict-items-missing-iter") def visit_for(self, node: nodes.For) -> None: if not isinstance(node.target, nodes.Tuple): # target is not a tuple return if not len(node.target.elts) == 2: # target is not a tuple of two elements return iterable = node.iter if not isinstance(iterable, nodes.Name): # it's not a bare variable return inferred = safe_infer(iterable) if not inferred: return if not isinstance(inferred, nodes.Dict): # the iterable is not a dict return if all(isinstance(i[0], nodes.Tuple) for i in inferred.items): # if all keys are tuples return self.add_message("dict-iter-missing-items", node=node) class IterableChecker(BaseChecker): """ Checks for non-iterables used in an iterable context. Contexts include: - for-statement - starargs in function call - `yield from`-statement - list, dict and set comprehensions - generator expressions Also checks for non-mappings in function call kwargs. 
""" __implements__ = (IAstroidChecker,) name = "typecheck" msgs = { "E1133": ( "Non-iterable value %s is used in an iterating context", "not-an-iterable", "Used when a non-iterable value is used in place where " "iterable is expected", ), "E1134": ( "Non-mapping value %s is used in a mapping context", "not-a-mapping", "Used when a non-mapping value is used in place where " "mapping is expected", ), } @staticmethod def _is_asyncio_coroutine(node): if not isinstance(node, nodes.Call): return False inferred_func = safe_infer(node.func) if not isinstance(inferred_func, nodes.FunctionDef): return False if not inferred_func.decorators: return False for decorator in inferred_func.decorators.nodes: inferred_decorator = safe_infer(decorator) if not isinstance(inferred_decorator, nodes.FunctionDef): continue if inferred_decorator.qname() != ASYNCIO_COROUTINE: continue return True return False def _check_iterable(self, node, check_async=False): if is_inside_abstract_class(node) or is_comprehension(node): return inferred = safe_infer(node) if not inferred: return if not is_iterable(inferred, check_async=check_async): self.add_message("not-an-iterable", args=node.as_string(), node=node) def _check_mapping(self, node): if is_inside_abstract_class(node): return if isinstance(node, nodes.DictComp): return inferred = safe_infer(node) if inferred is None or inferred is astroid.Uninferable: return if not is_mapping(inferred): self.add_message("not-a-mapping", args=node.as_string(), node=node) @check_messages("not-an-iterable") def visit_for(self, node: nodes.For) -> None: self._check_iterable(node.iter) @check_messages("not-an-iterable") def visit_asyncfor(self, node: nodes.AsyncFor) -> None: self._check_iterable(node.iter, check_async=True) @check_messages("not-an-iterable") def visit_yieldfrom(self, node: nodes.YieldFrom) -> None: if self._is_asyncio_coroutine(node.value): return self._check_iterable(node.value) @check_messages("not-an-iterable", "not-a-mapping") def visit_call(self, node: nodes.Call) -> None: for stararg in node.starargs: self._check_iterable(stararg.value) for kwarg in node.kwargs: self._check_mapping(kwarg.value) @check_messages("not-an-iterable") def visit_listcomp(self, node: nodes.ListComp) -> None: for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_dictcomp(self, node: nodes.DictComp) -> None: for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_setcomp(self, node: nodes.SetComp) -> None: for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("not-an-iterable") def visit_generatorexp(self, node: nodes.GeneratorExp) -> None: for gen in node.generators: self._check_iterable(gen.iter, check_async=gen.is_async) @check_messages("await-outside-async") def visit_await(self, node: nodes.Await) -> None: self._check_await_outside_coroutine(node) def _check_await_outside_coroutine(self, node: nodes.Await) -> None: node_scope = node.scope() while not isinstance(node_scope, nodes.Module): if isinstance(node_scope, nodes.AsyncFunctionDef): return if isinstance(node_scope, nodes.FunctionDef): break node_scope = node_scope.parent.scope() self.add_message("await-outside-async", node=node) def register(linter): """required method to auto register this checker""" linter.register_checker(TypeChecker(linter)) linter.register_checker(IterableChecker(linter))
idx: 1
id: 19,197
I'm wondering if we should add this to a `utils` module in `astroid`. Porting the util to `pylint` doesn't make sense as we would need to duplicate the globals that are being used in the function, but importing from `brain` also feels weird. @Pierre-Sassoulas Do you have an opinion?
proj: PyCQA-pylint
lang: py
@@ -54,6 +54,12 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Http InitializeHeaders(); + if (_corruptedRequest) + { + await ProduceEnd(); + return; + } + while (!_requestProcessingStopping && !TakeMessageHeaders(SocketInput, FrameRequestHeaders)) { if (SocketInput.RemoteIntakeFin)
y: 1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Threading; using System.Threading.Tasks; using Microsoft.AspNetCore.Hosting.Server; using Microsoft.AspNetCore.Server.Kestrel.Exceptions; using Microsoft.Extensions.Logging; namespace Microsoft.AspNetCore.Server.Kestrel.Http { public class Frame<TContext> : Frame { private readonly IHttpApplication<TContext> _application; public Frame(IHttpApplication<TContext> application, ConnectionContext context) : base(context) { _application = application; } /// <summary> /// Primary loop which consumes socket input, parses it for protocol framing, and invokes the /// application delegate for as long as the socket is intended to remain open. /// The resulting Task from this loop is preserved in a field which is used when the server needs /// to drain and close all currently active connections. /// </summary> public override async Task RequestProcessingAsync() { try { while (!_requestProcessingStopping) { while (!_requestProcessingStopping && !TakeStartLine(SocketInput)) { if (SocketInput.RemoteIntakeFin) { // We need to attempt to consume start lines and headers even after // SocketInput.RemoteIntakeFin is set to true to ensure we don't close a // connection without giving the application a chance to respond to a request // sent immediately before the a FIN from the client. if (TakeStartLine(SocketInput)) { break; } return; } await SocketInput; } InitializeHeaders(); while (!_requestProcessingStopping && !TakeMessageHeaders(SocketInput, FrameRequestHeaders)) { if (SocketInput.RemoteIntakeFin) { // We need to attempt to consume start lines and headers even after // SocketInput.RemoteIntakeFin is set to true to ensure we don't close a // connection without giving the application a chance to respond to a request // sent immediately before the a FIN from the client. if (TakeMessageHeaders(SocketInput, FrameRequestHeaders)) { break; } return; } await SocketInput; } if (!_requestProcessingStopping) { var messageBody = MessageBody.For(HttpVersion, FrameRequestHeaders, this); _keepAlive = messageBody.RequestKeepAlive; InitializeStreams(messageBody); _abortedCts = null; _manuallySetRequestAbortToken = null; if (!_corruptedRequest) { var context = _application.CreateContext(this); try { await _application.ProcessRequestAsync(context).ConfigureAwait(false); } catch (Exception ex) { ReportApplicationError(ex); } finally { // Trigger OnStarting if it hasn't been called yet and the app hasn't // already failed. If an OnStarting callback throws we can go through // our normal error handling in ProduceEnd. // https://github.com/aspnet/KestrelHttpServer/issues/43 if (!_responseStarted && _applicationException == null && _onStarting != null) { await FireOnStarting(); } PauseStreams(); if (_onCompleted != null) { await FireOnCompleted(); } _application.DisposeContext(context, _applicationException); } // If _requestAbort is set, the connection has already been closed. if (Volatile.Read(ref _requestAborted) == 0) { ResumeStreams(); if (_keepAlive && !_corruptedRequest) { try { // Finish reading the request body in case the app did not. 
await messageBody.Consume(); } catch (BadHttpRequestException ex) { ReportCorruptedHttpRequest(ex); } } await ProduceEnd(); } StopStreams(); } if (!_keepAlive || _corruptedRequest) { // End the connection for non keep alive and Bad Requests // as data incoming may have been thrown off return; } } Reset(); } } catch (Exception ex) { Log.LogWarning(0, ex, "Connection processing ended abnormally"); } finally { try { ResetComponents(); _abortedCts = null; // If _requestAborted is set, the connection has already been closed. if (Volatile.Read(ref _requestAborted) == 0) { ConnectionControl.End(ProduceEndType.SocketShutdown); } } catch (Exception ex) { Log.LogWarning(0, ex, "Connection shutdown abnormally"); } } } } }
idx: 1
id: 8,477
This seems different than what we do for corrupted request headers. I would like to determine the correct behavior and consolidate this logic.
proj: aspnet-KestrelHttpServer
lang: .cs
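The Kestrel review comment above asks whether the new early exit for a corrupted start line should behave the same way as the existing handling of corrupted request headers, and whether the two paths can be consolidated. The sketch below is only an illustration of what such a consolidation could look like, not the change under review: `_corruptedRequest` and `ProduceEnd()` are the members used in the diff and in `Frame<TContext>.RequestProcessingAsync`, while the stand-in class and the helper name are hypothetical.

using System.Threading.Tasks;

// Stand-in for the real Frame class, reduced to the two members the sketch needs.
public abstract class FrameSketch
{
    // Set elsewhere when the request is found to be malformed.
    protected bool _corruptedRequest;

    // Writes out whatever response has been prepared for this request.
    protected abstract Task ProduceEnd();

    // Hypothetical helper: a single exit point for a request flagged as corrupted,
    // regardless of whether the start line or the headers triggered the flag.
    protected async Task<bool> TryCompleteCorruptedRequestAsync()
    {
        if (!_corruptedRequest)
        {
            return false;
        }

        // Finish the response for the corrupted request, then signal the caller
        // to stop processing this request.
        await ProduceEnd();
        return true;
    }
}

With a helper along these lines, both the start-line path in the diff and the later header path could call `if (await TryCompleteCorruptedRequestAsync()) return;`, which would keep the corrupted-request behavior in one place.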
@@ -292,7 +292,7 @@ class _RunData(object): direct_access_sources = set() for backend in backend_service.backends: instance_group = self.find_instance_group_by_url( - backend.get('group')) + backend.get('resourceGroup')) if not instance_group: continue
y: 1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Scanner for the Identity-Aware Proxy rules engine.""" import collections from google.cloud.forseti.common.gcp_type import ( backend_service as backend_service_type) from google.cloud.forseti.common.gcp_type import ( firewall_rule as firewall_rule_type) from google.cloud.forseti.common.gcp_type import instance as instance_type from google.cloud.forseti.common.gcp_type import ( instance_group as instance_group_type) from google.cloud.forseti.common.gcp_type import ( instance_group_manager as instance_group_manager_type) from google.cloud.forseti.common.gcp_type import ( instance_template as instance_template_type) from google.cloud.forseti.common.gcp_type import project as project_type from google.cloud.forseti.common.gcp_type import network as network_type from google.cloud.forseti.common.gcp_type.resource import ResourceType from google.cloud.forseti.common.util import logger from google.cloud.forseti.scanner.audit import iap_rules_engine from google.cloud.forseti.scanner.scanners import base_scanner LOGGER = logger.get_logger(__name__) IapResource = collections.namedtuple( 'IapResource', ['project_full_name', 'backend_service', 'alternate_services', 'direct_access_sources', 'iap_enabled'] ) NetworkPort = collections.namedtuple( 'NetworkPort', ['network', 'port']) class _RunData(object): """Information needed to compute IAP properties.""" def __init__(self, backend_services, firewall_rules, instances, instance_groups, instance_group_managers, instance_templates): """Initialize. 
Args: backend_services (list): BackendService firewall_rules (list): FirewallRule instances (list): Instance instance_groups (list): InstanceGroup instance_group_managers (list): InstanceGroupMananger instance_templates (list): InstanceTemplate """ self.resource_counts = { ResourceType.BACKEND_SERVICE: len(backend_services), ResourceType.FIREWALL_RULE: len(firewall_rules), ResourceType.INSTANCE: len(instances), ResourceType.INSTANCE_GROUP: len(instance_groups), ResourceType.INSTANCE_GROUP_MANAGER: len(instance_group_managers), ResourceType.INSTANCE_TEMPLATE: len(instance_templates), } self.backend_services = backend_services self.firewall_rules = firewall_rules self.instances_by_key = dict((instance.key, instance) for instance in instances) self.instance_groups_by_key = dict((instance_group.key, instance_group) for instance_group in instance_groups) self.instance_templates_by_group_key = {} instance_templates_by_key = dict((instance_template.key, instance_template) for instance_template in instance_templates) for instance_group_manager in instance_group_managers: instance_group_url = instance_group_manager.instance_group if not instance_group_url: continue instance_group_key = instance_group_type.Key.from_url( instance_group_url) instance_template_url = instance_group_manager.instance_template instance_template_key = instance_template_type.Key.from_url( instance_template_url) instance_template = instance_templates_by_key.get( instance_template_key) if instance_template: self.instance_templates_by_group_key[ instance_group_key] = instance_template @staticmethod def instance_group_network_port(backend_service, instance_group): """Which network and port is used for a service's backends? A backend service can communicate with its backends on a different network and port number for each of the service's backend instance groups. Args: backend_service (BackendService): service to find port for instance_group (InstanceGroup): group to find port for Returns: NetworkPort: how the service communicates with backends """ # Field 'port' from backend service has been deprecated in favor of # portName. PortName is required when the load balancing scheme is # EXTERNAL. When the load balancing scheme is INTERNAL, this field # is not used, it has the same behavior of port so we can just use # portName to get the port from instance group. port = -1 if backend_service.port: # Although deprecated, it's still returned by the API and might # contain legacy data for customers who have not migrated. port = int(backend_service.port) if backend_service.port_name: for named_port in instance_group.named_ports or []: if named_port.get('name') == backend_service.port_name: port = int(named_port.get('port')) break if port == -1: LOGGER.error('NetworkPort can not be constructed. Unable to ' 'find the appropriate port from backend service ' 'or instance group.') return None return NetworkPort( network=network_type.Key.from_url( instance_group.network, project_id=instance_group.project_id), port=port) def find_instance_group_by_url(self, instance_group_url): """Find an instance group for the given URL. Args: instance_group_url (str): instance group URL Returns: InstanceGroup: instance group """ if not instance_group_url: return None target_key = instance_group_type.Key.from_url(instance_group_url) return self.instance_groups_by_key.get(target_key) def find_instance_by_url(self, instance_url): """Find an instance for the given URL. 
Args: instance_url (str): instance URL Returns: Instance: instance """ target_key = instance_type.Key.from_url(instance_url) return self.instances_by_key.get(target_key) def firewall_allowed_sources(self, network_port, tag): """Which source (networks, tags) can connect to the given destination? Args: network_port (NetworkPort): connection destination tag (str): instance tag for destination instance Returns: set: allowed source networks and tags """ allowed_sources = set() def firewall_entry_applies(firewall_entry): """Does a firewall entry match the current source? Args: firewall_entry (dict): An 'allowed' or 'denied' dict from a FirewallRule. Returns: bool: whether the entry is relevant to the source being evaluated """ if firewall_entry.get('IPProtocol') not in ( None, 6, '6', 'tcp', 'all'): return False if not firewall_entry.get('ports'): return True for fw_port_range in firewall_entry.get('ports'): fw_port_range = str(fw_port_range) if '-' in fw_port_range: range_ends = fw_port_range.split('-') fw_port_min = int(range_ends[0]) fw_port_max = int(range_ends[1]) else: fw_port_min = int(fw_port_range) fw_port_max = int(fw_port_range) if fw_port_min <= network_port.port <= fw_port_max: return True return False relevant_rules_by_priority = collections.defaultdict(lambda: []) for firewall_rule in self.firewall_rules: firewall_network = network_type.Key.from_url( firewall_rule.network, project_id=firewall_rule.project_id) if firewall_network != network_port.network: continue if (firewall_rule.target_tags and tag not in firewall_rule.target_tags): continue if firewall_rule.direction and firewall_rule.direction != 'INGRESS': continue relevant_rules_by_priority[firewall_rule.priority].append( firewall_rule) priorities = relevant_rules_by_priority.keys() priorities.sort(reverse=True) for priority in priorities: # DENY at a given priority takes precedence over ALLOW for firewall_rule in relevant_rules_by_priority[priority]: for allowed in firewall_rule.allowed or []: if firewall_entry_applies(allowed): allowed_sources.update( firewall_rule.source_ranges or []) allowed_sources.update( firewall_rule.source_tags or []) continue for firewall_rule in relevant_rules_by_priority[priority]: for denied in firewall_rule.denied or []: if firewall_entry_applies(denied): allowed_sources.difference_update( firewall_rule.source_ranges or []) allowed_sources.difference_update( firewall_rule.source_tags or []) return allowed_sources def tags_for_instance_group(self, instance_group): """Which instance tags are used for an instance group? Includes tags used by instances in the group and, for managed groups, tags in the group's template. Args: instance_group (InstanceGroup): the group to query tags for Returns: set: tags """ tags = set() # Get tags from actual instances. for instance_url in instance_group.instance_urls: instance = self.find_instance_by_url(instance_url) if not instance: continue tags.update(instance.tags.get('items', [])) # If it's a managed instance group, also get tags from the # instance template. instance_template = self.instance_templates_by_group_key.get( instance_group.key) if instance_template: template_tags = instance_template.properties.get('tags', {}) tags.update(template_tags.get('items', [])) return tags def make_iap_resource(self, backend_service, project_full_name): """Get an IapResource for a service. Args: backend_service (BackendService): service to create a resource for project_full_name (str): The full path to the parent project including all ancestors. 
Returns: IapResource: the corresponding resource """ alternate_services = set() direct_access_sources = set() for backend in backend_service.backends: instance_group = self.find_instance_group_by_url( backend.get('group')) if not instance_group: continue network_port = self.instance_group_network_port( backend_service, instance_group) if not network_port: continue direct_access_sources.update( self.firewall_allowed_sources(network_port, None)) tags = self.tags_for_instance_group(instance_group) for tag in tags: direct_access_sources.update( self.firewall_allowed_sources( network_port, tag)) # Don't count the load balancer as a direct access source. # The load balancer egress IPs are documented here: # https://cloud.google.com/compute/docs/load-balancing/http/ # (In theory they can change, but it's not common (since it's # a backwards-incompatible change for HTTP load balancer # customers.) 35.191/16 was recently announced; when Google # added that one, they sent out a mandatory service # announcement a year before the new range was used.) direct_access_sources.discard('130.211.0.0/22') direct_access_sources.discard('35.191.0.0/16') for backend_service2 in self.backend_services: if self.is_alternate_service(backend_service, backend_service2): alternate_services.add(backend_service2.key) return IapResource( project_full_name=project_full_name, backend_service=backend_service, alternate_services=alternate_services, direct_access_sources=direct_access_sources, iap_enabled=(backend_service.iap.get('enabled', False) if backend_service.iap else False)) def is_alternate_service(self, backend_service, backend_service2): """Do two backend services expose any of the same (instance, port) ? Args: backend_service (BackendService): One backend service backend_service2 (BackendService): The other backend service Returns: bool: whether the two services share any (instance, port) """ if backend_service2.key == backend_service.key: return False for backend in backend_service.backends: instance_group = self.find_instance_group_by_url( backend.get('group')) if not instance_group: continue network_port = self.instance_group_network_port( backend_service, instance_group) if not network_port: continue for backend2 in backend_service2.backends: instance_group2 = self.find_instance_group_by_url( backend2.get('group')) if not instance_group2: continue network_port2 = self.instance_group_network_port( backend_service2, instance_group2) if not network_port2: continue if network_port != network_port2: continue if instance_group == instance_group2: return True for instance_url in instance_group.instance_urls: if instance_url in instance_group2.instance_urls: return True return False class IapScanner(base_scanner.BaseScanner): """Pipeline to IAP-related data from DAO.""" SCANNER_OUTPUT_CSV_FMT = 'scanner_output_iap.{}.csv' def __init__(self, global_configs, scanner_configs, service_config, model_name, snapshot_timestamp, rules): """Initialization. Args: global_configs (dict): Global configurations. scanner_configs (dict): Scanner configurations. service_config (ServiceConfig): Forseti 2.0 service configs model_name (str): name of the data model snapshot_timestamp (str): The snapshot timestamp. rules (str): Fully-qualified path and filename of the rules file. 
""" super(IapScanner, self).__init__( global_configs, scanner_configs, service_config, model_name, snapshot_timestamp, rules) self.rules_engine = iap_rules_engine.IapRulesEngine( rules_file_path=self.rules, snapshot_timestamp=self.snapshot_timestamp) self.rules_engine.build_rule_book(self.global_configs) self.scoped_session, self.data_access = ( service_config.model_manager.get(model_name)) @staticmethod def _flatten_violations(violations): """Flatten RuleViolations into a dict for each RuleViolation member. Args: violations (list): The RuleViolations to flatten. Yields: dict: Iterator of RuleViolations as a dict per member. """ for violation in violations: alternate_services = ['%s/%s' % (bs_key.project_id, bs_key.name) for bs_key in violation.alternate_services_violations] alternate_services.sort() alternate_services_str = ', '.join(alternate_services) direct_access_sources = violation.direct_access_sources_violations direct_access_sources.sort() direct_access_str = ', '.join(direct_access_sources) violation_data = { 'alternate_services_violations': alternate_services_str, 'direct_access_sources_violations': direct_access_str, 'iap_enabled_violation': str(violation.iap_enabled_violation), 'resource_name': violation.resource_name } yield { 'resource_id': violation.resource_id, 'resource_name': violation.resource_name, 'resource_type': violation.resource_type, 'full_name': violation.full_name, 'rule_index': violation.rule_index, 'rule_name': violation.rule_name, 'violation_type': violation.violation_type, 'violation_data': violation_data, 'resource_data': violation.resource_data } def _output_results(self, all_violations): """Output results. Args: all_violations (list): A list of violations. """ all_violations = self._flatten_violations(all_violations) self._output_results_to_db(all_violations) def _get_backend_services(self, parent_type_name): """Retrieves backend services. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: BackendService """ backend_services = [] with self.scoped_session as session: for backend_service in self.data_access.scanner_iter( session, 'backendservice', parent_type_name=parent_type_name): backend_services.append( backend_service_type.BackendService.from_json( full_name=backend_service.full_name, project_id=backend_service.parent.name, json_string=backend_service.data)) return backend_services def _get_firewall_rules(self, parent_type_name): """Retrieves firewall rules. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: FirewallRule """ firewall_rules = [] with self.scoped_session as session: for firewall_rule in self.data_access.scanner_iter( session, 'firewall', parent_type_name=parent_type_name): firewall_rules.append( firewall_rule_type.FirewallRule.from_json( project_id=firewall_rule.parent.name, json_string=firewall_rule.data)) return firewall_rules def _get_instances(self, parent_type_name): """Retrieves instances. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: Instance """ instances = [] with self.scoped_session as session: for instance in self.data_access.scanner_iter( session, 'instance', parent_type_name=parent_type_name): project = project_type.Project( project_id=instance.parent.name, full_name=instance.parent.full_name, ) instances.append( instance_type.Instance.from_json( parent=project, json_string=instance.data)) return instances def _get_instance_groups(self, parent_type_name): """Retrieves instance groups. 
Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: InstanceGroup """ instance_groups = [] with self.scoped_session as session: for instance_group in self.data_access.scanner_iter( session, 'instancegroup', parent_type_name=parent_type_name): instance_groups.append( instance_group_type.InstanceGroup.from_json( project_id=instance_group.parent.name, json_string=instance_group.data)) return instance_groups def _get_instance_group_managers(self, parent_type_name): """Retrieves instance group managers. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: InstanceGroupManager """ instance_group_managers = [] with self.scoped_session as session: for instance_group_manager in self.data_access.scanner_iter( session, 'instancegroupmanager', parent_type_name=parent_type_name): instance_group_managers.append( instance_group_manager_type.InstanceGroupManager.from_json( project_id=instance_group_manager.parent.name, json_string=instance_group_manager.data)) return instance_group_managers def _get_instance_templates(self, parent_type_name): """Retrieves instance templates. Args: parent_type_name (str): The parent resource type and name to pull. Returns: list: InstanceTemplate """ instance_templates = [] with self.scoped_session as session: for instance_template in self.data_access.scanner_iter( session, 'instancetemplate', parent_type_name=parent_type_name): instance_templates.append( instance_template_type.InstanceTemplate.from_json( project_id=instance_template.parent.name, json_string=instance_template.data)) return instance_templates def _retrieve(self): """Retrieves the data for the scanner. Yields: list: A list of IAP Resources for a project, to pass to the rules engine dict: A dict of resource counts for the project. """ projects = [] with self.scoped_session as session: for project in self.data_access.scanner_iter(session, 'project'): projects.append(project) for parent in projects: backend_services = self._get_backend_services(parent.type_name) firewall_rules = self._get_firewall_rules(parent.type_name) instances = self._get_instances(parent.type_name) instance_groups = self._get_instance_groups(parent.type_name) instance_group_managers = self._get_instance_group_managers( parent.type_name) instance_templates = self._get_instance_templates(parent.type_name) run_data = _RunData( backend_services=backend_services, firewall_rules=firewall_rules, instances=instances, instance_groups=instance_groups, instance_group_managers=instance_group_managers, instance_templates=instance_templates) iap_resources = [] for backend in backend_services: iap_resources.append( run_data.make_iap_resource(backend, parent.full_name)) yield iap_resources, run_data.resource_counts def _find_violations(self, iap_data): """Find IAP violations. Args: iap_data (iter): Generator of IAP resources and resource counts per project in the inventory. 
Returns: list: RuleViolation """ LOGGER.info('Finding IAP violations with %r...', self.rules_engine) ret = [] resource_counts = collections.defaultdict(int) for (iap_resources, project_resource_counts) in iap_data: for iap_resource in iap_resources: ret.extend(self.rules_engine.find_violations(iap_resource)) for key, value in project_resource_counts.items(): resource_counts[key] += value LOGGER.debug('find_violations returning %r', ret) return ret, dict(resource_counts) def run(self): """Runs the data collection.""" LOGGER.debug('In run') iap_data = self._retrieve() all_violations, _ = self._find_violations(iap_data) self._output_results(all_violations)
1
33,681
Please make the change in the CAI inventory, rather than changing the scanner.
forseti-security-forseti-security
py
@@ -6,7 +6,7 @@ package net.sourceforge.pmd.lang.jsp.ast; import net.sourceforge.pmd.lang.ast.AbstractNode; -public class AbstractJspNode extends AbstractNode implements JspNode { +public abstract class AbstractJspNode extends AbstractNode implements JspNode { protected JspParser parser;
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.jsp.ast; import net.sourceforge.pmd.lang.ast.AbstractNode; public class AbstractJspNode extends AbstractNode implements JspNode { protected JspParser parser; public AbstractJspNode(int id) { super(id); } public AbstractJspNode(JspParser parser, int id) { super(id); this.parser = parser; } public void jjtOpen() { if (beginLine == -1 && parser.token.next != null) { beginLine = parser.token.next.beginLine; beginColumn = parser.token.next.beginColumn; } } public void jjtClose() { if (beginLine == -1 && (children == null || children.length == 0)) { beginColumn = parser.token.beginColumn; } if (beginLine == -1) { beginLine = parser.token.beginLine; } endLine = parser.token.endLine; endColumn = parser.token.endColumn; } /** * Accept the visitor. * */ public Object jjtAccept(JspParserVisitor visitor, Object data) { return visitor.visit(this, data); } /** * Accept the visitor. * */ public Object childrenAccept(JspParserVisitor visitor, Object data) { if (children != null) { for (int i = 0; i < children.length; ++i) { ((JspNode) children[i]).jjtAccept(visitor, data); } } return data; } public String toString() { return JspParserTreeConstants.jjtNodeName[id]; } }
1
13,524
Adding this `abstract` is a breaking API change. Even though I agree it makes perfect sense to have it, we can't break this API until PMD 7.0.0. We should revert this particular change. We could make a reminder ticket like we did with #463 for 6.0.0.
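For illustration only, a tiny self-contained example of why this kind of change is breaking (the class names here are hypothetical, not PMD code): once a published class gains the `abstract` modifier, existing callers that instantiate it stop compiling, and already-compiled callers fail at run time with `InstantiationError`.

```java
// Hypothetical published API class. If a later release changes this to
// "public abstract class Widget", the caller below no longer compiles,
// and previously compiled callers throw InstantiationError at run time.
public class Widget {
    public String describe() {
        return "widget";
    }
}

class Caller {
    public static void main(String[] args) {
        // Direct instantiation is exactly what the abstract modifier forbids.
        System.out.println(new Widget().describe());
    }
}
```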
pmd-pmd
java
@@ -16,6 +16,7 @@ describe IndividualPlan do it_behaves_like 'a Plan with countable subscriptions' it_behaves_like 'a Plan for public listing' + it_behaves_like 'Purchaseable plan' describe '.active' do it 'only includes active plans' do
1
require 'spec_helper' describe IndividualPlan do it { should have_many(:announcements) } it { should have_many(:purchases) } it { should have_many(:subscriptions) } it { should validate_presence_of(:description) } it { should validate_presence_of(:individual_price) } it { should validate_presence_of(:name) } it { should validate_presence_of(:short_description) } it { should validate_presence_of(:sku) } it { should_not be_fulfilled_with_github } it { should be_subscription } it_behaves_like 'a Plan with countable subscriptions' it_behaves_like 'a Plan for public listing' describe '.active' do it 'only includes active plans' do active = create(:plan, active: true) inactive = create(:plan, active: false) expect(IndividualPlan.active).to eq [active] end end describe '.default' do it 'returns the first, active, featured, ordered plan' do ordered = stub(first: stub()) featured = stub(ordered: ordered) active = stub(featured: featured) IndividualPlan.stubs(active: active) IndividualPlan.default expect(ordered).to have_received(:first) expect(featured).to have_received(:ordered) expect(active).to have_received(:featured) expect(IndividualPlan).to have_received(:active) end end describe '.basic' do it 'returns the basic plan' do basic_plan = create(:basic_plan) create(:plan) expect(IndividualPlan.basic).to eq basic_plan end end describe '#subscription_count' do it 'returns 0 when the plan has no subscriptions' do plan = create(:plan) expect(plan.subscription_count).to eq 0 end it 'returns 1 when the plan has a single active subscription that is paid' do plan = create(:plan) create(:active_subscription, plan: plan, paid: true) expect(plan.subscription_count).to eq 1 end it 'returns 0 when the plan has an active subscription that is unpaid' do plan = create(:plan) create(:active_subscription, plan: plan, paid: false) expect(plan.subscription_count).to eq 0 end it 'returns 0 when the plan has only an inactive subscription' do plan = create(:plan) create_inactive_subscription_for(plan) expect(plan.subscription_count).to eq 0 end end describe 'purchase_for' do it 'returns the purchase when a user has purchased the plan' do create_mentors user = create(:user, :with_github) purchase = create(:plan_purchase, user: user) plan = purchase.purchaseable expect(plan.purchase_for(user)).to eq purchase end it 'returns nil when a user has not purchased the plan' do create_mentors user = create(:user) purchase = create(:plan_purchase) plan = purchase.purchaseable expect(plan.purchase_for(user)).to be_nil end end describe 'starts_on' do it 'returns the given date' do plan = create(:plan) expect(plan.starts_on(Time.zone.today)).to eq Time.zone.today end end describe 'ends_on' do it 'returns the given date' do plan = create(:plan) expect(plan.ends_on(Time.zone.today)).to eq Time.zone.today end end describe 'subscription_interval' do it 'returns the interval from the stripe plan' do plan = build_stubbed(:plan) stripe_plan = stub(interval: 'year') Stripe::Plan.stubs(:retrieve).returns(stripe_plan) expect(plan.subscription_interval).to eq 'year' expect(Stripe::Plan).to have_received(:retrieve).with(plan.sku) end end describe 'offering_type' do it 'returns subscription' do plan = build_stubbed(:plan) result = plan.offering_type expect(result).to eq 'subscription' end end describe 'fulfillment_method' do it 'returns subscription' do plan = build_stubbed(:plan) result = plan.fulfillment_method expect(result).to eq 'subscription' end end describe '#alternates' do it 'is empty' do plan = IndividualPlan.new result = 
plan.alternates expect(result).to eq [] end end describe '#announcement' do it 'calls Announcement.current' do Announcement.stubs :current plan = create(:plan) plan.announcement expect(Announcement).to have_received(:current) end end describe '#projected_monthly_revenue' do it 'returns 0 when there are no subscribers' do plan = create(:individual_plan) expect(plan.projected_monthly_revenue).to eq 0 end it 'returns the subscriber count times the individual price for a Plan' do plan = create(:individual_plan) create(:subscription, plan: plan) create(:subscription, plan: plan) expected_revenue = plan.individual_price * 2 expect(plan.projected_monthly_revenue).to eq expected_revenue end end describe '#fulfill' do it 'starts a subscription' do user = build_stubbed(:user) purchase = build_stubbed(:purchase, user: user) plan = build_stubbed(:individual_plan) fulfillment = stub_subscription_fulfillment(purchase) plan.fulfill(purchase, user) expect(fulfillment).to have_received(:fulfill) end end def create_inactive_subscription_for(plan) create(:inactive_subscription, plan: plan) end def create_active_subscription_for(plan) create(:subscription, plan: plan) end end
1
8,990
This is following the existing convention, but I'm not sure the convention is a good one. I think it would be better to have one shared example group:

- It means we don't need to add new example groups twice every time.
- If the shared example group looks large, that makes it more obvious that our team models have too much shared behavior.

I don't want to do anything about this right now (I'm pretty sure I can wipe out this shared example group when I change team plans), but it's good to keep in mind.
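To make the suggestion concrete, here is a hedged sketch of a single entry-point group; the group name and file path are assumptions, and it only delegates to the groups this spec already uses.

```ruby
# spec/support/shared_examples_for_plans.rb (hypothetical path)
shared_examples 'an individual plan' do
  # One group per plan spec; it forwards to the existing behaviours so a
  # new shared behaviour only has to be registered in this one place.
  it_behaves_like 'a Plan with countable subscriptions'
  it_behaves_like 'a Plan for public listing'
  it_behaves_like 'Purchaseable plan'
end
```

Each spec would then carry a single `it_behaves_like 'an individual plan'` line instead of one line per shared group.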
thoughtbot-upcase
rb
@@ -304,14 +304,15 @@ size_t network_prefix) bool nano::server_socket::limit_reached_for_incoming_subnetwork_connections (std::shared_ptr<nano::socket> const & new_connection) { debug_assert (strand.running_in_this_thread ()); - if (node.flags.disable_max_peers_per_subnetwork) + if (node.flags.disable_max_peers_per_subnetwork || nano::transport::is_ipv4_or_v4_mapped_address (new_connection->remote.address ())) { // If the limit is disabled, then it is unreachable. + // If the address is IPv4 we don't check for a network limit, since its address space isn't big as IPv6 /64. return false; } auto const counted_connections = socket_functions::count_subnetwork_connections ( connections_per_address, - nano::transport::mapped_from_v4_or_v6 (new_connection->remote.address ()), + new_connection->remote.address ().to_v6 (), node.network_params.network.ipv6_subnetwork_prefix_for_limiting); return counted_connections >= node.network_params.network.max_peers_per_subnetwork; }
1
#include <nano/boost/asio/bind_executor.hpp> #include <nano/boost/asio/dispatch.hpp> #include <nano/boost/asio/ip/address.hpp> #include <nano/boost/asio/ip/address_v6.hpp> #include <nano/boost/asio/ip/network_v6.hpp> #include <nano/boost/asio/read.hpp> #include <nano/node/node.hpp> #include <nano/node/socket.hpp> #include <nano/node/transport/transport.hpp> #include <boost/format.hpp> #include <cstdint> #include <iterator> #include <limits> #include <memory> nano::socket::socket (nano::node & node_a) : strand{ node_a.io_ctx.get_executor () }, tcp_socket{ node_a.io_ctx }, node{ node_a }, next_deadline{ std::numeric_limits<uint64_t>::max () }, last_completion_time{ 0 }, last_receive_time{ 0 }, io_timeout{ node_a.config.tcp_io_timeout }, silent_connection_tolerance_time{ node_a.network_params.network.silent_connection_tolerance_time } { } nano::socket::~socket () { close_internal (); } void nano::socket::async_connect (nano::tcp_endpoint const & endpoint_a, std::function<void (boost::system::error_code const &)> callback_a) { checkup (); auto this_l (shared_from_this ()); start_timer (); this_l->tcp_socket.async_connect (endpoint_a, boost::asio::bind_executor (this_l->strand, [this_l, callback_a, endpoint_a] (boost::system::error_code const & ec) { this_l->stop_timer (); this_l->remote = endpoint_a; callback_a (ec); })); } void nano::socket::async_read (std::shared_ptr<std::vector<uint8_t>> const & buffer_a, std::size_t size_a, std::function<void (boost::system::error_code const &, std::size_t)> callback_a) { if (size_a <= buffer_a->size ()) { auto this_l (shared_from_this ()); if (!closed) { start_timer (); boost::asio::post (strand, boost::asio::bind_executor (strand, [buffer_a, callback_a, size_a, this_l] () { boost::asio::async_read (this_l->tcp_socket, boost::asio::buffer (buffer_a->data (), size_a), boost::asio::bind_executor (this_l->strand, [this_l, buffer_a, callback_a] (boost::system::error_code const & ec, std::size_t size_a) { this_l->node.stats.add (nano::stat::type::traffic_tcp, nano::stat::dir::in, size_a); this_l->stop_timer (); this_l->update_last_receive_time (); callback_a (ec, size_a); })); })); } } else { debug_assert (false && "nano::socket::async_read called with incorrect buffer size"); boost::system::error_code ec_buffer = boost::system::errc::make_error_code (boost::system::errc::no_buffer_space); callback_a (ec_buffer, 0); } } void nano::socket::async_write (nano::shared_const_buffer const & buffer_a, std::function<void (boost::system::error_code const &, std::size_t)> const & callback_a) { if (!closed) { ++queue_size; boost::asio::post (strand, boost::asio::bind_executor (strand, [buffer_a, callback_a, this_l = shared_from_this ()] () { if (!this_l->closed) { this_l->start_timer (); nano::async_write (this_l->tcp_socket, buffer_a, boost::asio::bind_executor (this_l->strand, [buffer_a, callback_a, this_l] (boost::system::error_code ec, std::size_t size_a) { --this_l->queue_size; this_l->node.stats.add (nano::stat::type::traffic_tcp, nano::stat::dir::out, size_a); this_l->stop_timer (); if (callback_a) { callback_a (ec, size_a); } })); } else { if (callback_a) { callback_a (boost::system::errc::make_error_code (boost::system::errc::not_supported), 0); } } })); } else if (callback_a) { node.background ([callback_a] () { callback_a (boost::system::errc::make_error_code (boost::system::errc::not_supported), 0); }); } } void nano::socket::start_timer () { start_timer (io_timeout); } void nano::socket::start_timer (std::chrono::seconds deadline_a) { next_deadline = 
deadline_a.count (); } void nano::socket::stop_timer () { last_completion_time = nano::seconds_since_epoch (); } void nano::socket::update_last_receive_time () { last_receive_time = nano::seconds_since_epoch (); } void nano::socket::checkup () { std::weak_ptr<nano::socket> this_w (shared_from_this ()); node.workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::seconds (2), [this_w] () { if (auto this_l = this_w.lock ()) { uint64_t now (nano::seconds_since_epoch ()); auto condition_to_disconnect{ false }; if (this_l->is_realtime_connection () && now - this_l->last_receive_time > this_l->silent_connection_tolerance_time.count ()) { this_l->node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_silent_connection_drop, nano::stat::dir::in); condition_to_disconnect = true; } if (this_l->next_deadline != std::numeric_limits<uint64_t>::max () && now - this_l->last_completion_time > this_l->next_deadline) { this_l->node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_io_timeout_drop, nano::stat::dir::in); condition_to_disconnect = true; } if (condition_to_disconnect) { if (this_l->node.config.logging.network_timeout_logging ()) { // The remote end may have closed the connection before this side timing out, in which case the remote address is no longer available. boost::system::error_code ec_remote_l; boost::asio::ip::tcp::endpoint remote_endpoint_l = this_l->tcp_socket.remote_endpoint (ec_remote_l); if (!ec_remote_l) { this_l->node.logger.try_log (boost::str (boost::format ("Disconnecting from %1% due to timeout") % remote_endpoint_l)); } } this_l->timed_out = true; this_l->close (); } else if (!this_l->closed) { this_l->checkup (); } } }); } bool nano::socket::has_timed_out () const { return timed_out; } void nano::socket::timeout_set (std::chrono::seconds io_timeout_a) { io_timeout = io_timeout_a; } void nano::socket::set_silent_connection_tolerance_time (std::chrono::seconds tolerance_time_a) { auto this_l (shared_from_this ()); boost::asio::dispatch (strand, boost::asio::bind_executor (strand, [this_l, tolerance_time_a] () { this_l->silent_connection_tolerance_time = tolerance_time_a; })); } void nano::socket::close () { auto this_l (shared_from_this ()); boost::asio::dispatch (strand, boost::asio::bind_executor (strand, [this_l] { this_l->close_internal (); })); } // This must be called from a strand or the destructor void nano::socket::close_internal () { if (!closed.exchange (true)) { io_timeout = std::chrono::seconds (0); boost::system::error_code ec; // Ignore error code for shutdown as it is best-effort tcp_socket.shutdown (boost::asio::ip::tcp::socket::shutdown_both, ec); tcp_socket.close (ec); if (ec) { node.logger.try_log ("Failed to close socket gracefully: ", ec.message ()); node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::error_socket_close); } } } nano::tcp_endpoint nano::socket::remote_endpoint () const { return remote; } nano::tcp_endpoint nano::socket::local_endpoint () const { return tcp_socket.local_endpoint (); } nano::server_socket::server_socket (nano::node & node_a, boost::asio::ip::tcp::endpoint local_a, std::size_t max_connections_a) : socket{ node_a }, acceptor{ node_a.io_ctx }, local{ local_a }, max_inbound_connections{ max_connections_a } { io_timeout = std::chrono::seconds::max (); } void nano::server_socket::start (boost::system::error_code & ec_a) { acceptor.open (local.protocol ()); acceptor.set_option (boost::asio::ip::tcp::acceptor::reuse_address (true)); acceptor.bind (local, ec_a); if (!ec_a) { 
acceptor.listen (boost::asio::socket_base::max_listen_connections, ec_a); } } void nano::server_socket::close () { auto this_l (std::static_pointer_cast<nano::server_socket> (shared_from_this ())); boost::asio::dispatch (strand, boost::asio::bind_executor (strand, [this_l] () { this_l->close_internal (); this_l->acceptor.close (); for (auto & address_connection_pair : this_l->connections_per_address) { if (auto connection_l = address_connection_pair.second.lock ()) { connection_l->close (); } } this_l->connections_per_address.clear (); })); } boost::asio::ip::network_v6 nano::socket_functions::get_ipv6_subnet_address (boost::asio::ip::address_v6 const & ip_address, size_t network_prefix) { return boost::asio::ip::make_network_v6 (ip_address, network_prefix); } boost::asio::ip::address nano::socket_functions::first_ipv6_subnet_address (boost::asio::ip::address_v6 const & ip_address, size_t network_prefix) { auto range = get_ipv6_subnet_address (ip_address, network_prefix).hosts (); debug_assert (!range.empty ()); return *(range.begin ()); } boost::asio::ip::address nano::socket_functions::last_ipv6_subnet_address (boost::asio::ip::address_v6 const & ip_address, size_t network_prefix) { auto range = get_ipv6_subnet_address (ip_address, network_prefix).hosts (); debug_assert (!range.empty ()); return *(--range.end ()); } size_t nano::socket_functions::count_subnetwork_connections ( nano::address_socket_mmap const & per_address_connections, boost::asio::ip::address_v6 const & remote_address, size_t network_prefix) { auto range = get_ipv6_subnet_address (remote_address, network_prefix).hosts (); if (range.empty ()) { return 0; } auto const first_ip = first_ipv6_subnet_address (remote_address, network_prefix); auto const last_ip = last_ipv6_subnet_address (remote_address, network_prefix); auto const counted_connections = std::distance (per_address_connections.lower_bound (first_ip), per_address_connections.upper_bound (last_ip)); return counted_connections; } bool nano::server_socket::limit_reached_for_incoming_subnetwork_connections (std::shared_ptr<nano::socket> const & new_connection) { debug_assert (strand.running_in_this_thread ()); if (node.flags.disable_max_peers_per_subnetwork) { // If the limit is disabled, then it is unreachable. return false; } auto const counted_connections = socket_functions::count_subnetwork_connections ( connections_per_address, nano::transport::mapped_from_v4_or_v6 (new_connection->remote.address ()), node.network_params.network.ipv6_subnetwork_prefix_for_limiting); return counted_connections >= node.network_params.network.max_peers_per_subnetwork; } bool nano::server_socket::limit_reached_for_incoming_ip_connections (std::shared_ptr<nano::socket> const & new_connection) { debug_assert (strand.running_in_this_thread ()); if (node.flags.disable_max_peers_per_ip) { // If the limit is disabled, then it is unreachable. 
return false; } auto const address_connections_range = connections_per_address.equal_range (new_connection->remote.address ()); auto const counted_connections = std::distance (address_connections_range.first, address_connections_range.second); return counted_connections >= node.network_params.network.max_peers_per_ip; } void nano::server_socket::on_connection (std::function<bool (std::shared_ptr<nano::socket> const &, boost::system::error_code const &)> callback_a) { auto this_l (std::static_pointer_cast<nano::server_socket> (shared_from_this ())); boost::asio::post (strand, boost::asio::bind_executor (strand, [this_l, callback_a] () { if (!this_l->acceptor.is_open ()) { this_l->node.logger.always_log ("Network: Acceptor is not open"); return; } // Prepare new connection auto new_connection = std::make_shared<nano::socket> (this_l->node); this_l->acceptor.async_accept (new_connection->tcp_socket, new_connection->remote, boost::asio::bind_executor (this_l->strand, [this_l, new_connection, callback_a] (boost::system::error_code const & ec_a) { this_l->evict_dead_connections (); if (this_l->connections_per_address.size () >= this_l->max_inbound_connections) { this_l->node.logger.try_log ("Network: max_inbound_connections reached, unable to open new connection"); this_l->node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_accept_failure, nano::stat::dir::in); this_l->on_connection_requeue_delayed (callback_a); return; } if (this_l->limit_reached_for_incoming_ip_connections (new_connection)) { auto const remote_ip_address = new_connection->remote_endpoint ().address (); auto const log_message = boost::str ( boost::format ("Network: max connections per IP (max_peers_per_ip) was reached for %1%, unable to open new connection") % remote_ip_address.to_string ()); this_l->node.logger.try_log (log_message); this_l->node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_ip, nano::stat::dir::in); this_l->on_connection_requeue_delayed (callback_a); return; } if (this_l->limit_reached_for_incoming_subnetwork_connections (new_connection)) { auto const remote_ip_address = new_connection->remote_endpoint ().address (); debug_assert (remote_ip_address.is_v6 ()); auto const remote_subnet = socket_functions::get_ipv6_subnet_address (remote_ip_address.to_v6 (), this_l->node.network_params.network.max_peers_per_subnetwork); auto const log_message = boost::str ( boost::format ("Network: max connections per subnetwork (max_peers_per_subnetwork) was reached for subnetwork %1% (remote IP: %2%), unable to open new connection") % remote_subnet.canonical ().to_string () % remote_ip_address.to_string ()); this_l->node.logger.try_log (log_message); this_l->node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_subnetwork, nano::stat::dir::in); this_l->on_connection_requeue_delayed (callback_a); return; } if (!ec_a) { // Make sure the new connection doesn't idle. Note that in most cases, the callback is going to start // an IO operation immediately, which will start a timer. new_connection->checkup (); new_connection->start_timer (this_l->node.network_params.network.is_dev_network () ? 
std::chrono::seconds (2) : this_l->node.network_params.network.idle_timeout); this_l->node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_accept_success, nano::stat::dir::in); this_l->connections_per_address.emplace (new_connection->remote.address (), new_connection); if (callback_a (new_connection, ec_a)) { this_l->on_connection (callback_a); return; } this_l->node.logger.always_log ("Network: Stopping to accept connections"); return; } // accept error this_l->node.logger.try_log ("Network: Unable to accept connection: ", ec_a.message ()); this_l->node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_accept_failure, nano::stat::dir::in); if (this_l->is_temporary_error (ec_a)) { // if it is a temporary error, just retry it this_l->on_connection_requeue_delayed (callback_a); return; } // if it is not a temporary error, check how the listener wants to handle this error if (callback_a (new_connection, ec_a)) { this_l->on_connection_requeue_delayed (callback_a); return; } // No requeue if we reach here, no incoming socket connections will be handled this_l->node.logger.always_log ("Network: Stopping to accept connections"); })); })); } // If we are unable to accept a socket, for any reason, we wait just a little (1ms) before rescheduling the next connection accept. // The intention is to throttle back the connection requests and break up any busy loops that could possibly form and // give the rest of the system a chance to recover. void nano::server_socket::on_connection_requeue_delayed (std::function<bool (std::shared_ptr<nano::socket> const &, boost::system::error_code const &)> callback_a) { auto this_l (std::static_pointer_cast<nano::server_socket> (shared_from_this ())); node.workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::milliseconds (1), [this_l, callback_a] () { this_l->on_connection (callback_a); }); } bool nano::server_socket::is_temporary_error (boost::system::error_code const ec_a) { switch (ec_a.value ()) { #if EAGAIN != EWOULDBLOCK case EAGAIN: #endif case EWOULDBLOCK: case EINTR: return true; default: return false; } } // This must be called from a strand void nano::server_socket::evict_dead_connections () { debug_assert (strand.running_in_this_thread ()); for (auto it = connections_per_address.begin (); it != connections_per_address.end ();) { if (it->second.expired ()) { it = connections_per_address.erase (it); continue; } ++it; } }
1
16,982
I do not think we need to make a source code change to handle this. We could set the subnetwork prefix to default to /32 (/128 for IPv6 IPv4-mapped addresses).
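A hedged illustration of the prefix arithmetic behind that suggestion, using plain Boost.Asio types rather than anything from the node: an IPv4 /32 becomes a /128 once the address is IPv4-mapped into IPv6, because the mapped form `::ffff:a.b.c.d` sits behind a fixed 96-bit prefix, so a /128 default makes the "subnetwork" a single host.

```cpp
#include <boost/asio/ip/address_v6.hpp>
#include <boost/asio/ip/network_v6.hpp>
#include <iostream>

int main ()
{
	// Example IPv4-mapped address; 192.0.2.10 is a documentation address.
	auto const mapped = boost::asio::ip::make_address_v6 ("::ffff:192.0.2.10");
	// /128 here plays the role of /32 in IPv4 terms: exactly one host.
	auto const subnet = boost::asio::ip::make_network_v6 (mapped, 128);
	std::cout << subnet.canonical ().to_string () << std::endl;
}
```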
nanocurrency-nano-node
cpp
@@ -41,7 +41,7 @@ gboolean ot_remote_builtin_delete_cookie (int argc, char **argv, GCancellable *cancellable, GError **error) { g_autoptr(OstreeRepo) repo = NULL; - g_autoptr(GOptionContext) context = g_option_context_new ("NAME DOMAIN PATH COOKIE_NAME- Remote one cookie from remote"); + g_autoptr(GOptionContext) context = g_option_context_new ("NAME DOMAIN PATH COOKIE_NAME- Remove one cookie from remote"); if (!ostree_option_context_parse (context, option_entries, &argc, &argv, OSTREE_BUILTIN_FLAG_NONE, &repo, cancellable, error))
1
/* * Copyright (C) 2015 Red Hat, Inc. * Copyright (C) 2016 Sjoerd Simons <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ #include "config.h" #include "otutil.h" #include <sys/stat.h> #include "ot-main.h" #include "ot-remote-builtins.h" #include "ostree-repo-private.h" #include "ot-remote-cookie-util.h" /* ATTENTION: * Please remember to update the bash-completion script (bash/ostree) and * man page (man/ostree-remote.xml) when changing the option list. */ static GOptionEntry option_entries[] = { { NULL } }; gboolean ot_remote_builtin_delete_cookie (int argc, char **argv, GCancellable *cancellable, GError **error) { g_autoptr(OstreeRepo) repo = NULL; g_autoptr(GOptionContext) context = g_option_context_new ("NAME DOMAIN PATH COOKIE_NAME- Remote one cookie from remote"); if (!ostree_option_context_parse (context, option_entries, &argc, &argv, OSTREE_BUILTIN_FLAG_NONE, &repo, cancellable, error)) return FALSE; if (argc < 5) { ot_util_usage_error (context, "NAME, DOMAIN, PATH and COOKIE_NAME must be specified", error); return FALSE; } const char *remote_name = argv[1]; const char *domain = argv[2]; const char *path = argv[3]; const char *cookie_name = argv[4]; g_autofree char *cookie_file = g_strdup_printf ("%s.cookies.txt", remote_name); if (!ot_delete_cookie_at (ostree_repo_get_dfd (repo), cookie_file, domain, path, cookie_name, error)) return FALSE; return TRUE; }
1
12,856
It looks good overall! The only issue I have is that the description string is now duplicated: once in the struct and once in the parameter string. Maybe let's pass the struct to the command so that `ostree_option_context_parse` can set it as the summary, similar to what we do in rpm-ostree.
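As a rough illustration of keeping the text in one place with stock GLib calls only (this is not the actual ostree refactor; how the string would be threaded through `ostree_option_context_parse` or a command struct is an assumption):

```c
#include <glib.h>

int
main (int argc, char **argv)
{
  /* The parameter string stays terse; the human-readable description lives
   * in the summary, so it is never pasted into the usage line a second time. */
  g_autoptr(GOptionContext) context = g_option_context_new ("NAME DOMAIN PATH COOKIE_NAME");
  g_option_context_set_summary (context, "Remove one cookie from remote");

  g_autoptr(GError) error = NULL;
  if (!g_option_context_parse (context, &argc, &argv, &error))
    {
      g_printerr ("%s\n", error->message);
      return 1;
    }
  return 0;
}
```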
ostreedev-ostree
c
@@ -29,7 +29,7 @@ import org.openqa.selenium.remote.service.DriverService; * * @see <a href="https://chromium.googlesource.com/chromium/src/+/master/chrome/test/chromedriver/client/command_executor.py">List of ChromeWebdriver commands</a> */ -class ChromeDriverCommandExecutor extends DriverCommandExecutor { +public class ChromeDriverCommandExecutor extends DriverCommandExecutor { private static final ImmutableMap<String, CommandInfo> CHROME_COMMAND_NAME_TO_URL = ImmutableMap.of( ChromeDriverCommand.LAUNCH_APP,
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.chrome; import com.google.common.collect.ImmutableMap; import org.openqa.selenium.remote.CommandInfo; import org.openqa.selenium.remote.http.HttpMethod; import org.openqa.selenium.remote.service.DriverCommandExecutor; import org.openqa.selenium.remote.service.DriverService; /** * {@link DriverCommandExecutor} that understands ChromeDriver specific commands. * * @see <a href="https://chromium.googlesource.com/chromium/src/+/master/chrome/test/chromedriver/client/command_executor.py">List of ChromeWebdriver commands</a> */ class ChromeDriverCommandExecutor extends DriverCommandExecutor { private static final ImmutableMap<String, CommandInfo> CHROME_COMMAND_NAME_TO_URL = ImmutableMap.of( ChromeDriverCommand.LAUNCH_APP, new CommandInfo("/session/:sessionId/chromium/launch_app", HttpMethod.POST), ChromeDriverCommand.GET_NETWORK_CONDITIONS, new CommandInfo("/session/:sessionId/chromium/network_conditions", HttpMethod.GET), ChromeDriverCommand.SET_NETWORK_CONDITIONS, new CommandInfo("/session/:sessionId/chromium/network_conditions", HttpMethod.POST), ChromeDriverCommand.DELETE_NETWORK_CONDITIONS, new CommandInfo("/session/:sessionId/chromium/network_conditions", HttpMethod.DELETE), ChromeDriverCommand.EXECUTE_CDP_COMMAND, new CommandInfo("/session/:sessionId/goog/cdp/execute", HttpMethod.POST)); public ChromeDriverCommandExecutor(DriverService service) { super(service, CHROME_COMMAND_NAME_TO_URL); } }
1
16,391
This states very clearly that this is a derivative of Chrome and not Chromium. Do we need to extract an abstract `ChromiumCommandExecutor` and have both Edge and Chrome derive from that?
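A hedged sketch of the extraction being floated; the base class name and constructor shape are guesses, not the actual Selenium design.

```java
import com.google.common.collect.ImmutableMap;
import org.openqa.selenium.remote.CommandInfo;
import org.openqa.selenium.remote.service.DriverCommandExecutor;
import org.openqa.selenium.remote.service.DriverService;

// Hypothetical shared base: Chromium-derived drivers differ only in which extra
// vendor commands they register, so each subclass injects its own command map.
abstract class ChromiumCommandExecutor extends DriverCommandExecutor {
  protected ChromiumCommandExecutor(DriverService service,
                                    ImmutableMap<String, CommandInfo> vendorCommands) {
    super(service, vendorCommands);
  }
}
```

`ChromeDriverCommandExecutor` (and an Edge counterpart) would then shrink to a constructor that passes its own command table.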
SeleniumHQ-selenium
js
@@ -84,7 +84,9 @@ public class CompareObjectsWithEqualsRule extends AbstractJavaRule { ASTReferenceType type1 = ((Node) nd1.getAccessNodeParent()) .getFirstDescendantOfType(ASTReferenceType.class); // skip, if it is an enum - if (type0.getType() != null && type0.getType().equals(type1.getType()) && type0.getType().isEnum()) { + if (type0.getType() != null && type0.getType().equals(type1.getType()) + // It may be a custom enum class or an explicit Enum class usage + && (type0.getType().isEnum() || type0.getType() == java.lang.Enum.class)) { return data; }
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.java.rule.errorprone; import net.sourceforge.pmd.lang.ast.Node; import net.sourceforge.pmd.lang.java.ast.ASTAllocationExpression; import net.sourceforge.pmd.lang.java.ast.ASTEqualityExpression; import net.sourceforge.pmd.lang.java.ast.ASTInitializer; import net.sourceforge.pmd.lang.java.ast.ASTName; import net.sourceforge.pmd.lang.java.ast.ASTPrimaryPrefix; import net.sourceforge.pmd.lang.java.ast.ASTPrimarySuffix; import net.sourceforge.pmd.lang.java.ast.ASTReferenceType; import net.sourceforge.pmd.lang.java.rule.AbstractJavaRule; import net.sourceforge.pmd.lang.java.symboltable.VariableNameDeclaration; public class CompareObjectsWithEqualsRule extends AbstractJavaRule { private boolean hasName(Node n) { return n.jjtGetNumChildren() > 0 && n.jjtGetChild(0) instanceof ASTName; } /** * Indicate whether this node is allocating a new object. * * @param n * node that might be allocating a new object * @return true if child 0 is an AllocationExpression */ private boolean isAllocation(Node n) { return n.jjtGetNumChildren() > 0 && n.jjtGetChild(0) instanceof ASTAllocationExpression && n.jjtGetParent().jjtGetNumChildren() == 1; } public Object visit(ASTEqualityExpression node, Object data) { Node c0 = node.jjtGetChild(0).jjtGetChild(0); Node c1 = node.jjtGetChild(1).jjtGetChild(0); // If either side is allocating a new object, there's no way an // equals expression is correct if (isAllocation(c0) || isAllocation(c1)) { addViolation(data, node); return data; } // skip if either child is not a simple name if (!hasName(c0) || !hasName(c1)) { return data; } // skip if either is a qualified name if (isQualifiedName(c0.jjtGetChild(0)) || isQualifiedName(c1.jjtGetChild(0))) { return data; } // skip if either is part of a qualified name if (isPartOfQualifiedName(node.jjtGetChild(0)) || isPartOfQualifiedName(node.jjtGetChild(1))) { return data; } // skip static initializers... missing some cases here if (!node.getParentsOfType(ASTInitializer.class).isEmpty()) { return data; } ASTName n0 = (ASTName) c0.jjtGetChild(0); ASTName n1 = (ASTName) c1.jjtGetChild(0); if (n0.getNameDeclaration() instanceof VariableNameDeclaration && n1.getNameDeclaration() instanceof VariableNameDeclaration) { VariableNameDeclaration nd0 = (VariableNameDeclaration) n0.getNameDeclaration(); VariableNameDeclaration nd1 = (VariableNameDeclaration) n1.getNameDeclaration(); // skip array dereferences... this misses some cases // FIXME catch comparisons btwn array elements of reference types if (nd0.isArray() || nd1.isArray()) { return data; } if (nd0.isReferenceType() && nd1.isReferenceType()) { ASTReferenceType type0 = ((Node) nd0.getAccessNodeParent()) .getFirstDescendantOfType(ASTReferenceType.class); ASTReferenceType type1 = ((Node) nd1.getAccessNodeParent()) .getFirstDescendantOfType(ASTReferenceType.class); // skip, if it is an enum if (type0.getType() != null && type0.getType().equals(type1.getType()) && type0.getType().isEnum()) { return data; } addViolation(data, node); } } return data; } /** * Checks whether the given node contains a qualified name, consisting of * one ASTPrimaryPrefix and one or more ASTPrimarySuffix nodes. * * @param node * the node * @return <code>true</code> if it is a qualified name */ private boolean isPartOfQualifiedName(Node node) { return node.jjtGetChild(0) instanceof ASTPrimaryPrefix && !node.findChildrenOfType(ASTPrimarySuffix.class).isEmpty(); } }
1
13,530
It seems to me the issue lies with `isEnum()` itself, returning false for something that is an enum. I'd rather change it there than here.
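For context, `Class#isEnum()` is JDK behaviour: it legitimately returns false for `java.lang.Enum` itself and for the synthetic subclasses generated for enum constants with bodies. So "changing it there" would mean a shared helper rather than a tweak inside this rule; a hedged sketch of such a helper (name and placement are assumptions):

```java
final class EnumTypeChecks {

    private EnumTypeChecks() {
    }

    // Widens the raw Class#isEnum() check so that java.lang.Enum and the
    // per-constant subclasses of an enum are also treated as enum types.
    static boolean isEnumType(Class<?> clazz) {
        return clazz != null && (clazz.isEnum() || Enum.class.isAssignableFrom(clazz));
    }
}
```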
pmd-pmd
java
@@ -49,6 +49,15 @@ class QuteSchemeHandler(schemehandler.SchemeHandler): """Scheme handler for qute: URLs.""" + handlers = dict() + + @classmethod + def addHandler(cls, name): + """Add a handler to the qute: sheme.""" + def namedecorator(function): + cls.handlers[name] = function + return namedecorator + def createRequest(self, _op, request, _outgoing_data): """Create a new request.
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. # # pylint complains when using .render() on jinja templates, so we make it shut # up for this whole module. # pylint: disable=no-member # https://bitbucket.org/logilab/pylint/issue/490/ """Handler functions for different qute:... pages. Module attributes: pyeval_output: The output of the last :pyeval command. """ import functools import configparser from PyQt5.QtCore import pyqtSlot, QObject from PyQt5.QtNetwork import QNetworkReply import qutebrowser from qutebrowser.browser.network import schemehandler, networkreply from qutebrowser.utils import (version, utils, jinja, log, message, docutils, objreg) from qutebrowser.config import configexc, configdata pyeval_output = ":pyeval was never called" class QuteSchemeHandler(schemehandler.SchemeHandler): """Scheme handler for qute: URLs.""" def createRequest(self, _op, request, _outgoing_data): """Create a new request. Args: request: const QNetworkRequest & req _op: Operation op _outgoing_data: QIODevice * outgoingData Return: A QNetworkReply. """ path = request.url().path() host = request.url().host() # An url like "qute:foo" is split as "scheme:path", not "scheme:host". log.misc.debug("url: {}, path: {}, host {}".format( request.url().toDisplayString(), path, host)) try: handler = HANDLERS[path] except KeyError: try: handler = HANDLERS[host] except KeyError: errorstr = "No handler found for {}!".format( request.url().toDisplayString()) return networkreply.ErrorNetworkReply( request, errorstr, QNetworkReply.ContentNotFoundError, self.parent()) try: data = handler(self._win_id, request) except OSError as e: return networkreply.ErrorNetworkReply( request, str(e), QNetworkReply.ContentNotFoundError, self.parent()) return networkreply.FixedDataNetworkReply( request, data, 'text/html', self.parent()) class JSBridge(QObject): """Javascript-bridge for special qute:... pages.""" def __init__(self, parent=None): super().__init__(parent) @pyqtSlot(int, str, str, str) def set(self, win_id, sectname, optname, value): """Slot to set a setting from qute:settings.""" # https://github.com/The-Compiler/qutebrowser/issues/727 if ((sectname, optname) == ('content', 'allow-javascript') and value == 'false'): message.error(win_id, "Refusing to disable javascript via " "qute:settings as it needs javascript support.") return try: objreg.get('config').set('conf', sectname, optname, value) except (configexc.Error, configparser.Error) as e: message.error(win_id, e) def qute_pyeval(_win_id, _request): """Handler for qute:pyeval. Return HTML content as bytes.""" html = jinja.env.get_template('pre.html').render( title='pyeval', content=pyeval_output) return html.encode('UTF-8', errors='xmlcharrefreplace') def qute_version(_win_id, _request): """Handler for qute:version. 
Return HTML content as bytes.""" html = jinja.env.get_template('version.html').render( title='Version info', version=version.version(), copyright=qutebrowser.__copyright__) return html.encode('UTF-8', errors='xmlcharrefreplace') def qute_plainlog(_win_id, _request): """Handler for qute:plainlog. Return HTML content as bytes.""" if log.ram_handler is None: text = "Log output was disabled." else: text = log.ram_handler.dump_log() html = jinja.env.get_template('pre.html').render(title='log', content=text) return html.encode('UTF-8', errors='xmlcharrefreplace') def qute_log(_win_id, _request): """Handler for qute:log. Return HTML content as bytes.""" if log.ram_handler is None: html_log = None else: html_log = log.ram_handler.dump_log(html=True) html = jinja.env.get_template('log.html').render( title='log', content=html_log) return html.encode('UTF-8', errors='xmlcharrefreplace') def qute_gpl(_win_id, _request): """Handler for qute:gpl. Return HTML content as bytes.""" return utils.read_file('html/COPYING.html').encode('ASCII') def qute_help(win_id, request): """Handler for qute:help. Return HTML content as bytes.""" try: utils.read_file('html/doc/index.html') except OSError: html = jinja.env.get_template('error.html').render( title="Error while loading documentation", url=request.url().toDisplayString(), error="This most likely means the documentation was not generated " "properly. If you are running qutebrowser from the git " "repository, please run scripts/asciidoc2html.py. " "If you're running a released version this is a bug, please " "use :report to report it.", icon='') return html.encode('UTF-8', errors='xmlcharrefreplace') urlpath = request.url().path() if not urlpath or urlpath == '/': urlpath = 'index.html' else: urlpath = urlpath.lstrip('/') if not docutils.docs_up_to_date(urlpath): message.error(win_id, "Your documentation is outdated! Please re-run " "scripts/asciidoc2html.py.") path = 'html/doc/{}'.format(urlpath) return utils.read_file(path).encode('UTF-8', errors='xmlcharrefreplace') def qute_settings(win_id, _request): """Handler for qute:settings. View/change qute configuration.""" config_getter = functools.partial(objreg.get('config').get, raw=True) html = jinja.env.get_template('settings.html').render( win_id=win_id, title='settings', config=configdata, confget=config_getter) return html.encode('UTF-8', errors='xmlcharrefreplace') HANDLERS = { 'pyeval': qute_pyeval, 'version': qute_version, 'plainlog': qute_plainlog, 'log': qute_log, 'gpl': qute_gpl, 'help': qute_help, 'settings': qute_settings, }
1
13,859
Please use `{}` instead of `dict()`
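A minimal sketch of the requested style change; the literal is the idiomatic spelling and avoids the global name lookup that calling `dict()` performs.

```python
# Requested: build the handler registry with a literal ...
handlers = {}

# ... instead of calling the constructor, as the patch currently does:
# handlers = dict()
```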
qutebrowser-qutebrowser
py
@@ -20,11 +20,12 @@ * External dependencies */ import PropTypes from 'prop-types'; +import { useInView } from 'react-intersection-observer'; /** * WordPress dependencies */ -import { useCallback } from '@wordpress/element'; +import { useCallback, useEffect } from '@wordpress/element'; import { __ } from '@wordpress/i18n'; /**
1
/** * DashboardCTA component. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import PropTypes from 'prop-types'; /** * WordPress dependencies */ import { useCallback } from '@wordpress/element'; import { __ } from '@wordpress/i18n'; /** * Internal dependencies */ import Data from 'googlesitekit-data'; import { CORE_SITE } from '../../../../googlesitekit/datastore/site/constants'; import { CORE_MODULES } from '../../../../googlesitekit/modules/datastore/constants'; import { CORE_LOCATION } from '../../../../googlesitekit/datastore/location/constants'; import { CORE_USER } from '../../../../googlesitekit/datastore/user/constants'; import Button from '../../../../components/Button'; import Link from '../../../../components/Link'; import IdeaHubIcon from '../../../../../svg/idea-hub.svg'; import BulbIcon from '../../../../../svg/bulb.svg'; import CloseIcon from '../../../../../svg/close.svg'; const { useSelect, useDispatch } = Data; const DISMISS_ITEM_IDEA_HUB_CTA = 'idea-hub-cta'; function DashboardCTA( { Widget, WidgetNull } ) { const { connected, active } = useSelect( ( select ) => select( CORE_MODULES ).getModule( 'idea-hub' ) ); const dismissed = useSelect( ( select ) => select( CORE_USER ).isItemDismissed( DISMISS_ITEM_IDEA_HUB_CTA ) ); const { activateModule } = useDispatch( CORE_MODULES ); const { navigateTo } = useDispatch( CORE_LOCATION ); const { setInternalServerError } = useDispatch( CORE_SITE ); const { dismissItem } = useDispatch( CORE_USER ); const onClick = useCallback( async () => { const { error, response } = await activateModule( 'idea-hub' ); if ( ! error ) { navigateTo( response.moduleReauthURL ); } else { setInternalServerError( { id: 'idea-hub-setup-error', description: error.message, } ); } }, [ activateModule, navigateTo, setInternalServerError ] ); const onDismiss = useCallback( async () => { await dismissItem( DISMISS_ITEM_IDEA_HUB_CTA ); }, [ dismissItem ] ); // Don't render this component if it has been dismissed or dismissed items aren't loaded yet. if ( dismissed || dismissed === undefined ) { return <WidgetNull />; } return ( <Widget> <div className="googlesitekit-idea-hub__dashboard-cta"> <div className="googlesitekit-idea-hub__dashboard-cta__icon"> <IdeaHubIcon height="144" width="144" /> </div> <div className="googlesitekit-idea-hub__dashboard-cta__content"> <h5> { __( 'Get new topics based on what people are searching for with Idea Hub', 'google-site-kit' ) } </h5> <p className="googlesitekit-idea-hub__dashboard-cta__learnmore-copy"> <BulbIcon width="16" height="16" /> &nbsp; <Link className="googlesitekit-idea-hub__dashboard-cta__learnmore" href="https://sitekit.withgoogle.com/documentation/idea-hub-module/" external inherit hideExternalIndicator > { __( 'Learn more', 'google-site-kit' ) } </Link> </p> <Button onClick={ onClick }> { active && ! connected ? 
__( 'Complete set up', 'google-site-kit' ) : __( 'Set up', 'google-site-kit' ) } </Button> </div> <Button className="googlesitekit-idea-hub__dashboard-cta__close-button" icon={ <CloseIcon width="14" height="14" /> } text onClick={ onDismiss } /> </div> </Widget> ); } DashboardCTA.propTypes = { Widget: PropTypes.func.isRequired, }; export default DashboardCTA;
1
40,776
I have concerns about using this in more places before #3278 - I'll take a look at that again shortly.
google-site-kit-wp
js
@@ -37,7 +37,16 @@ func TestConvert(t *testing.T) { testLog := newTestLog() testLog.Topics = topics testLog.NotFixTopicCopyBug = true - receipt := &Receipt{1, 1, hash.ZeroHash256, 1, "test", []*Log{testLog}, nil, "balance not enough"} + receipt := &Receipt{ + Status: 1, + BlockHeight: 1, + ActionHash: hash.ZeroHash256, + GasConsumed: 1, + ContractAddress: "test", + TxIndex: 1, + logs: []*Log{testLog}, + executionRevertMsg: "balance not enough", + } typeReceipt := receipt.ConvertToReceiptPb() require.NotNil(typeReceipt)
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package action import ( "encoding/hex" "testing" "github.com/stretchr/testify/require" "github.com/iotexproject/go-pkgs/hash" ) func newTestLog() *Log { return &Log{ Address: "1", Data: []byte("cd07d8a74179e032f030d9244"), BlockHeight: 1, ActionHash: hash.ZeroHash256, Index: 1, } } func TestConvert(t *testing.T) { require := require.New(t) topics := []hash.Hash256{ hash.Hash256b([]byte("test")), hash.Hash256b([]byte("Pacific")), hash.Hash256b([]byte("Aleutian")), } testLog := newTestLog() testLog.Topics = topics testLog.NotFixTopicCopyBug = true receipt := &Receipt{1, 1, hash.ZeroHash256, 1, "test", []*Log{testLog}, nil, "balance not enough"} typeReceipt := receipt.ConvertToReceiptPb() require.NotNil(typeReceipt) receipt2 := &Receipt{} receipt2.ConvertFromReceiptPb(typeReceipt) require.Equal(receipt.Status, receipt2.Status) require.Equal(receipt.BlockHeight, receipt2.BlockHeight) require.Equal(receipt.ActionHash, receipt2.ActionHash) require.Equal(receipt.GasConsumed, receipt2.GasConsumed) require.Equal(receipt.ContractAddress, receipt2.ContractAddress) require.Equal(receipt.executionRevertMsg, receipt2.executionRevertMsg) // block earlier than AleutianHeight overwrites all topics with last topic data require.NotEqual(testLog, receipt2.logs[0]) h := receipt.Hash() testLog.NotFixTopicCopyBug = false typeReceipt = receipt.ConvertToReceiptPb() require.NotNil(typeReceipt) receipt2 = &Receipt{} receipt2.ConvertFromReceiptPb(typeReceipt) require.Equal(receipt, receipt2) require.NotEqual(h, receipt.Hash()) } func TestSerDer(t *testing.T) { require := require.New(t) receipt := &Receipt{1, 1, hash.ZeroHash256, 1, "", nil, nil, ""} ser, err := receipt.Serialize() require.NoError(err) receipt2 := &Receipt{} receipt2.Deserialize(ser) require.Equal(receipt.Status, receipt2.Status) require.Equal(receipt.BlockHeight, receipt2.BlockHeight) require.Equal(receipt.ActionHash, receipt2.ActionHash) require.Equal(receipt.GasConsumed, receipt2.GasConsumed) require.Equal(receipt.ContractAddress, receipt2.ContractAddress) hash := receipt.Hash() oldHash := "9b1d77d8b8902e8d4e662e7cd07d8a74179e032f030d92441ca7fba1ca68e0f4" require.Equal(oldHash, hex.EncodeToString(hash[:])) // starting HawaiiHeight execution revert message is added to receipt receipt = receipt.SetExecutionRevertMsg("test") hash2 := receipt.Hash() require.NotEqual(oldHash, hex.EncodeToString(hash2[:])) } func TestConvertLog(t *testing.T) { require := require.New(t) topics := []hash.Hash256{ hash.ZeroHash256, hash.Hash256b([]byte("Pacific")), hash.Hash256b([]byte("Aleutian")), } testLog := newTestLog() testLog.Topics = topics testLog.NotFixTopicCopyBug = true typeLog := testLog.ConvertToLogPb() require.NotNil(typeLog) log2 := &Log{} log2.ConvertFromLogPb(typeLog) require.Equal(testLog.Address, log2.Address) require.Equal(testLog.Data, log2.Data) require.Equal(testLog.BlockHeight, log2.BlockHeight) require.Equal(testLog.ActionHash, log2.ActionHash) require.Equal(testLog.Index, log2.Index) // block earlier than AleutianHeight overwrites all topics with last topic data last := len(log2.Topics) - 1 for _, v := range 
log2.Topics[:last] { require.Equal(topics[last], v) } testLog.NotFixTopicCopyBug = false typeLog = testLog.ConvertToLogPb() require.NotNil(typeLog) log2 = &Log{} log2.ConvertFromLogPb(typeLog) require.Equal(testLog, log2) } func TestSerDerLog(t *testing.T) { require := require.New(t) topics := []hash.Hash256{ hash.ZeroHash256, hash.Hash256b([]byte("Pacific")), hash.Hash256b([]byte("Aleutian")), } testLog := newTestLog() testLog.Topics = topics testLog.NotFixTopicCopyBug = true typeLog, err := testLog.Serialize() require.NoError(err) log2 := &Log{} log2.Deserialize(typeLog) require.Equal(testLog.Address, log2.Address) require.Equal(testLog.Data, log2.Data) require.Equal(testLog.BlockHeight, log2.BlockHeight) require.Equal(testLog.ActionHash, log2.ActionHash) require.Equal(testLog.Index, log2.Index) // block earlier than AleutianHeight overwrites all topics with last topic data last := len(log2.Topics) - 1 for _, v := range log2.Topics[:last] { require.Equal(topics[last], v) } testLog.NotFixTopicCopyBug = false typeLog, err = testLog.Serialize() require.NoError(err) log2 = &Log{} log2.Deserialize(typeLog) require.Equal(testLog, log2) }
1
24,263
The new TxIndex field needs to be asserted as well.
iotexproject-iotex-core
go
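A minimal sketch of the assertion the review asks for, assuming it sits in the same test file (package action) with the existing testify imports; the field names come from the patch and the surrounding test, while the helper name assertReceiptRoundTrip is invented for illustration.

```go
// assertReceiptRoundTrip is a hypothetical helper showing the check the
// review asks for: after the protobuf round trip, the newly added TxIndex
// field should be compared just like the existing fields are.
func assertReceiptRoundTrip(t *testing.T, r *Receipt) {
	require := require.New(t)

	pb := r.ConvertToReceiptPb()
	require.NotNil(pb)

	r2 := &Receipt{}
	r2.ConvertFromReceiptPb(pb)

	require.Equal(r.Status, r2.Status)
	require.Equal(r.BlockHeight, r2.BlockHeight)
	require.Equal(r.ActionHash, r2.ActionHash)
	require.Equal(r.GasConsumed, r2.GasConsumed)
	require.Equal(r.ContractAddress, r2.ContractAddress)
	require.Equal(r.TxIndex, r2.TxIndex) // the field added by this patch
}
```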
@@ -190,6 +190,7 @@ void Host::appendLogsInternal(folly::EventBase* eb, { std::lock_guard<std::mutex> g(self->lock_); self->setResponse(r); + self->lastLogIdSent_ = self->logIdToSend_; } self->noMoreRequestCV_.notify_all(); return;
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Base.h" #include "kvstore/raftex/Host.h" #include "kvstore/raftex/RaftPart.h" #include "kvstore/wal/FileBasedWal.h" #include "network/NetworkUtils.h" #include <folly/io/async/EventBase.h> #include <folly/executors/IOThreadPoolExecutor.h> DEFINE_uint32(max_appendlog_batch_size, 128, "The max number of logs in each appendLog request batch"); DEFINE_uint32(max_outstanding_requests, 1024, "The max number of outstanding appendLog requests"); DEFINE_int32(raft_rpc_timeout_ms, 500, "rpc timeout for raft client"); namespace nebula { namespace raftex { using nebula::network::NetworkUtils; Host::Host(const HostAddr& addr, std::shared_ptr<RaftPart> part, bool isLearner) : part_(std::move(part)) , addr_(addr) , isLearner_(isLearner) , idStr_(folly::stringPrintf( "%s[Host: %s:%d] ", part_->idStr_.c_str(), NetworkUtils::intToIPv4(addr_.first).c_str(), addr_.second)) , cachingPromise_(folly::SharedPromise<cpp2::AppendLogResponse>()) { } void Host::waitForStop() { std::unique_lock<std::mutex> g(lock_); CHECK(stopped_); noMoreRequestCV_.wait(g, [this] { return !requestOnGoing_; }); LOG(INFO) << idStr_ << "The host has been stopped!"; } cpp2::ErrorCode Host::checkStatus() const { CHECK(!lock_.try_lock()); if (stopped_) { VLOG(2) << idStr_ << "The host is stopped, just return"; return cpp2::ErrorCode::E_HOST_STOPPED; } if (paused_) { VLOG(2) << idStr_ << "The host is paused, due to losing leadership"; return cpp2::ErrorCode::E_NOT_A_LEADER; } return cpp2::ErrorCode::SUCCEEDED; } folly::Future<cpp2::AskForVoteResponse> Host::askForVote( const cpp2::AskForVoteRequest& req) { { std::lock_guard<std::mutex> g(lock_); auto res = checkStatus(); if (res != cpp2::ErrorCode::SUCCEEDED) { VLOG(2) << idStr_ << "The Host is not in a proper status, do not send"; cpp2::AskForVoteResponse resp; resp.set_error_code(res); return resp; } } auto client = tcManager().client(addr_); return client->future_askForVote(req); } folly::Future<cpp2::AppendLogResponse> Host::appendLogs( folly::EventBase* eb, TermID term, LogID logId, LogID committedLogId, TermID prevLogTerm, LogID prevLogId) { VLOG(3) << idStr_ << "Entering Host::appendLogs()"; VLOG(2) << idStr_ << "Append logs to the host [term = " << term << ", logId = " << logId << ", committedLogId = " << committedLogId << ", lastLogTermSent = " << prevLogTerm << ", lastLogIdSent = " << prevLogId << "]"; auto ret = folly::Future<cpp2::AppendLogResponse>::makeEmpty(); std::shared_ptr<cpp2::AppendLogRequest> req; { std::lock_guard<std::mutex> g(lock_); auto res = checkStatus(); if (logId <= lastLogIdSent_) { LOG(INFO) << idStr_ << "The log " << logId << " has been sended" << ", lastLogIdSent " << lastLogIdSent_; cpp2::AppendLogResponse r; r.set_error_code(cpp2::ErrorCode::SUCCEEDED); return r; } if (requestOnGoing_ && res == cpp2::ErrorCode::SUCCEEDED) { if (cachingPromise_.size() <= FLAGS_max_outstanding_requests) { pendingReq_ = std::make_tuple(term, logId, committedLogId); return cachingPromise_.getFuture(); } else { PLOG_EVERY_N(INFO, 200) << idStr_ << "Too many requests are waiting, return error"; cpp2::AppendLogResponse r; r.set_error_code(cpp2::ErrorCode::E_TOO_MANY_REQUESTS); return r; } } if (res != cpp2::ErrorCode::SUCCEEDED) { VLOG(2) << idStr_ << "The host is not in a proper status, just return"; cpp2::AppendLogResponse r; r.set_error_code(res); return r; } VLOG(2) << 
idStr_ << "About to send the AppendLog request"; // No request is ongoing, let's send a new request if (UNLIKELY(lastLogIdSent_ == 0 && lastLogTermSent_ == 0)) { LOG(INFO) << idStr_ << "This is the first time to send the logs to this host"; lastLogIdSent_ = prevLogId; lastLogTermSent_ = prevLogTerm; } if (prevLogTerm < lastLogTermSent_ || prevLogId < lastLogIdSent_) { LOG(INFO) << idStr_ << "We have sended this log, so go on from id " << lastLogIdSent_ << ", term " << lastLogTermSent_ << "; current prev log id " << prevLogId << ", current prev log term " << prevLogTerm; } logTermToSend_ = term; logIdToSend_ = logId; committedLogId_ = committedLogId; pendingReq_ = std::make_tuple(0, 0, 0); promise_ = std::move(cachingPromise_); cachingPromise_ = folly::SharedPromise<cpp2::AppendLogResponse>(); ret = promise_.getFuture(); requestOnGoing_ = true; req = prepareAppendLogRequest(); } // Get a new promise appendLogsInternal(eb, std::move(req)); return ret; } void Host::setResponse(const cpp2::AppendLogResponse& r) { CHECK(!lock_.try_lock()); promise_.setValue(r); cachingPromise_.setValue(r); cachingPromise_ = folly::SharedPromise<cpp2::AppendLogResponse>(); pendingReq_ = std::make_tuple(0, 0, 0); requestOnGoing_ = false; } void Host::appendLogsInternal(folly::EventBase* eb, std::shared_ptr<cpp2::AppendLogRequest> req) { sendAppendLogRequest(eb, std::move(req)).via(eb).then( [eb, self = shared_from_this()] (folly::Try<cpp2::AppendLogResponse>&& t) { VLOG(3) << self->idStr_ << "appendLogs() call got response"; if (t.hasException()) { VLOG(2) << self->idStr_ << t.exception().what(); cpp2::AppendLogResponse r; r.set_error_code(cpp2::ErrorCode::E_EXCEPTION); { std::lock_guard<std::mutex> g(self->lock_); self->setResponse(r); } self->noMoreRequestCV_.notify_all(); return; } cpp2::AppendLogResponse resp = std::move(t).value(); VLOG(3) << self->idStr_ << "AppendLogResponse " << "code " << static_cast<int32_t>(resp.get_error_code()) << ", currTerm " << resp.get_current_term() << ", lastLogId " << resp.get_last_log_id() << ", lastLogTerm " << resp.get_last_log_term() << ", commitLogId " << resp.get_committed_log_id(); switch (resp.get_error_code()) { case cpp2::ErrorCode::SUCCEEDED: { VLOG(2) << self->idStr_ << "AppendLog request sent successfully"; std::shared_ptr<cpp2::AppendLogRequest> newReq; { std::lock_guard<std::mutex> g(self->lock_); auto res = self->checkStatus(); if (res != cpp2::ErrorCode::SUCCEEDED) { VLOG(2) << self->idStr_ << "The host is not in a proper status," " just return"; cpp2::AppendLogResponse r; r.set_error_code(res); self->setResponse(r); } else { self->lastLogIdSent_ = resp.get_last_log_id(); self->lastLogTermSent_ = resp.get_last_log_term(); if (self->lastLogIdSent_ < self->logIdToSend_) { // More to send VLOG(2) << self->idStr_ << "There are more logs to send"; newReq = self->prepareAppendLogRequest(); } else { VLOG(2) << self->idStr_ << "Fulfill the promise, size = " << self->promise_.size(); // Fulfill the promise self->promise_.setValue(resp); if (self->noRequest()) { VLOG(2) << self->idStr_ << "No request any more!"; self->requestOnGoing_ = false; } else { auto& tup = self->pendingReq_; self->logTermToSend_ = std::get<0>(tup); self->logIdToSend_ = std::get<1>(tup); self->committedLogId_ = std::get<2>(tup); VLOG(2) << self->idStr_ << "Sending the pending request in the queue" << ", from " << self->lastLogIdSent_ + 1 << " to " << self->logIdToSend_; newReq = self->prepareAppendLogRequest(); self->promise_ = std::move(self->cachingPromise_); self->cachingPromise_ = 
folly::SharedPromise<cpp2::AppendLogResponse>(); self->pendingReq_ = std::make_tuple(0, 0, 0); } } } } if (newReq) { self->appendLogsInternal(eb, newReq); } else { self->noMoreRequestCV_.notify_all(); } return; } case cpp2::ErrorCode::E_LOG_GAP: { VLOG(2) << self->idStr_ << "The host's log is behind, need to catch up"; std::shared_ptr<cpp2::AppendLogRequest> newReq; { std::lock_guard<std::mutex> g(self->lock_); auto res = self->checkStatus(); if (res != cpp2::ErrorCode::SUCCEEDED) { VLOG(2) << self->idStr_ << "The host is not in a proper status," " skip catching up the gap"; cpp2::AppendLogResponse r; r.set_error_code(res); self->setResponse(r); } else { self->lastLogIdSent_ = resp.get_last_log_id(); self->lastLogTermSent_ = resp.get_last_log_term(); newReq = self->prepareAppendLogRequest(); } } if (newReq) { self->appendLogsInternal(eb, newReq); } else { self->noMoreRequestCV_.notify_all(); } return; } case cpp2::ErrorCode::E_WAITING_SNAPSHOT: { VLOG(2) << self->idStr_ << "The host is waiting for the snapshot, so we need to send log from " << " current committedLogId " << self->committedLogId_; std::shared_ptr<cpp2::AppendLogRequest> newReq; { std::lock_guard<std::mutex> g(self->lock_); auto res = self->checkStatus(); if (res != cpp2::ErrorCode::SUCCEEDED) { VLOG(2) << self->idStr_ << "The host is not in a proper status," " skip waiting the snapshot"; cpp2::AppendLogResponse r; r.set_error_code(res); self->setResponse(r); } else { self->lastLogIdSent_ = self->committedLogId_; self->lastLogTermSent_ = self->logTermToSend_; newReq = self->prepareAppendLogRequest(); } } if (newReq) { self->appendLogsInternal(eb, newReq); } else { self->noMoreRequestCV_.notify_all(); } return; } case cpp2::ErrorCode::E_LOG_STALE: { VLOG(2) << self->idStr_ << "Log stale, reset lastLogIdSent " << self->lastLogIdSent_ << " to the followers lastLodId " << resp.get_last_log_id(); { std::lock_guard<std::mutex> g(self->lock_); auto res = self->checkStatus(); if (res != cpp2::ErrorCode::SUCCEEDED) { VLOG(2) << self->idStr_ << "The host is not in a proper status," " skip waiting the snapshot"; cpp2::AppendLogResponse r; r.set_error_code(res); self->setResponse(r); } else { self->lastLogIdSent_ = resp.get_last_log_id(); self->lastLogTermSent_ = resp.get_last_log_term(); // For log stale, we think the request has been succeeded cpp2::AppendLogResponse r; r.set_error_code(cpp2::ErrorCode::SUCCEEDED); self->setResponse(r); } } self->noMoreRequestCV_.notify_all(); return; } default: { PLOG_EVERY_N(ERROR, 100) << self->idStr_ << "Failed to append logs to the host (Err: " << static_cast<int32_t>(resp.get_error_code()) << ")"; { std::lock_guard<std::mutex> g(self->lock_); self->setResponse(resp); } self->noMoreRequestCV_.notify_all(); return; } } }); } std::shared_ptr<cpp2::AppendLogRequest> Host::prepareAppendLogRequest() { CHECK(!lock_.try_lock()); auto req = std::make_shared<cpp2::AppendLogRequest>(); req->set_space(part_->spaceId()); req->set_part(part_->partitionId()); req->set_current_term(logTermToSend_); req->set_last_log_id(logIdToSend_); req->set_leader_ip(part_->address().first); req->set_leader_port(part_->address().second); req->set_committed_log_id(committedLogId_); req->set_last_log_term_sent(lastLogTermSent_); req->set_last_log_id_sent(lastLogIdSent_); VLOG(2) << idStr_ << "Prepare AppendLogs request from Log " << lastLogIdSent_ + 1 << " to " << logIdToSend_; auto it = part_->wal()->iterator(lastLogIdSent_ + 1, logIdToSend_); if (it->valid()) { VLOG(2) << idStr_ << "Prepare the list of log entries to send"; 
auto term = it->logTerm(); req->set_log_term(term); std::vector<cpp2::LogEntry> logs; for (size_t cnt = 0; it->valid() && it->logTerm() == term && cnt < FLAGS_max_appendlog_batch_size; ++(*it), ++cnt) { cpp2::LogEntry le; le.set_cluster(it->logSource()); le.set_log_str(it->logMsg().toString()); logs.emplace_back(std::move(le)); } req->set_log_str_list(std::move(logs)); req->set_sending_snapshot(false); } else { req->set_sending_snapshot(true); if (!sendingSnapshot_) { LOG(INFO) << idStr_ << "Can't find log " << lastLogIdSent_ + 1 << " in wal, send the snapshot"; sendingSnapshot_ = true; part_->snapshot_->sendSnapshot(part_, addr_).then([this] (Status&& status) { if (status.ok()) { LOG(INFO) << idStr_ << "Send snapshot succeeded!"; } else { LOG(INFO) << idStr_ << "Send snapshot failed!"; // TODO(heng): we should tell the follower i am failed. } sendingSnapshot_ = false; }); } else { LOG(INFO) << idStr_ << "The snapshot req is in queue, please wait for a moment"; } } return req; } folly::Future<cpp2::AppendLogResponse> Host::sendAppendLogRequest( folly::EventBase* eb, std::shared_ptr<cpp2::AppendLogRequest> req) { VLOG(2) << idStr_ << "Entering Host::sendAppendLogRequest()"; { std::lock_guard<std::mutex> g(lock_); auto res = checkStatus(); if (res != cpp2::ErrorCode::SUCCEEDED) { LOG(WARNING) << idStr_ << "The Host is not in a proper status, do not send"; cpp2::AppendLogResponse resp; resp.set_error_code(res); return resp; } } VLOG(1) << idStr_ << "Sending request space " << req->get_space() << ", part " << req->get_part() << ", current term " << req->get_current_term() << ", last_log_id " << req->get_last_log_id() << ", committed_id " << req->get_committed_log_id() << ", last_log_term_sent" << req->get_last_log_term_sent() << ", last_log_id_sent " << req->get_last_log_id_sent(); // Get client connection auto client = tcManager().client(addr_, eb, false, FLAGS_raft_rpc_timeout_ms); return client->future_appendLog(*req); } bool Host::noRequest() const { CHECK(!lock_.try_lock()); static auto emptyTup = std::make_tuple(0, 0, 0); return pendingReq_ == emptyTup; } } // namespace raftex } // namespace nebula
1
23,395
When sending the log failed, why update the last sent log ID?
vesoft-inc-nebula
cpp
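The reviewer's concern, restated as a small Go sketch (the original is C++): Host::appendLogs returns early success whenever logId <= lastLogIdSent_, so advancing lastLogIdSent_ on the error path marks logs as delivered that never reached the follower. The names below (host, appendLogs, send) are illustrative only, not the nebula API.

```go
package main

import "fmt"

// host is an illustrative stand-in for the C++ Host: it remembers the
// highest log id it believes it has already sent to the peer.
type host struct {
	lastLogIdSent int64
}

// appendLogs mirrors the early-return dedup check in Host::appendLogs.
func (h *host) appendLogs(logID int64, send func() error) error {
	if logID <= h.lastLogIdSent {
		// Treated as already sent; no request goes out.
		return nil
	}
	err := send()
	// Advancing lastLogIdSent even when the send failed (as the patch does
	// on the error path) records the failed logs as delivered.
	h.lastLogIdSent = logID
	return err
}

func main() {
	h := &host{}
	_ = h.appendLogs(5, func() error { return fmt.Errorf("rpc failed") })
	// A retry of log 5 is now silently skipped even though it never landed.
	fmt.Println(h.appendLogs(5, func() error { return nil })) // prints <nil>
}
```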
@@ -127,12 +127,12 @@ public final class Tuple0 implements Tuple, Comparable<Tuple0>, Serializable { } @Override - public <T> Tuple1<T> prepend(T value) { + public <T> Tuple1<T> append(T value) { return new Tuple1<>(value); } @Override - public <T> Tuple1<T> append(T value) { + public <T> Tuple1<T> prepend(T value) { return new Tuple1<>(value); }
1
/* / \____ _ _ ____ ______ / \ ____ __ _______ * / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG * _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io * /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0 */ package javaslang; /*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-*\ G E N E R A T O R C R A F T E D \*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-*/ import java.io.Serializable; import java.util.Comparator; import java.util.Objects; import java.util.function.Supplier; import javaslang.collection.Iterator; import javaslang.collection.List; import javaslang.collection.Seq; /** * A tuple of no elements which can be seen as cartesian product of no components. * * @author Daniel Dietrich * @since 1.1.0 */ public final class Tuple0 implements Tuple, Comparable<Tuple0>, Serializable { private static final long serialVersionUID = 1L; /** * The singleton instance of Tuple0. */ private static final Tuple0 INSTANCE = new Tuple0 (); /** * The singleton Tuple0 comparator. */ private static final Comparator<Tuple0> COMPARATOR = (Comparator<Tuple0> & Serializable) (t1, t2) -> 0; // hidden constructor, internally called private Tuple0 () { } /** * Returns the singleton instance of Tuple0. * * @return The singleton instance of Tuple0. */ public static Tuple0 instance() { return INSTANCE; } public static Comparator<Tuple0> comparator() { return COMPARATOR; } @Override public int arity() { return 0; } @Override public int compareTo(Tuple0 that) { return 0; } /** * Transforms this tuple to an object of type U. * * @param f Transformation which creates a new object of type U based on this tuple's contents. * @param <U> type of the transformation result * @return An object of type U * @throws NullPointerException if {@code f} is null */ public <U> U apply(Supplier<? extends U> f) { Objects.requireNonNull(f, "f is null"); return f.get(); } /** * Transforms this tuple to an object of type U. * * @deprecated Use {@link #apply(Supplier)} instead, will be removed in 3.0.0 * @param f Transformation which creates a new object of type U based on this tuple's contents. * @param <U> type of the transformation result * @return An object of type U * @throws NullPointerException if {@code f} is null */ @Deprecated(/* Use apply instead, will be removed in 3.0.0 */) public <U> U transform(Supplier<? extends U> f) { Objects.requireNonNull(f, "f is null"); return f.get(); } @Override public Seq<?> toSeq() { return List.empty(); } // -- Object @Override public boolean equals(Object o) { return o == this; } @Override public int hashCode() { return 1; } @Override public String toString() { return "()"; } // -- Serializable implementation /** * Instance control for object serialization. * * @return The singleton instance of Tuple0. * @see java.io.Serializable */ private Object readResolve() { return INSTANCE; } @Override public <T> Tuple1<T> prepend(T value) { return new Tuple1<>(value); } @Override public <T> Tuple1<T> append(T value) { return new Tuple1<>(value); } }
1
10,101
What was the problem with the previous order? `prepend` comes conceptually before `append`, i.e. `insert before` should come before `insert after`, I think.
vavr-io-vavr
java
@@ -291,6 +291,8 @@ type BackupInfo struct { // Timestamp is the timestamp at which the source volume // was backed up to cloud Timestamp time.Time + // Metadata associated with the backup + Metadata map[string]string // Status indicates if this backup was successful Status string }
1
package api import ( "fmt" "math" "strconv" "strings" "time" "github.com/mohae/deepcopy" ) // Strings for VolumeSpec const ( Name = "name" SpecNodes = "nodes" SpecParent = "parent" SpecEphemeral = "ephemeral" SpecShared = "shared" SpecJournal = "journal" SpecNfs = "nfs" SpecCascaded = "cascaded" SpecSticky = "sticky" SpecSecure = "secure" SpecCompressed = "compressed" SpecSize = "size" SpecScale = "scale" SpecFilesystem = "fs" SpecBlockSize = "block_size" SpecHaLevel = "repl" SpecPriority = "io_priority" SpecSnapshotInterval = "snap_interval" SpecSnapshotSchedule = "snap_schedule" SpecAggregationLevel = "aggregation_level" SpecDedupe = "dedupe" SpecPassphrase = "secret_key" SpecAutoAggregationValue = "auto" SpecGroup = "group" SpecGroupEnforce = "fg" SpecZones = "zones" SpecRacks = "racks" SpecRack = "rack" SpecRegions = "regions" SpecLabels = "labels" SpecPriorityAlias = "priority_io" SpecIoProfile = "io_profile" ) // OptionKey specifies a set of recognized query params. const ( // OptName query parameter used to lookup volume by name. OptName = "Name" // OptVolumeID query parameter used to lookup volume by ID. OptVolumeID = "VolumeID" // OptSnapID query parameter used to lookup snapshot by ID. OptSnapID = "SnapID" // OptLabel query parameter used to lookup volume by set of labels. OptLabel = "Label" // OptConfigLabel query parameter used to lookup volume by set of labels. OptConfigLabel = "ConfigLabel" // OptCumulative query parameter used to request cumulative stats. OptCumulative = "Cumulative" // OptTimeout query parameter used to indicate timeout seconds OptTimeoutSec = "TimeoutSec" // OptQuiesceID query parameter use for quiesce OptQuiesceID = "QuiesceID" // OptCredUUID is the UUID of the credential OptCredUUID = "CredUUID" // OptCredType indicates type of credential OptCredType = "CredType" // OptCredEncrKey is the key used to encrypt data OptCredEncrKey = "CredEncrypt" // OptCredRegion indicates the region for s3 OptCredRegion = "CredRegion" // OptCredDisableSSL indicated if SSL should be disabled OptCredDisableSSL = "CredDisableSSL" // OptCredEndpoint indicate the cloud endpoint OptCredEndpoint = "CredEndpoint" // OptCredAccKey for s3 OptCredAccessKey = "CredAccessKey" // OptCredSecretKey for s3 OptCredSecretKey = "CredSecretKey" // OptCredGoogleProjectID projectID for google cloud OptCredGoogleProjectID = "CredProjectID" // OptCredGoogleJsonKey for google cloud OptCredGoogleJsonKey = "CredJsonKey" // OptCredAzureAccountName is the account name for // azure as the cloud provider OptCredAzureAccountName = "CredAccountName" // OptOptCredAzureAccountKey is the accountkey for // azure as the cloud provider OptCredAzureAccountKey = "CredAccountKey" // OptCloudBackupID is the backID in the cloud OptCloudBackupID = "CloudBackID" // OptSrcVolID is the source volume ID of the backup OptSrcVolID = "SrcVolID" // OptBkupOpState is the desired operational state // (stop/pause/resume) of backup/restore OptBkupOpState = "OpState" // OptBackupSchedUUID is the UUID of the backup-schedule OptBackupSchedUUID = "BkupSchedUUID" ) // Api clientserver Constants const ( OsdVolumePath = "osd-volumes" OsdSnapshotPath = "osd-snapshot" OsdCredsPath = "osd-creds" OsdBackupPath = "osd-backup" TimeLayout = "Jan 2 15:04:05 UTC 2006" ) const ( // AutoAggregation value indicates driver to select aggregation level. AutoAggregation = math.MaxUint32 ) // Node describes the state of a node. 
// It includes the current physical state (CPU, memory, storage, network usage) as // well as the containers running on the system. // // swagger:model type Node struct { // Id of the node. Id string // Cpu usage of the node. Cpu float64 // percentage. // Total Memory of the node MemTotal uint64 // Used Memory of the node MemUsed uint64 // Free Memory of the node MemFree uint64 // Average load (percentage) Avgload int // Node Status see (Status object) Status Status // GenNumber of the node GenNumber uint64 // List of disks on this node. Disks map[string]StorageResource // List of storage pools this node supports Pools []StoragePool // Management IP MgmtIp string // Data IP DataIp string // Timestamp Timestamp time.Time // Start time of this node StartTime time.Time // Hostname of this node Hostname string // Node data for this node (EX: Public IP, Provider, City..) NodeData map[string]interface{} // User defined labels for node. Key Value pairs NodeLabels map[string]string } // FluentDConfig describes ip and port of a fluentdhost. // DEPRECATED // // swagger:model type FluentDConfig struct { IP string `json:"ip"` Port string `json:"port"` } // TunnelConfig describes key, cert and endpoint of a reverse proxy tunnel // DEPRECATED // // swagger:model type TunnelConfig struct { Key string `json:"key"` Cert string `json:"cert"` Endpoint string `json:"tunnel_endpoint"` } // Cluster represents the state of the cluster. // // swagger:model type Cluster struct { Status Status // Id of the cluster. // // required: true Id string // Id of the node on which this cluster object is initialized NodeId string // array of all the nodes in the cluster. Nodes []Node // Logging url for the cluster. LoggingURL string // Management url for the cluster ManagementURL string // FluentD Host for the cluster FluentDConfig FluentDConfig // TunnelConfig for the cluster [key, cert, endpoint] TunnelConfig TunnelConfig } // CredCreateRequest is the input for CredCreate command type CredCreateRequest struct { // InputParams is map describing cloud provide InputParams map[string]string } // CredCreateResponse is returned for CredCreate command type CredCreateResponse struct { // UUID of the credential that was just created UUID string // CredErr indicates reasonfor failed CredCreate CredErr string } // StatPoint represents the basic structure of a single Stat reported // TODO: This is the first step to introduce stats in openstorage. 
// Follow up task is to introduce an API for logging stats type StatPoint struct { // Name of the Stat Name string // Tags for the Stat Tags map[string]string // Fields and values of the stat Fields map[string]interface{} // Timestamp in Unix format Timestamp int64 } type BackupRequest struct { // VolumeID of the volume for which cloudbackup is requested VolumeID string // CredentialUUID is cloud credential to be used for backup CredentialUUID string // Full indicates if full backup is desired eventhough incremental is possible Full bool } type BackupRestoreRequest struct { // CloudBackupID is the backup ID being restored CloudBackupID string // RestoreVolumeName is optional volume Name of the new volume to be created // in the cluster for restoring the cloudbackup RestoreVolumeName string // CredentialUUID is the credential to be used for restore operation CredentialUUID string // NodeID is the optional NodeID for provisionging restore volume(ResoreVolumeID should not be specified) NodeID string } type BackupRestoreResponse struct { // RestoreVolumeID is the volumeID to which the backup is being restored RestoreVolumeID string // RestoreErr indicates the reason for failure of restore operation RestoreErr string } type BackupGenericRequest struct { // SrcVolumeID is optional Source VolumeID to list backups for SrcVolumeID string // ClusterID is the optional clusterID to list backups for ClusterID string // All if set to true, backups for all clusters in the cloud are returned All bool // CredentialUUID is the credential for cloud CredentialUUID string } type BackupEnumerateRequest struct { BackupGenericRequest } type BackupDeleteRequest struct { BackupGenericRequest } type BackupInfo struct { // SrcVolumeID is Source volumeID of the backup SrcVolumeID string // SrcvolumeName is name of the sourceVolume of the backup SrcVolumeName string // BackupID is cloud backup ID for the above source volume BackupID string // Timestamp is the timestamp at which the source volume // was backed up to cloud Timestamp time.Time // Status indicates if this backup was successful Status string } type BackupEnumerateResponse struct { // Backups is list of backups in cloud for given volume/cluster/s Backups []BackupInfo // EnumerateErr indicates any error encountered while enumerating backups EnumerateErr string } type BackupStsRequest struct { // SrcVolumeID optional volumeID to list status of backup/restore SrcVolumeID string // Local indicates if only those backups/restores that are // active on current node must be returned Local bool } type BackupStatus struct { // OpType indicates if this is a backup or restore OpType string // State indicates if the op is currently active/done/failed Status string // BytesDone indicates total Bytes uploaded/downloaded BytesDone uint64 // StartTime indicates Op's start time StartTime time.Time // CompletedTime indicates Op's completed time CompletedTime time.Time //BackupID is the Backup ID for the Op BackupID string // NodeID is the ID of the node where this Op is active NodeID string } type BackupStsResponse struct { // statuses is list of currently active/failed/done backup/restores Statuses map[string]BackupStatus // StsErr indicates any error in obtaining the status StsErr string } type BackupCatalogueRequest struct { // CloudBackupID is Backup ID in the cloud CloudBackupID string // CredentialUUID is the credential for cloud CredentialUUID string } type BackupCatalogueResponse struct { // Contents is listing of backup contents Contents []string // CatalogueErr 
indicates any error in obtaining cataolgue CatalogueErr string } type BackupHistoryRequest struct { //SrcVolumeID is volumeID for which history of backup/restore // is being requested SrcVolumeID string } type BackupHistoryItem struct { // SrcVolumeID is volume ID which was backedup SrcVolumeID string // TimeStamp is the time at which either backup completed/failed Timestamp time.Time // Status indicates whether backup was completed/failed Status string } type BackupHistoryResponse struct { //HistoryList is list of past backup/restores in the cluster HistoryList []BackupHistoryItem //HistoryErr indicates any error in obtaining history HistoryErr string } type BackupStateChangeRequest struct { // SrcVolumeID is volume ID on which backup/restore // state change is being requested SrcVolumeID string // RequestedState is desired state of the op // can be pause/resume/stop RequestedState string } type BackupScheduleInfo struct { // SrcVolumeID is the i schedule's source volume SrcVolumeID string // CredentialUUID is the cloud credential used with this schedule CredentialUUID string // BackupSchedule is the frequence of backup BackupSchedule string // MaxBackups are the maximum number of backups retained // in cloud.Older backups are deleted MaxBackups uint } type BackupSchedDeleteRequest struct { // SchedUUID is UUID of the schedule to be deleted SchedUUID string } type BackupSchedResponse struct { // SchedUUID is the UUID of the newly created schedule SchedUUID string //SchedCreateErr indicates any error while creating backupschedule SchedCreateErr string } type BackupSchedEnumerateResponse struct { // BackupSchedule is map of schedule uuid to scheduleInfo BackupSchedules map[string]BackupScheduleInfo // SchedEnumerateErr is error encountered while enumerating schedules SchedEnumerateErr string } // DriverTypeSimpleValueOf returns the string format of DriverType func DriverTypeSimpleValueOf(s string) (DriverType, error) { obj, err := simpleValueOf("driver_type", DriverType_value, s) return DriverType(obj), err } // SimpleString returns the string format of DriverType func (x DriverType) SimpleString() string { return simpleString("driver_type", DriverType_name, int32(x)) } // FSTypeSimpleValueOf returns the string format of FSType func FSTypeSimpleValueOf(s string) (FSType, error) { obj, err := simpleValueOf("fs_type", FSType_value, s) return FSType(obj), err } // SimpleString returns the string format of DriverType func (x FSType) SimpleString() string { return simpleString("fs_type", FSType_name, int32(x)) } // CosTypeSimpleValueOf returns the string format of CosType func CosTypeSimpleValueOf(s string) (CosType, error) { obj, exists := CosType_value[strings.ToUpper(s)] if !exists { return -1, fmt.Errorf("Invalid cos value: %s", s) } return CosType(obj), nil } // SimpleString returns the string format of CosType func (x CosType) SimpleString() string { return simpleString("cos_type", CosType_name, int32(x)) } // GraphDriverChangeTypeSimpleValueOf returns the string format of GraphDriverChangeType func GraphDriverChangeTypeSimpleValueOf(s string) (GraphDriverChangeType, error) { obj, err := simpleValueOf("graph_driver_change_type", GraphDriverChangeType_value, s) return GraphDriverChangeType(obj), err } // SimpleString returns the string format of GraphDriverChangeType func (x GraphDriverChangeType) SimpleString() string { return simpleString("graph_driver_change_type", GraphDriverChangeType_name, int32(x)) } // VolumeActionParamSimpleValueOf returns the string format of VolumeAction func 
VolumeActionParamSimpleValueOf(s string) (VolumeActionParam, error) { obj, err := simpleValueOf("volume_action_param", VolumeActionParam_value, s) return VolumeActionParam(obj), err } // SimpleString returns the string format of VolumeAction func (x VolumeActionParam) SimpleString() string { return simpleString("volume_action_param", VolumeActionParam_name, int32(x)) } // VolumeStateSimpleValueOf returns the string format of VolumeState func VolumeStateSimpleValueOf(s string) (VolumeState, error) { obj, err := simpleValueOf("volume_state", VolumeState_value, s) return VolumeState(obj), err } // SimpleString returns the string format of VolumeState func (x VolumeState) SimpleString() string { return simpleString("volume_state", VolumeState_name, int32(x)) } // VolumeStatusSimpleValueOf returns the string format of VolumeStatus func VolumeStatusSimpleValueOf(s string) (VolumeStatus, error) { obj, err := simpleValueOf("volume_status", VolumeStatus_value, s) return VolumeStatus(obj), err } // SimpleString returns the string format of VolumeStatus func (x VolumeStatus) SimpleString() string { return simpleString("volume_status", VolumeStatus_name, int32(x)) } // IoProfileSimpleValueOf returns the string format of IoProfile func IoProfileSimpleValueOf(s string) (IoProfile, error) { obj, err := simpleValueOf("io_profile", IoProfile_value, s) return IoProfile(obj), err } // SimpleString returns the string format of IoProfile func (x IoProfile) SimpleString() string { return simpleString("io_profile", IoProfile_name, int32(x)) } func simpleValueOf(typeString string, valueMap map[string]int32, s string) (int32, error) { obj, ok := valueMap[strings.ToUpper(fmt.Sprintf("%s_%s", typeString, s))] if !ok { return 0, fmt.Errorf("no openstorage.%s for %s", strings.ToUpper(typeString), s) } return obj, nil } func simpleString(typeString string, nameMap map[int32]string, v int32) string { s, ok := nameMap[v] if !ok { return strconv.Itoa(int(v)) } return strings.TrimPrefix(strings.ToLower(s), fmt.Sprintf("%s_", strings.ToLower(typeString))) } func toSec(ms uint64) uint64 { return ms / 1000 } // WriteThroughput returns the write throughput func (v *Stats) WriteThroughput() uint64 { intv := toSec(v.IntervalMs) if intv == 0 { return 0 } return (v.WriteBytes) / intv } // ReadThroughput returns the read throughput func (v *Stats) ReadThroughput() uint64 { intv := toSec(v.IntervalMs) if intv == 0 { return 0 } return (v.ReadBytes) / intv } // Latency returns latency func (v *Stats) Latency() uint64 { ops := v.Writes + v.Reads if ops == 0 { return 0 } return (uint64)((v.IoMs * 1000) / ops) } // Read latency returns avg. time required for read operation to complete func (v *Stats) ReadLatency() uint64 { if v.Reads == 0 { return 0 } return (uint64)((v.ReadMs * 1000) / v.Reads) } // Write latency returns avg. time required for write operation to complete func (v *Stats) WriteLatency() uint64 { if v.Writes == 0 { return 0 } return (uint64)((v.WriteMs * 1000) / v.Writes) } // Iops returns iops func (v *Stats) Iops() uint64 { intv := toSec(v.IntervalMs) if intv == 0 { return 0 } return (v.Writes + v.Reads) / intv } // Scaled returns true if the volume is scaled. func (v *Volume) Scaled() bool { return v.Spec.Scale > 1 } // Contains returns true if mid is a member of volume's replication set. 
func (m *Volume) Contains(mid string) bool { rsets := m.GetReplicaSets() for _, rset := range rsets { for _, node := range rset.Nodes { if node == mid { return true } } } return false } // Copy makes a deep copy of VolumeSpec func (s *VolumeSpec) Copy() *VolumeSpec { spec := *s if s.VolumeLabels != nil { spec.VolumeLabels = make(map[string]string) for k, v := range s.VolumeLabels { spec.VolumeLabels[k] = v } } if s.ReplicaSet != nil { spec.ReplicaSet = &ReplicaSet{Nodes: make([]string, len(s.ReplicaSet.Nodes))} copy(spec.ReplicaSet.Nodes, s.ReplicaSet.Nodes) } return &spec } // Copy makes a deep copy of Node func (s *Node) Copy() *Node { localCopy := deepcopy.Copy(*s) nodeCopy := localCopy.(Node) return &nodeCopy } func (v Volume) IsClone() bool { return v.Source != nil && len(v.Source.Parent) != 0 && !v.Readonly } func (v Volume) IsSnapshot() bool { return v.Source != nil && len(v.Source.Parent) != 0 && v.Readonly } func (v Volume) DisplayId() string { if v.Locator != nil { return fmt.Sprintf("%s (%s)", v.Locator.Name, v.Id) } else { return v.Id } }
1
6,613
Why is this necessary? Could you provide some context?
libopenstorage-openstorage
go
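A speculative sketch of how a caller might consume the new Metadata field when enumerating cloud backups; it only illustrates what the field could carry and is not taken from the openstorage code. It assumes the patched api package at the import path shown; the struct fields match the file above plus the patch, and the sample key/value is invented.

```go
package main

import (
	"fmt"

	"github.com/libopenstorage/openstorage/api"
)

// printBackups walks a BackupEnumerateResponse and prints the per-backup
// Metadata map introduced by the patch alongside the existing fields.
func printBackups(resp *api.BackupEnumerateResponse) {
	for _, b := range resp.Backups {
		fmt.Printf("backup %s of volume %s (%s)\n", b.BackupID, b.SrcVolumeID, b.Status)
		for k, v := range b.Metadata {
			fmt.Printf("  %s=%s\n", k, v)
		}
	}
}

func main() {
	// Hypothetical response, just to make the sketch runnable.
	resp := &api.BackupEnumerateResponse{
		Backups: []api.BackupInfo{{
			SrcVolumeID: "vol-1",
			BackupID:    "bkp-1",
			Status:      "done",
			Metadata:    map[string]string{"requested-by": "sched-uuid"},
		}},
	}
	printBackups(resp)
}
```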
@@ -46,7 +46,9 @@ public class HTMLTestResults { private final HTMLSuiteResult suite; private static final String HEADER = "<html>\n" + - "<head><style type='text/css'>\n" + + "<head>\n"+ + "<meta charset=\"UTF-8\">\n"+ + "<style type='text/css'>\n" + "body, table {\n" + " font-family: Verdana, Arial, sans-serif;\n" + " font-size: 12;\n" +
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.server.htmlrunner; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.io.Writer; import java.net.URLDecoder; import java.text.MessageFormat; import java.util.Collection; import java.util.LinkedList; import java.util.List; /** * A data model class for the results of the Selenium HTMLRunner (aka TestRunner, FITRunner) * * @author Darren Cotterill * @author Ajit George */ public class HTMLTestResults { private final String result; private final String totalTime; private final String numTestTotal; private final String numTestPasses; private final String numTestFailures; private final String numCommandPasses; private final String numCommandFailures; private final String numCommandErrors; private final String seleniumVersion; private final String seleniumRevision; private final String log; private final HTMLSuiteResult suite; private static final String HEADER = "<html>\n" + "<head><style type='text/css'>\n" + "body, table {\n" + " font-family: Verdana, Arial, sans-serif;\n" + " font-size: 12;\n" + "}\n" + "\n" + "table {\n" + " border-collapse: collapse;\n" + " border: 1px solid #ccc;\n" + "}\n" + "\n" + "th, td {\n" + " padding-left: 0.3em;\n" + " padding-right: 0.3em;\n" + "}\n" + "\n" + "a {\n" + " text-decoration: none;\n" + "}\n" + "\n" + ".title {\n" + " font-style: italic;\n" + "}\n" + "\n" + ".selected {\n" + " background-color: #ffffcc;\n" + "}\n" + "\n" + ".status_done {\n" + " background-color: #eeffee;\n" + "}\n" + "\n" + ".status_passed {\n" + " background-color: #ccffcc;\n" + "}\n" + "\n" + ".status_failed {\n" + " background-color: #ffcccc;\n" + "}\n" + "\n" + ".breakpoint {\n" + " background-color: #cccccc;\n" + " border: 1px solid black;\n" + "}\n" + "</style><title>Test suite results</title></head>\n" + "<body>\n<h1>Test suite results </h1>"; private static final String SUMMARY_HTML = "\n\n<table>\n" + "<tr>\n<td>result:</td>\n<td>{0}</td>\n</tr>\n" + "<tr>\n<td>totalTime:</td>\n<td>{1}</td>\n</tr>\n" + "<tr>\n<td>numTestTotal:</td>\n<td>{2}</td>\n</tr>\n" + "<tr>\n<td>numTestPasses:</td>\n<td>{3}</td>\n</tr>\n" + "<tr>\n<td>numTestFailures:</td>\n<td>{4}</td>\n</tr>\n" + "<tr>\n<td>numCommandPasses:</td>\n<td>{5}</td>\n</tr>\n" + "<tr>\n<td>numCommandFailures:</td>\n<td>{6}</td>\n</tr>\n" + "<tr>\n<td>numCommandErrors:</td>\n<td>{7}</td>\n</tr>\n" + "<tr>\n<td>Selenium Version:</td>\n<td>{8}</td>\n</tr>\n" + "<tr>\n<td>Selenium Revision:</td>\n<td>{9}</td>\n</tr>\n" + "<tr>\n<td>{10}</td>\n<td>&nbsp;</td>\n</tr>\n</table>"; private static final String SUITE_HTML = "<tr>\n<td><a name=\"testresult{0}\">{1}</a><br/>{2}</td>\n<td>&nbsp;</td>\n</tr>"; private final List<String> testTables; public HTMLTestResults(String 
postedSeleniumVersion, String postedSeleniumRevision, String postedResult, String postedTotalTime, String postedNumTestTotal, String postedNumTestPasses, String postedNumTestFailures, String postedNumCommandPasses, String postedNumCommandFailures, String postedNumCommandErrors, String postedSuite, List<String> postedTestTables, String postedLog) { result = postedResult; numCommandFailures = postedNumCommandFailures; numCommandErrors = postedNumCommandErrors; suite = new HTMLSuiteResult(postedSuite); totalTime = postedTotalTime; numTestTotal = postedNumTestTotal; numTestPasses = postedNumTestPasses; numTestFailures = postedNumTestFailures; numCommandPasses = postedNumCommandPasses; testTables = postedTestTables; seleniumVersion = postedSeleniumVersion; seleniumRevision = postedSeleniumRevision; log = postedLog; } public String getResult() { return result; } public String getNumCommandErrors() { return numCommandErrors; } public String getNumCommandFailures() { return numCommandFailures; } public String getNumCommandPasses() { return numCommandPasses; } public String getNumTestFailures() { return numTestFailures; } public String getNumTestPasses() { return numTestPasses; } public Collection getTestTables() { return testTables; } public String getTotalTime() { return totalTime; } public int getNumTotalTests() { return Integer.parseInt(numTestPasses) + Integer.parseInt(numTestFailures); } public void write(Writer out) throws IOException { out.write(HEADER); out.write(MessageFormat.format(SUMMARY_HTML, result, totalTime, numTestTotal, numTestPasses, numTestFailures, numCommandPasses, numCommandFailures, numCommandErrors, seleniumVersion, seleniumRevision, suite.getUpdatedSuite())); out.write("<table>"); for (int i = 0; i < testTables.size(); i++) { String table = testTables.get(i).replace("\u00a0", "&nbsp;"); out.write(MessageFormat.format(SUITE_HTML, i, suite.getHref(i), table)); } out.write("</table><pre>\n"); if (log != null) { out.write(quoteCharacters(log)); } out.write("</pre></body></html>"); out.flush(); } public static String quoteCharacters(String s) { StringBuffer result = null; for (int i = 0, max = s.length(), delta = 0; i < max; i++) { char c = s.charAt(i); String replacement = null; if (c == '&') { replacement = "&amp;"; } else if (c == '<') { replacement = "&lt;"; } else if (c == '>') { replacement = "&gt;"; } else if (c == '"') { replacement = "&quot;"; } else if (c == '\'') { replacement = "&apos;"; } if (replacement != null) { if (result == null) { result = new StringBuffer(s); } result.replace(i + delta, i + delta + 1, replacement); delta += (replacement.length() - 1); } } if (result == null) { return s; } return result.toString(); } class UrlDecoder { public String decode(String string) { try { return URLDecoder.decode(string, System.getProperty("file.encoding")); } catch (UnsupportedEncodingException e) { return string; } } public List decodeListOfStrings(List list) { List<String> decodedList = new LinkedList<String>(); for (Object o : list) { decodedList.add(decode((String) o)); } return decodedList; } } }
1
13,139
Is there a recommended quote style for attributes? I see single and double here, double further down.
SeleniumHQ-selenium
java
@@ -772,6 +772,17 @@ var _ = Describe("Session", func() { Expect(mconn.written[0][0] & 0x02).ToNot(BeZero()) // Public Reset Expect(sess.runClosed).To(BeClosed()) }) + + It("unblocks WaitUntilClosed when the run loop exists", func() { + returned := make(chan struct{}) + go func() { + sess.WaitUntilClosed() + close(returned) + }() + Consistently(returned).ShouldNot(BeClosed()) + sess.Close(nil) + Eventually(returned).Should(BeClosed()) + }) }) Context("receiving packets", func() {
1
package quic import ( "bytes" "crypto/tls" "errors" "io" "net" "runtime/pprof" "strings" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/lucas-clemente/quic-go/ackhandler" "github.com/lucas-clemente/quic-go/crypto" "github.com/lucas-clemente/quic-go/frames" "github.com/lucas-clemente/quic-go/handshake" "github.com/lucas-clemente/quic-go/internal/mocks" "github.com/lucas-clemente/quic-go/internal/mocks/mocks_fc" "github.com/lucas-clemente/quic-go/protocol" "github.com/lucas-clemente/quic-go/qerr" "github.com/lucas-clemente/quic-go/testdata" ) type mockConnection struct { remoteAddr net.Addr localAddr net.Addr written [][]byte } func (m *mockConnection) Write(p []byte) error { b := make([]byte, len(p)) copy(b, p) m.written = append(m.written, b) return nil } func (m *mockConnection) Read([]byte) (int, net.Addr, error) { panic("not implemented") } func (m *mockConnection) SetCurrentRemoteAddr(addr net.Addr) { m.remoteAddr = addr } func (m *mockConnection) LocalAddr() net.Addr { return m.localAddr } func (m *mockConnection) RemoteAddr() net.Addr { return m.remoteAddr } func (*mockConnection) Close() error { panic("not implemented") } type mockUnpacker struct { unpackErr error } func (m *mockUnpacker) Unpack(publicHeaderBinary []byte, hdr *PublicHeader, data []byte) (*unpackedPacket, error) { if m.unpackErr != nil { return nil, m.unpackErr } return &unpackedPacket{ frames: nil, }, nil } type mockSentPacketHandler struct { retransmissionQueue []*ackhandler.Packet sentPackets []*ackhandler.Packet congestionLimited bool requestedStopWaiting bool } func (h *mockSentPacketHandler) SentPacket(packet *ackhandler.Packet) error { h.sentPackets = append(h.sentPackets, packet) return nil } func (h *mockSentPacketHandler) ReceivedAck(ackFrame *frames.AckFrame, withPacketNumber protocol.PacketNumber, recvTime time.Time) error { return nil } func (h *mockSentPacketHandler) GetLeastUnacked() protocol.PacketNumber { return 1 } func (h *mockSentPacketHandler) GetAlarmTimeout() time.Time { panic("not implemented") } func (h *mockSentPacketHandler) OnAlarm() { panic("not implemented") } func (h *mockSentPacketHandler) SendingAllowed() bool { return !h.congestionLimited } func (h *mockSentPacketHandler) GetStopWaitingFrame(force bool) *frames.StopWaitingFrame { h.requestedStopWaiting = true return &frames.StopWaitingFrame{LeastUnacked: 0x1337} } func (h *mockSentPacketHandler) DequeuePacketForRetransmission() *ackhandler.Packet { if len(h.retransmissionQueue) > 0 { packet := h.retransmissionQueue[0] h.retransmissionQueue = h.retransmissionQueue[1:] return packet } return nil } func newMockSentPacketHandler() ackhandler.SentPacketHandler { return &mockSentPacketHandler{} } var _ ackhandler.SentPacketHandler = &mockSentPacketHandler{} type mockReceivedPacketHandler struct { nextAckFrame *frames.AckFrame } func (m *mockReceivedPacketHandler) GetAckFrame() *frames.AckFrame { f := m.nextAckFrame m.nextAckFrame = nil return f } func (m *mockReceivedPacketHandler) ReceivedPacket(packetNumber protocol.PacketNumber, shouldInstigateAck bool) error { panic("not implemented") } func (m *mockReceivedPacketHandler) ReceivedStopWaiting(*frames.StopWaitingFrame) error { panic("not implemented") } var _ ackhandler.ReceivedPacketHandler = &mockReceivedPacketHandler{} func areSessionsRunning() bool { var b bytes.Buffer pprof.Lookup("goroutine").WriteTo(&b, 1) return strings.Contains(b.String(), "quic-go.(*session).run") } var _ = Describe("Session", func() { var ( sess *session scfg *handshake.ServerConfig 
mconn *mockConnection mockCpm *mocks.MockConnectionParametersManager cryptoSetup *mockCryptoSetup handshakeChan <-chan handshakeEvent aeadChanged chan<- protocol.EncryptionLevel ) BeforeEach(func() { Eventually(areSessionsRunning).Should(BeFalse()) cryptoSetup = &mockCryptoSetup{} newCryptoSetup = func( _ protocol.ConnectionID, _ net.Addr, _ protocol.VersionNumber, _ *handshake.ServerConfig, _ io.ReadWriter, _ handshake.ConnectionParametersManager, _ []protocol.VersionNumber, _ func(net.Addr, *handshake.STK) bool, aeadChangedP chan<- protocol.EncryptionLevel, ) (handshake.CryptoSetup, error) { aeadChanged = aeadChangedP return cryptoSetup, nil } mconn = &mockConnection{ remoteAddr: &net.UDPAddr{}, } certChain := crypto.NewCertChain(testdata.GetTLSConfig()) kex, err := crypto.NewCurve25519KEX() Expect(err).NotTo(HaveOccurred()) scfg, err = handshake.NewServerConfig(kex, certChain) Expect(err).NotTo(HaveOccurred()) var pSess Session pSess, handshakeChan, err = newSession( mconn, protocol.Version35, 0, scfg, populateServerConfig(&Config{}), ) Expect(err).NotTo(HaveOccurred()) sess = pSess.(*session) Expect(sess.streamsMap.openStreams).To(HaveLen(1)) // Crypto stream mockCpm = mocks.NewMockConnectionParametersManager(mockCtrl) mockCpm.EXPECT().GetIdleConnectionStateLifetime().Return(time.Minute).AnyTimes() sess.connectionParameters = mockCpm }) AfterEach(func() { newCryptoSetup = handshake.NewCryptoSetup Eventually(areSessionsRunning).Should(BeFalse()) }) Context("source address validation", func() { var ( stkVerify func(net.Addr, *handshake.STK) bool paramClientAddr net.Addr paramSTK *STK ) remoteAddr := &net.UDPAddr{IP: net.IPv4(192, 168, 13, 37), Port: 1000} BeforeEach(func() { newCryptoSetup = func( _ protocol.ConnectionID, _ net.Addr, _ protocol.VersionNumber, _ *handshake.ServerConfig, _ io.ReadWriter, _ handshake.ConnectionParametersManager, _ []protocol.VersionNumber, stkFunc func(net.Addr, *handshake.STK) bool, _ chan<- protocol.EncryptionLevel, ) (handshake.CryptoSetup, error) { stkVerify = stkFunc return cryptoSetup, nil } conf := populateServerConfig(&Config{}) conf.AcceptSTK = func(clientAddr net.Addr, stk *STK) bool { paramClientAddr = clientAddr paramSTK = stk return false } pSess, _, err := newSession( mconn, protocol.Version35, 0, scfg, conf, ) Expect(err).NotTo(HaveOccurred()) sess = pSess.(*session) }) It("calls the callback with the right parameters when the client didn't send an STK", func() { stkVerify(remoteAddr, nil) Expect(paramClientAddr).To(Equal(remoteAddr)) Expect(paramSTK).To(BeNil()) }) It("calls the callback with the STK when the client sent an STK", func() { stkAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1337} sentTime := time.Now().Add(-time.Hour) stkVerify(remoteAddr, &handshake.STK{SentTime: sentTime, RemoteAddr: stkAddr.String()}) Expect(paramClientAddr).To(Equal(remoteAddr)) Expect(paramSTK).ToNot(BeNil()) Expect(paramSTK.remoteAddr).To(Equal(stkAddr.String())) Expect(paramSTK.sentTime).To(Equal(sentTime)) }) }) Context("when handling stream frames", func() { It("makes new streams", func() { sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte{0xde, 0xca, 0xfb, 0xad}, }) p := make([]byte, 4) str, err := sess.streamsMap.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) Expect(str).ToNot(BeNil()) _, err = str.Read(p) Expect(err).ToNot(HaveOccurred()) Expect(p).To(Equal([]byte{0xde, 0xca, 0xfb, 0xad})) }) It("does not reject existing streams with even StreamIDs", func() { _, err := sess.GetOrOpenStream(5) 
Expect(err).ToNot(HaveOccurred()) err = sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte{0xde, 0xca, 0xfb, 0xad}, }) Expect(err).ToNot(HaveOccurred()) }) It("handles existing streams", func() { sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte{0xde, 0xca}, }) numOpenStreams := len(sess.streamsMap.openStreams) sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Offset: 2, Data: []byte{0xfb, 0xad}, }) Expect(sess.streamsMap.openStreams).To(HaveLen(numOpenStreams)) p := make([]byte, 4) str, _ := sess.streamsMap.GetOrOpenStream(5) Expect(str).ToNot(BeNil()) _, err := str.Read(p) Expect(err).ToNot(HaveOccurred()) Expect(p).To(Equal([]byte{0xde, 0xca, 0xfb, 0xad})) }) It("does not delete streams with Close()", func() { str, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) str.Close() sess.garbageCollectStreams() str, err = sess.streamsMap.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) Expect(str).ToNot(BeNil()) }) It("does not delete streams with FIN bit", func() { sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte{0xde, 0xca, 0xfb, 0xad}, FinBit: true, }) numOpenStreams := len(sess.streamsMap.openStreams) str, _ := sess.streamsMap.GetOrOpenStream(5) Expect(str).ToNot(BeNil()) p := make([]byte, 4) _, err := str.Read(p) Expect(err).To(MatchError(io.EOF)) Expect(p).To(Equal([]byte{0xde, 0xca, 0xfb, 0xad})) sess.garbageCollectStreams() Expect(sess.streamsMap.openStreams).To(HaveLen(numOpenStreams)) str, _ = sess.streamsMap.GetOrOpenStream(5) Expect(str).ToNot(BeNil()) }) It("deletes streams with FIN bit & close", func() { sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte{0xde, 0xca, 0xfb, 0xad}, FinBit: true, }) numOpenStreams := len(sess.streamsMap.openStreams) str, _ := sess.streamsMap.GetOrOpenStream(5) Expect(str).ToNot(BeNil()) p := make([]byte, 4) _, err := str.Read(p) Expect(err).To(MatchError(io.EOF)) Expect(p).To(Equal([]byte{0xde, 0xca, 0xfb, 0xad})) sess.garbageCollectStreams() Expect(sess.streamsMap.openStreams).To(HaveLen(numOpenStreams)) str, _ = sess.streamsMap.GetOrOpenStream(5) Expect(str).ToNot(BeNil()) // We still need to close the stream locally str.Close() // ... 
and simulate that we actually the FIN str.sentFin() sess.garbageCollectStreams() Expect(len(sess.streamsMap.openStreams)).To(BeNumerically("<", numOpenStreams)) str, err = sess.streamsMap.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) Expect(str).To(BeNil()) // flow controller should have been notified _, err = sess.flowControlManager.SendWindowSize(5) Expect(err).To(MatchError("Error accessing the flowController map.")) }) It("cancels streams with error", func() { sess.garbageCollectStreams() testErr := errors.New("test") sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte{0xde, 0xca, 0xfb, 0xad}, }) str, err := sess.streamsMap.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) Expect(str).ToNot(BeNil()) p := make([]byte, 4) _, err = str.Read(p) Expect(err).ToNot(HaveOccurred()) sess.handleCloseError(closeError{err: testErr, remote: true}) _, err = str.Read(p) Expect(err).To(MatchError(qerr.Error(qerr.InternalError, testErr.Error()))) sess.garbageCollectStreams() str, err = sess.streamsMap.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) Expect(str).To(BeNil()) }) It("cancels empty streams with error", func() { testErr := errors.New("test") sess.GetOrOpenStream(5) str, err := sess.streamsMap.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) Expect(str).ToNot(BeNil()) sess.handleCloseError(closeError{err: testErr, remote: true}) _, err = str.Read([]byte{0}) Expect(err).To(MatchError(qerr.Error(qerr.InternalError, testErr.Error()))) sess.garbageCollectStreams() str, err = sess.streamsMap.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) Expect(str).To(BeNil()) }) It("informs the FlowControlManager about new streams", func() { // since the stream doesn't yet exist, this will throw an error err := sess.flowControlManager.UpdateHighestReceived(5, 1000) Expect(err).To(HaveOccurred()) sess.GetOrOpenStream(5) err = sess.flowControlManager.UpdateHighestReceived(5, 2000) Expect(err).ToNot(HaveOccurred()) }) It("ignores streams that existed previously", func() { sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte{}, FinBit: true, }) str, _ := sess.streamsMap.GetOrOpenStream(5) Expect(str).ToNot(BeNil()) _, err := str.Read([]byte{0}) Expect(err).To(MatchError(io.EOF)) str.Close() str.sentFin() sess.garbageCollectStreams() err = sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte{}, }) Expect(err).To(BeNil()) }) }) Context("handling RST_STREAM frames", func() { It("closes the streams for writing", func() { s, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) err = sess.handleRstStreamFrame(&frames.RstStreamFrame{ StreamID: 5, ErrorCode: 42, }) Expect(err).ToNot(HaveOccurred()) n, err := s.Write([]byte{0}) Expect(n).To(BeZero()) Expect(err).To(MatchError("RST_STREAM received with code 42")) }) It("doesn't close the stream for reading", func() { s, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 5, Data: []byte("foobar"), }) err = sess.handleRstStreamFrame(&frames.RstStreamFrame{ StreamID: 5, ErrorCode: 42, ByteOffset: 6, }) Expect(err).ToNot(HaveOccurred()) b := make([]byte, 3) n, err := s.Read(b) Expect(n).To(Equal(3)) Expect(err).ToNot(HaveOccurred()) }) It("queues a RST_STERAM frame with the correct offset", func() { str, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) str.(*stream).writeOffset = 0x1337 err = sess.handleRstStreamFrame(&frames.RstStreamFrame{ StreamID: 5, }) Expect(err).ToNot(HaveOccurred()) 
Expect(sess.packer.controlFrames).To(HaveLen(1)) Expect(sess.packer.controlFrames[0].(*frames.RstStreamFrame)).To(Equal(&frames.RstStreamFrame{ StreamID: 5, ByteOffset: 0x1337, })) Expect(str.(*stream).finished()).To(BeTrue()) }) It("doesn't queue a RST_STREAM for a stream that it already sent a FIN on", func() { str, err := sess.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) str.(*stream).sentFin() str.Close() err = sess.handleRstStreamFrame(&frames.RstStreamFrame{ StreamID: 5, }) Expect(err).ToNot(HaveOccurred()) Expect(sess.packer.controlFrames).To(BeEmpty()) Expect(str.(*stream).finished()).To(BeTrue()) }) It("passes the byte offset to the flow controller", func() { sess.streamsMap.GetOrOpenStream(5) fcm := mocks_fc.NewMockFlowControlManager(mockCtrl) sess.flowControlManager = fcm fcm.EXPECT().ResetStream(protocol.StreamID(5), protocol.ByteCount(0x1337)) err := sess.handleRstStreamFrame(&frames.RstStreamFrame{ StreamID: 5, ByteOffset: 0x1337, }) Expect(err).ToNot(HaveOccurred()) }) It("returns errors from the flow controller", func() { testErr := errors.New("flow control violation") sess.streamsMap.GetOrOpenStream(5) fcm := mocks_fc.NewMockFlowControlManager(mockCtrl) sess.flowControlManager = fcm fcm.EXPECT().ResetStream(protocol.StreamID(5), protocol.ByteCount(0x1337)).Return(testErr) err := sess.handleRstStreamFrame(&frames.RstStreamFrame{ StreamID: 5, ByteOffset: 0x1337, }) Expect(err).To(MatchError(testErr)) }) It("ignores the error when the stream is not known", func() { err := sess.handleFrames([]frames.Frame{&frames.RstStreamFrame{ StreamID: 5, ErrorCode: 42, }}) Expect(err).NotTo(HaveOccurred()) }) It("queues a RST_STREAM when a stream gets reset locally", func() { testErr := errors.New("testErr") str, err := sess.streamsMap.GetOrOpenStream(5) str.writeOffset = 0x1337 Expect(err).ToNot(HaveOccurred()) str.Reset(testErr) Expect(sess.packer.controlFrames).To(HaveLen(1)) Expect(sess.packer.controlFrames[0]).To(Equal(&frames.RstStreamFrame{ StreamID: 5, ByteOffset: 0x1337, })) Expect(str.finished()).To(BeFalse()) }) It("doesn't queue another RST_STREAM, when it receives an RST_STREAM as a response for the first", func() { testErr := errors.New("testErr") str, err := sess.streamsMap.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) str.Reset(testErr) Expect(sess.packer.controlFrames).To(HaveLen(1)) err = sess.handleRstStreamFrame(&frames.RstStreamFrame{ StreamID: 5, ByteOffset: 0x42, }) Expect(err).ToNot(HaveOccurred()) Expect(sess.packer.controlFrames).To(HaveLen(1)) }) }) Context("handling WINDOW_UPDATE frames", func() { It("updates the Flow Control Window of a stream", func() { _, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) err = sess.handleWindowUpdateFrame(&frames.WindowUpdateFrame{ StreamID: 5, ByteOffset: 100, }) Expect(err).ToNot(HaveOccurred()) Expect(sess.flowControlManager.SendWindowSize(5)).To(Equal(protocol.ByteCount(100))) }) It("updates the Flow Control Window of the connection", func() { err := sess.handleWindowUpdateFrame(&frames.WindowUpdateFrame{ StreamID: 0, ByteOffset: 0x800000, }) Expect(err).ToNot(HaveOccurred()) }) It("opens a new stream when receiving a WINDOW_UPDATE for an unknown stream", func() { err := sess.handleWindowUpdateFrame(&frames.WindowUpdateFrame{ StreamID: 5, ByteOffset: 1337, }) Expect(err).ToNot(HaveOccurred()) str, err := sess.streamsMap.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) Expect(str).ToNot(BeNil()) }) It("errors when receiving a WindowUpdateFrame for a closed stream", func() { 
sess.handleStreamFrame(&frames.StreamFrame{StreamID: 5}) err := sess.streamsMap.RemoveStream(5) Expect(err).ToNot(HaveOccurred()) sess.garbageCollectStreams() err = sess.handleWindowUpdateFrame(&frames.WindowUpdateFrame{ StreamID: 5, ByteOffset: 1337, }) Expect(err).To(MatchError(errWindowUpdateOnClosedStream)) }) It("ignores errors when receiving a WindowUpdateFrame for a closed stream", func() { sess.handleStreamFrame(&frames.StreamFrame{StreamID: 5}) err := sess.streamsMap.RemoveStream(5) Expect(err).ToNot(HaveOccurred()) sess.garbageCollectStreams() err = sess.handleFrames([]frames.Frame{&frames.WindowUpdateFrame{ StreamID: 5, ByteOffset: 1337, }}) Expect(err).NotTo(HaveOccurred()) }) }) It("handles PING frames", func() { err := sess.handleFrames([]frames.Frame{&frames.PingFrame{}}) Expect(err).NotTo(HaveOccurred()) }) It("handles BLOCKED frames", func() { err := sess.handleFrames([]frames.Frame{&frames.BlockedFrame{}}) Expect(err).NotTo(HaveOccurred()) }) It("errors on GOAWAY frames", func() { err := sess.handleFrames([]frames.Frame{&frames.GoawayFrame{}}) Expect(err).To(MatchError("unimplemented: handling GOAWAY frames")) }) It("handles STOP_WAITING frames", func() { err := sess.handleFrames([]frames.Frame{&frames.StopWaitingFrame{LeastUnacked: 10}}) Expect(err).NotTo(HaveOccurred()) }) It("handles CONNECTION_CLOSE frames", func(done Done) { go sess.run() str, _ := sess.GetOrOpenStream(5) err := sess.handleFrames([]frames.Frame{&frames.ConnectionCloseFrame{ErrorCode: 42, ReasonPhrase: "foobar"}}) Expect(err).NotTo(HaveOccurred()) Eventually(sess.runClosed).Should(BeClosed()) _, err = str.Read([]byte{0}) Expect(err).To(MatchError(qerr.Error(42, "foobar"))) close(done) }) Context("waiting until the handshake completes", func() { It("waits until the handshake is complete", func(done Done) { go sess.run() var waitReturned bool go func() { defer GinkgoRecover() err := sess.WaitUntilHandshakeComplete() Expect(err).ToNot(HaveOccurred()) waitReturned = true }() aeadChanged <- protocol.EncryptionForwardSecure Consistently(func() bool { return waitReturned }).Should(BeFalse()) close(aeadChanged) Eventually(func() bool { return waitReturned }).Should(BeTrue()) Expect(sess.Close(nil)).To(Succeed()) close(done) }) It("errors if the handshake fails", func(done Done) { testErr := errors.New("crypto error") sess.cryptoSetup = &mockCryptoSetup{handleErr: testErr} go sess.run() err := sess.WaitUntilHandshakeComplete() Expect(err).To(MatchError(testErr)) close(done) }, 0.5) It("returns when Close is called", func(done Done) { testErr := errors.New("close error") go sess.run() var waitReturned bool go func() { defer GinkgoRecover() err := sess.WaitUntilHandshakeComplete() Expect(err).To(MatchError(testErr)) waitReturned = true }() sess.Close(testErr) Eventually(func() bool { return waitReturned }).Should(BeTrue()) close(done) }) It("doesn't wait if the handshake is already completed", func(done Done) { go sess.run() close(aeadChanged) err := sess.WaitUntilHandshakeComplete() Expect(err).ToNot(HaveOccurred()) Expect(sess.Close(nil)).To(Succeed()) close(done) }) }) Context("accepting streams", func() { It("waits for new streams", func() { var str Stream go func() { defer GinkgoRecover() var err error str, err = sess.AcceptStream() Expect(err).ToNot(HaveOccurred()) }() Consistently(func() Stream { return str }).Should(BeNil()) sess.handleStreamFrame(&frames.StreamFrame{ StreamID: 3, }) Eventually(func() Stream { return str }).ShouldNot(BeNil()) Expect(str.StreamID()).To(Equal(protocol.StreamID(3))) }) 
It("stops accepting when the session is closed", func() { testErr := errors.New("testErr") var err error go func() { _, err = sess.AcceptStream() }() go sess.run() Consistently(func() error { return err }).ShouldNot(HaveOccurred()) sess.Close(testErr) Eventually(func() error { return err }).Should(HaveOccurred()) Expect(err).To(MatchError(qerr.ToQuicError(testErr))) }) It("stops accepting when the session is closed after version negotiation", func() { var err error go func() { _, err = sess.AcceptStream() }() go sess.run() Consistently(func() error { return err }).ShouldNot(HaveOccurred()) Expect(sess.runClosed).ToNot(BeClosed()) sess.Close(errCloseSessionForNewVersion) Eventually(func() error { return err }).Should(HaveOccurred()) Expect(err).To(MatchError(qerr.Error(qerr.InternalError, errCloseSessionForNewVersion.Error()))) Eventually(sess.runClosed).Should(BeClosed()) }) }) Context("closing", func() { BeforeEach(func() { Eventually(areSessionsRunning).Should(BeFalse()) go sess.run() Eventually(areSessionsRunning).Should(BeTrue()) }) It("shuts down without error", func() { sess.Close(nil) Eventually(areSessionsRunning).Should(BeFalse()) Expect(mconn.written).To(HaveLen(1)) Expect(mconn.written[0]).To(ContainSubstring(string([]byte{0x02, byte(qerr.PeerGoingAway), 0, 0, 0, 0, 0}))) Expect(sess.runClosed).To(BeClosed()) }) It("only closes once", func() { sess.Close(nil) sess.Close(nil) Eventually(areSessionsRunning).Should(BeFalse()) Expect(mconn.written).To(HaveLen(1)) Expect(sess.runClosed).To(BeClosed()) }) It("closes streams with proper error", func() { testErr := errors.New("test error") s, err := sess.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) sess.Close(testErr) Eventually(areSessionsRunning).Should(BeFalse()) n, err := s.Read([]byte{0}) Expect(n).To(BeZero()) Expect(err.Error()).To(ContainSubstring(testErr.Error())) n, err = s.Write([]byte{0}) Expect(n).To(BeZero()) Expect(err.Error()).To(ContainSubstring(testErr.Error())) Expect(sess.runClosed).To(BeClosed()) }) It("closes the session in order to replace it with another QUIC version", func() { sess.Close(errCloseSessionForNewVersion) Eventually(areSessionsRunning).Should(BeFalse()) Expect(mconn.written).To(BeEmpty()) // no CONNECTION_CLOSE or PUBLIC_RESET sent }) It("sends a Public Reset if the client is initiating the head-of-line blocking experiment", func() { sess.Close(handshake.ErrHOLExperiment) Expect(mconn.written).To(HaveLen(1)) Expect(mconn.written[0][0] & 0x02).ToNot(BeZero()) // Public Reset Expect(sess.runClosed).To(BeClosed()) }) }) Context("receiving packets", func() { var hdr *PublicHeader BeforeEach(func() { sess.unpacker = &mockUnpacker{} hdr = &PublicHeader{PacketNumberLen: protocol.PacketNumberLen6} }) It("sets the {last,largest}RcvdPacketNumber", func() { hdr.PacketNumber = 5 err := sess.handlePacketImpl(&receivedPacket{publicHeader: hdr}) Expect(err).ToNot(HaveOccurred()) Expect(sess.lastRcvdPacketNumber).To(Equal(protocol.PacketNumber(5))) Expect(sess.largestRcvdPacketNumber).To(Equal(protocol.PacketNumber(5))) }) It("closes when handling a packet fails", func(done Done) { testErr := errors.New("unpack error") hdr.PacketNumber = 5 var runErr error go func() { runErr = sess.run() }() sess.unpacker.(*mockUnpacker).unpackErr = testErr sess.handlePacket(&receivedPacket{publicHeader: hdr}) Eventually(func() error { return runErr }).Should(MatchError(testErr)) Expect(sess.runClosed).To(BeClosed()) close(done) }) It("sets the {last,largest}RcvdPacketNumber, for an out-of-order packet", func() { 
hdr.PacketNumber = 5 err := sess.handlePacketImpl(&receivedPacket{publicHeader: hdr}) Expect(err).ToNot(HaveOccurred()) Expect(sess.lastRcvdPacketNumber).To(Equal(protocol.PacketNumber(5))) Expect(sess.largestRcvdPacketNumber).To(Equal(protocol.PacketNumber(5))) hdr.PacketNumber = 3 err = sess.handlePacketImpl(&receivedPacket{publicHeader: hdr}) Expect(err).ToNot(HaveOccurred()) Expect(sess.lastRcvdPacketNumber).To(Equal(protocol.PacketNumber(3))) Expect(sess.largestRcvdPacketNumber).To(Equal(protocol.PacketNumber(5))) }) It("handles duplicate packets", func() { hdr.PacketNumber = 5 err := sess.handlePacketImpl(&receivedPacket{publicHeader: hdr}) Expect(err).ToNot(HaveOccurred()) err = sess.handlePacketImpl(&receivedPacket{publicHeader: hdr}) Expect(err).ToNot(HaveOccurred()) }) It("handles packets smaller than the highest LeastUnacked of a StopWaiting", func() { err := sess.receivedPacketHandler.ReceivedStopWaiting(&frames.StopWaitingFrame{LeastUnacked: 10}) Expect(err).ToNot(HaveOccurred()) hdr.PacketNumber = 5 err = sess.handlePacketImpl(&receivedPacket{publicHeader: hdr}) Expect(err).ToNot(HaveOccurred()) }) Context("updating the remote address", func() { It("sets the remote address", func() { remoteIP := &net.IPAddr{IP: net.IPv4(192, 168, 0, 100)} Expect(sess.conn.(*mockConnection).remoteAddr).ToNot(Equal(remoteIP)) p := receivedPacket{ remoteAddr: remoteIP, publicHeader: &PublicHeader{PacketNumber: 1337}, } err := sess.handlePacketImpl(&p) Expect(err).ToNot(HaveOccurred()) Expect(sess.conn.(*mockConnection).remoteAddr).To(Equal(remoteIP)) }) It("doesn't change the remote address if authenticating the packet fails", func() { remoteIP := &net.IPAddr{IP: net.IPv4(192, 168, 0, 100)} attackerIP := &net.IPAddr{IP: net.IPv4(192, 168, 0, 102)} sess.conn.(*mockConnection).remoteAddr = remoteIP // use the real packetUnpacker here, to make sure this test fails if the error code for failed decryption changes sess.unpacker = &packetUnpacker{} sess.unpacker.(*packetUnpacker).aead = &mockAEAD{} p := receivedPacket{ remoteAddr: attackerIP, publicHeader: &PublicHeader{PacketNumber: 1337}, } err := sess.handlePacketImpl(&p) quicErr := err.(*qerr.QuicError) Expect(quicErr.ErrorCode).To(Equal(qerr.DecryptionFailure)) Expect(sess.conn.(*mockConnection).remoteAddr).To(Equal(remoteIP)) }) It("sets the remote address, if the packet is authenticated, but unpacking fails for another reason", func() { testErr := errors.New("testErr") remoteIP := &net.IPAddr{IP: net.IPv4(192, 168, 0, 100)} Expect(sess.conn.(*mockConnection).remoteAddr).ToNot(Equal(remoteIP)) p := receivedPacket{ remoteAddr: remoteIP, publicHeader: &PublicHeader{PacketNumber: 1337}, } sess.unpacker.(*mockUnpacker).unpackErr = testErr err := sess.handlePacketImpl(&p) Expect(err).To(MatchError(testErr)) Expect(sess.conn.(*mockConnection).remoteAddr).To(Equal(remoteIP)) }) }) }) Context("sending packets", func() { It("sends ack frames", func() { packetNumber := protocol.PacketNumber(0x035E) sess.receivedPacketHandler.ReceivedPacket(packetNumber, true) err := sess.sendPacket() Expect(err).NotTo(HaveOccurred()) Expect(mconn.written).To(HaveLen(1)) Expect(mconn.written[0]).To(ContainSubstring(string([]byte{0x5E, 0x03}))) }) It("sends two WindowUpdate frames", func() { _, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) sess.flowControlManager.AddBytesRead(5, protocol.ReceiveStreamFlowControlWindow) err = sess.sendPacket() Expect(err).NotTo(HaveOccurred()) err = sess.sendPacket() Expect(err).NotTo(HaveOccurred()) err = sess.sendPacket() 
Expect(err).NotTo(HaveOccurred()) Expect(mconn.written).To(HaveLen(2)) Expect(mconn.written[0]).To(ContainSubstring(string([]byte{0x04, 0x05, 0, 0, 0}))) Expect(mconn.written[1]).To(ContainSubstring(string([]byte{0x04, 0x05, 0, 0, 0}))) }) It("sends public reset", func() { err := sess.sendPublicReset(1) Expect(err).NotTo(HaveOccurred()) Expect(mconn.written).To(HaveLen(1)) Expect(mconn.written[0]).To(ContainSubstring("PRST")) }) It("informs the SentPacketHandler about sent packets", func() { sess.sentPacketHandler = newMockSentPacketHandler() sess.packer.packetNumberGenerator.next = 0x1337 + 9 sess.packer.cryptoSetup = &mockCryptoSetup{encLevelSeal: protocol.EncryptionForwardSecure} f := &frames.StreamFrame{ StreamID: 5, Data: []byte("foobar"), } sess.streamFramer.AddFrameForRetransmission(f) _, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) err = sess.sendPacket() Expect(err).NotTo(HaveOccurred()) Expect(mconn.written).To(HaveLen(1)) sentPackets := sess.sentPacketHandler.(*mockSentPacketHandler).sentPackets Expect(sentPackets).To(HaveLen(1)) Expect(sentPackets[0].Frames).To(ContainElement(f)) Expect(sentPackets[0].EncryptionLevel).To(Equal(protocol.EncryptionForwardSecure)) Expect(sentPackets[0].Length).To(BeEquivalentTo(len(mconn.written[0]))) }) }) Context("retransmissions", func() { var sph *mockSentPacketHandler BeforeEach(func() { // a StopWaitingFrame is added, so make sure the packet number of the new package is higher than the packet number of the retransmitted packet sess.packer.packetNumberGenerator.next = 0x1337 + 10 sph = newMockSentPacketHandler().(*mockSentPacketHandler) sess.sentPacketHandler = sph sess.packer.cryptoSetup = &mockCryptoSetup{encLevelSeal: protocol.EncryptionForwardSecure} }) Context("for handshake packets", func() { It("retransmits an unencrypted packet", func() { sf := &frames.StreamFrame{StreamID: 1, Data: []byte("foobar")} sph.retransmissionQueue = []*ackhandler.Packet{{ Frames: []frames.Frame{sf}, EncryptionLevel: protocol.EncryptionUnencrypted, }} err := sess.sendPacket() Expect(err).ToNot(HaveOccurred()) Expect(mconn.written).To(HaveLen(1)) sentPackets := sph.sentPackets Expect(sentPackets).To(HaveLen(1)) Expect(sentPackets[0].EncryptionLevel).To(Equal(protocol.EncryptionUnencrypted)) Expect(sentPackets[0].Frames).To(HaveLen(2)) Expect(sentPackets[0].Frames[1]).To(Equal(sf)) swf := sentPackets[0].Frames[0].(*frames.StopWaitingFrame) Expect(swf.LeastUnacked).To(Equal(protocol.PacketNumber(0x1337))) }) It("doesn't retransmit non-retransmittable packets", func() { sph.retransmissionQueue = []*ackhandler.Packet{{ Frames: []frames.Frame{ &frames.AckFrame{}, &frames.StopWaitingFrame{}, }, EncryptionLevel: protocol.EncryptionUnencrypted, }} err := sess.sendPacket() Expect(err).ToNot(HaveOccurred()) Expect(mconn.written).To(BeEmpty()) }) It("retransmit a packet encrypted with the initial encryption", func() { sf := &frames.StreamFrame{StreamID: 1, Data: []byte("foobar")} sph.retransmissionQueue = []*ackhandler.Packet{{ Frames: []frames.Frame{sf}, EncryptionLevel: protocol.EncryptionSecure, }} err := sess.sendPacket() Expect(err).ToNot(HaveOccurred()) Expect(mconn.written).To(HaveLen(1)) sentPackets := sph.sentPackets Expect(sentPackets).To(HaveLen(1)) Expect(sentPackets[0].EncryptionLevel).To(Equal(protocol.EncryptionSecure)) Expect(sentPackets[0].Frames).To(HaveLen(2)) Expect(sentPackets[0].Frames).To(ContainElement(sf)) }) It("doesn't retransmit handshake packets when the handshake is complete", func() { sess.handshakeComplete = true sf := 
&frames.StreamFrame{StreamID: 1, Data: []byte("foobar")} sph.retransmissionQueue = []*ackhandler.Packet{{ Frames: []frames.Frame{sf}, EncryptionLevel: protocol.EncryptionSecure, }} err := sess.sendPacket() Expect(err).ToNot(HaveOccurred()) Expect(mconn.written).To(BeEmpty()) }) }) Context("for packets after the handshake", func() { It("sends a StreamFrame from a packet queued for retransmission", func() { f := frames.StreamFrame{ StreamID: 0x5, Data: []byte("foobar1234567"), } p := ackhandler.Packet{ PacketNumber: 0x1337, Frames: []frames.Frame{&f}, EncryptionLevel: protocol.EncryptionForwardSecure, } sph.retransmissionQueue = []*ackhandler.Packet{&p} err := sess.sendPacket() Expect(err).NotTo(HaveOccurred()) Expect(mconn.written).To(HaveLen(1)) Expect(sph.requestedStopWaiting).To(BeTrue()) Expect(mconn.written[0]).To(ContainSubstring("foobar1234567")) }) It("sends a StreamFrame from a packet queued for retransmission", func() { f1 := frames.StreamFrame{ StreamID: 0x5, Data: []byte("foobar"), } f2 := frames.StreamFrame{ StreamID: 0x7, Data: []byte("loremipsum"), } p1 := ackhandler.Packet{ PacketNumber: 0x1337, Frames: []frames.Frame{&f1}, EncryptionLevel: protocol.EncryptionForwardSecure, } p2 := ackhandler.Packet{ PacketNumber: 0x1338, Frames: []frames.Frame{&f2}, EncryptionLevel: protocol.EncryptionForwardSecure, } sph.retransmissionQueue = []*ackhandler.Packet{&p1, &p2} err := sess.sendPacket() Expect(err).NotTo(HaveOccurred()) Expect(mconn.written).To(HaveLen(1)) Expect(mconn.written[0]).To(ContainSubstring("foobar")) Expect(mconn.written[0]).To(ContainSubstring("loremipsum")) }) It("always attaches a StopWaiting to a packet that contains a retransmission", func() { f := &frames.StreamFrame{ StreamID: 0x5, Data: bytes.Repeat([]byte{'f'}, int(1.5*float32(protocol.MaxPacketSize))), } sess.streamFramer.AddFrameForRetransmission(f) err := sess.sendPacket() Expect(err).NotTo(HaveOccurred()) Expect(mconn.written).To(HaveLen(2)) sentPackets := sph.sentPackets Expect(sentPackets).To(HaveLen(2)) _, ok := sentPackets[0].Frames[0].(*frames.StopWaitingFrame) Expect(ok).To(BeTrue()) _, ok = sentPackets[1].Frames[0].(*frames.StopWaitingFrame) Expect(ok).To(BeTrue()) }) It("retransmits a WindowUpdates if it hasn't already sent a WindowUpdate with a higher ByteOffset", func() { _, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) fcm := mocks_fc.NewMockFlowControlManager(mockCtrl) sess.flowControlManager = fcm fcm.EXPECT().GetWindowUpdates() fcm.EXPECT().GetReceiveWindow(protocol.StreamID(5)).Return(protocol.ByteCount(0x1000), nil) fcm.EXPECT().GetWindowUpdates() wuf := &frames.WindowUpdateFrame{ StreamID: 5, ByteOffset: 0x1000, } sph.retransmissionQueue = []*ackhandler.Packet{{ Frames: []frames.Frame{wuf}, EncryptionLevel: protocol.EncryptionForwardSecure, }} err = sess.sendPacket() Expect(err).ToNot(HaveOccurred()) Expect(sph.sentPackets).To(HaveLen(1)) Expect(sph.sentPackets[0].Frames).To(ContainElement(wuf)) }) It("doesn't retransmit WindowUpdates if it already sent a WindowUpdate with a higher ByteOffset", func() { _, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) fcm := mocks_fc.NewMockFlowControlManager(mockCtrl) sess.flowControlManager = fcm fcm.EXPECT().GetWindowUpdates() fcm.EXPECT().GetReceiveWindow(protocol.StreamID(5)).Return(protocol.ByteCount(0x2000), nil) sph.retransmissionQueue = []*ackhandler.Packet{{ Frames: []frames.Frame{&frames.WindowUpdateFrame{ StreamID: 5, ByteOffset: 0x1000, }}, EncryptionLevel: protocol.EncryptionForwardSecure, }} err = 
sess.sendPacket() Expect(err).ToNot(HaveOccurred()) Expect(sph.sentPackets).To(BeEmpty()) }) It("doesn't retransmit WindowUpdates for closed streams", func() { str, err := sess.GetOrOpenStream(5) Expect(err).ToNot(HaveOccurred()) // close the stream str.(*stream).sentFin() str.Close() str.(*stream).RegisterRemoteError(nil) sess.garbageCollectStreams() _, err = sess.flowControlManager.SendWindowSize(5) Expect(err).To(MatchError("Error accessing the flowController map.")) sph.retransmissionQueue = []*ackhandler.Packet{{ Frames: []frames.Frame{&frames.WindowUpdateFrame{ StreamID: 5, ByteOffset: 0x1337, }}, EncryptionLevel: protocol.EncryptionForwardSecure, }} err = sess.sendPacket() Expect(err).ToNot(HaveOccurred()) Expect(sph.sentPackets).To(BeEmpty()) }) }) }) It("retransmits RTO packets", func() { n := protocol.PacketNumber(10) sess.packer.cryptoSetup = &mockCryptoSetup{encLevelSeal: protocol.EncryptionForwardSecure} // We simulate consistently low RTTs, so that the test works faster rtt := time.Millisecond sess.rttStats.UpdateRTT(rtt, 0, time.Now()) Expect(sess.rttStats.SmoothedRTT()).To(Equal(rtt)) // make sure it worked sess.packer.packetNumberGenerator.next = n + 1 // Now, we send a single packet, and expect that it was retransmitted later err := sess.sentPacketHandler.SentPacket(&ackhandler.Packet{ PacketNumber: n, Length: 1, Frames: []frames.Frame{&frames.StreamFrame{ Data: []byte("foobar"), }}, EncryptionLevel: protocol.EncryptionForwardSecure, }) Expect(err).NotTo(HaveOccurred()) go sess.run() defer sess.Close(nil) sess.scheduleSending() Eventually(func() int { return len(mconn.written) }).ShouldNot(BeZero()) Expect(mconn.written[0]).To(ContainSubstring("foobar")) }) Context("scheduling sending", func() { BeforeEach(func() { sess.packer.cryptoSetup = &mockCryptoSetup{encLevelSeal: protocol.EncryptionForwardSecure} }) It("sends after writing to a stream", func(done Done) { Expect(sess.sendingScheduled).NotTo(Receive()) s, err := sess.GetOrOpenStream(3) Expect(err).NotTo(HaveOccurred()) go func() { s.Write([]byte("foobar")) close(done) }() Eventually(sess.sendingScheduled).Should(Receive()) s.(*stream).getDataForWriting(1000) // unblock }) It("sets the timer to the ack timer", func() { rph := &mockReceivedPacketHandler{} rph.nextAckFrame = &frames.AckFrame{LargestAcked: 0x1337} sess.receivedPacketHandler = rph go sess.run() defer sess.Close(nil) sess.ackAlarmChanged(time.Now().Add(10 * time.Millisecond)) time.Sleep(10 * time.Millisecond) Eventually(func() int { return len(mconn.written) }).ShouldNot(BeZero()) Expect(mconn.written[0]).To(ContainSubstring(string([]byte{0x37, 0x13}))) }) Context("bundling of small packets", func() { It("bundles two small frames of different streams into one packet", func() { s1, err := sess.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) s2, err := sess.GetOrOpenStream(7) Expect(err).NotTo(HaveOccurred()) // Put data directly into the streams s1.(*stream).dataForWriting = []byte("foobar1") s2.(*stream).dataForWriting = []byte("foobar2") sess.scheduleSending() go sess.run() defer sess.Close(nil) Eventually(func() [][]byte { return mconn.written }).Should(HaveLen(1)) Expect(mconn.written[0]).To(ContainSubstring("foobar1")) Expect(mconn.written[0]).To(ContainSubstring("foobar2")) }) It("sends out two big frames in two packets", func() { s1, err := sess.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) s2, err := sess.GetOrOpenStream(7) Expect(err).NotTo(HaveOccurred()) go sess.run() defer sess.Close(nil) go func() { defer GinkgoRecover() 
s1.Write(bytes.Repeat([]byte{'e'}, 1000)) }() _, err = s2.Write(bytes.Repeat([]byte{'e'}, 1000)) Expect(err).ToNot(HaveOccurred()) Eventually(func() [][]byte { return mconn.written }).Should(HaveLen(2)) }) It("sends out two small frames that are written to long after one another into two packets", func() { s, err := sess.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) go sess.run() defer sess.Close(nil) _, err = s.Write([]byte("foobar1")) Expect(err).NotTo(HaveOccurred()) Eventually(func() [][]byte { return mconn.written }).Should(HaveLen(1)) _, err = s.Write([]byte("foobar2")) Expect(err).NotTo(HaveOccurred()) Eventually(func() [][]byte { return mconn.written }).Should(HaveLen(2)) }) It("sends a queued ACK frame only once", func() { packetNumber := protocol.PacketNumber(0x1337) sess.receivedPacketHandler.ReceivedPacket(packetNumber, true) s, err := sess.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) go sess.run() defer sess.Close(nil) _, err = s.Write([]byte("foobar1")) Expect(err).NotTo(HaveOccurred()) Eventually(func() [][]byte { return mconn.written }).Should(HaveLen(1)) _, err = s.Write([]byte("foobar2")) Expect(err).NotTo(HaveOccurred()) Eventually(func() [][]byte { return mconn.written }).Should(HaveLen(2)) Expect(mconn.written[0]).To(ContainSubstring(string([]byte{0x37, 0x13}))) Expect(mconn.written[1]).ToNot(ContainSubstring(string([]byte{0x37, 0x13}))) }) }) }) It("closes when crypto stream errors", func() { testErr := errors.New("crypto setup error") cryptoSetup.handleErr = testErr var runErr error go func() { runErr = sess.run() }() Eventually(func() error { return runErr }).Should(HaveOccurred()) Expect(runErr).To(MatchError(testErr)) }) Context("sending a Public Reset when receiving undecryptable packets during the handshake", func() { // sends protocol.MaxUndecryptablePackets+1 undecrytable packets // this completely fills up the undecryptable packets queue and triggers the public reset timer sendUndecryptablePackets := func() { for i := 0; i < protocol.MaxUndecryptablePackets+1; i++ { hdr := &PublicHeader{ PacketNumber: protocol.PacketNumber(i + 1), } sess.handlePacket(&receivedPacket{publicHeader: hdr, data: []byte("foobar")}) } } BeforeEach(func() { sess.unpacker = &mockUnpacker{unpackErr: qerr.Error(qerr.DecryptionFailure, "")} sess.cryptoSetup = &mockCryptoSetup{} }) It("doesn't immediately send a Public Reset after receiving too many undecryptable packets", func() { go sess.run() sendUndecryptablePackets() sess.scheduleSending() Consistently(func() [][]byte { return mconn.written }).Should(HaveLen(0)) }) It("sets a deadline to send a Public Reset after receiving too many undecryptable packets", func() { go sess.run() sendUndecryptablePackets() Eventually(func() time.Time { return sess.receivedTooManyUndecrytablePacketsTime }).Should(BeTemporally("~", time.Now(), 20*time.Millisecond)) sess.Close(nil) }) It("drops undecryptable packets when the undecrytable packet queue is full", func() { go sess.run() sendUndecryptablePackets() Eventually(func() []*receivedPacket { return sess.undecryptablePackets }).Should(HaveLen(protocol.MaxUndecryptablePackets)) // check that old packets are kept, and the new packets are dropped Expect(sess.undecryptablePackets[0].publicHeader.PacketNumber).To(Equal(protocol.PacketNumber(1))) sess.Close(nil) }) It("sends a Public Reset after a timeout", func() { go sess.run() sendUndecryptablePackets() Eventually(func() time.Time { return sess.receivedTooManyUndecrytablePacketsTime }).Should(BeTemporally("~", time.Now(), 
10*time.Millisecond)) // speed up this test by manually setting back the time when too many packets were received sess.receivedTooManyUndecrytablePacketsTime = time.Now().Add(-protocol.PublicResetTimeout) time.Sleep(10 * time.Millisecond) // wait for the run loop to spin up sess.scheduleSending() // wake up the run loop Eventually(func() [][]byte { return mconn.written }).Should(HaveLen(1)) Expect(mconn.written[0]).To(ContainSubstring("PRST")) Eventually(sess.runClosed).Should(BeClosed()) }) It("doesn't send a Public Reset if decrypting them suceeded during the timeout", func() { go sess.run() sess.receivedTooManyUndecrytablePacketsTime = time.Now().Add(-protocol.PublicResetTimeout).Add(-time.Millisecond) sess.scheduleSending() // wake up the run loop // there are no packets in the undecryptable packet queue // in reality, this happens when the trial decryption succeeded during the Public Reset timeout Consistently(func() [][]byte { return mconn.written }).ShouldNot(HaveLen(1)) Expect(sess.runClosed).ToNot(Receive()) sess.Close(nil) }) It("ignores undecryptable packets after the handshake is complete", func() { close(aeadChanged) go sess.run() sendUndecryptablePackets() Consistently(sess.undecryptablePackets).Should(BeEmpty()) Expect(sess.Close(nil)).To(Succeed()) }) It("unqueues undecryptable packets for later decryption", func() { sess.undecryptablePackets = []*receivedPacket{{ publicHeader: &PublicHeader{PacketNumber: protocol.PacketNumber(42)}, }} Expect(sess.receivedPackets).NotTo(Receive()) sess.tryDecryptingQueuedPackets() Expect(sess.undecryptablePackets).To(BeEmpty()) Expect(sess.receivedPackets).To(Receive()) }) }) It("send a handshake event on the handshakeChan when the AEAD changes to secure", func(done Done) { go sess.run() aeadChanged <- protocol.EncryptionSecure Eventually(handshakeChan).Should(Receive(&handshakeEvent{encLevel: protocol.EncryptionSecure})) Expect(sess.Close(nil)).To(Succeed()) close(done) }) It("send a handshake event on the handshakeChan when the AEAD changes to forward-secure", func(done Done) { go sess.run() aeadChanged <- protocol.EncryptionForwardSecure Eventually(handshakeChan).Should(Receive(&handshakeEvent{encLevel: protocol.EncryptionForwardSecure})) Expect(sess.Close(nil)).To(Succeed()) close(done) }) It("closes the handshakeChan when the handshake completes", func(done Done) { go sess.run() close(aeadChanged) Eventually(handshakeChan).Should(BeClosed()) Expect(sess.Close(nil)).To(Succeed()) close(done) }) It("passes errors to the handshakeChan", func(done Done) { testErr := errors.New("handshake error") go sess.run() Expect(sess.Close(nil)).To(Succeed()) Expect(handshakeChan).To(Receive(&handshakeEvent{err: testErr})) close(done) }) It("does not block if an error occurs", func(done Done) { // this test basically tests that the handshakeChan has a capacity of 3 // The session needs to run (and close) properly, even if no one is receiving from the handshakeChan go sess.run() aeadChanged <- protocol.EncryptionSecure aeadChanged <- protocol.EncryptionForwardSecure Expect(sess.Close(nil)).To(Succeed()) close(done) }) Context("timeouts", func() { It("times out due to no network activity", func(done Done) { sess.lastNetworkActivityTime = time.Now().Add(-time.Hour) err := sess.run() // Would normally not return Expect(err.(*qerr.QuicError).ErrorCode).To(Equal(qerr.NetworkIdleTimeout)) Expect(mconn.written[0]).To(ContainSubstring("No recent network activity.")) Expect(sess.runClosed).To(BeClosed()) close(done) }) It("times out due to non-completed crypto 
handshake", func(done Done) { sess.sessionCreationTime = time.Now().Add(-protocol.DefaultHandshakeTimeout).Add(-time.Second) err := sess.run() // Would normally not return Expect(err.(*qerr.QuicError).ErrorCode).To(Equal(qerr.HandshakeTimeout)) Expect(mconn.written[0]).To(ContainSubstring("Crypto handshake did not complete in time.")) Expect(sess.runClosed).To(BeClosed()) close(done) }) It("does not use ICSL before handshake", func(done Done) { sess.lastNetworkActivityTime = time.Now().Add(-time.Minute) mockCpm = mocks.NewMockConnectionParametersManager(mockCtrl) mockCpm.EXPECT().GetIdleConnectionStateLifetime().Return(9999 * time.Second).AnyTimes() mockCpm.EXPECT().TruncateConnectionID().Return(false).AnyTimes() sess.connectionParameters = mockCpm sess.packer.connectionParameters = mockCpm err := sess.run() // Would normally not return Expect(err.(*qerr.QuicError).ErrorCode).To(Equal(qerr.NetworkIdleTimeout)) Expect(mconn.written[0]).To(ContainSubstring("No recent network activity.")) Expect(sess.runClosed).To(BeClosed()) close(done) }) It("uses ICSL after handshake", func(done Done) { close(aeadChanged) mockCpm = mocks.NewMockConnectionParametersManager(mockCtrl) mockCpm.EXPECT().GetIdleConnectionStateLifetime().Return(0 * time.Second) mockCpm.EXPECT().TruncateConnectionID().Return(false).AnyTimes() sess.connectionParameters = mockCpm sess.packer.connectionParameters = mockCpm mockCpm.EXPECT().GetIdleConnectionStateLifetime().Return(0 * time.Second).AnyTimes() err := sess.run() // Would normally not return Expect(err.(*qerr.QuicError).ErrorCode).To(Equal(qerr.NetworkIdleTimeout)) Expect(mconn.written[0]).To(ContainSubstring("No recent network activity.")) Expect(sess.runClosed).To(BeClosed()) close(done) }) }) It("stores up to MaxSessionUnprocessedPackets packets", func(done Done) { // Nothing here should block for i := protocol.PacketNumber(0); i < protocol.MaxSessionUnprocessedPackets+10; i++ { sess.handlePacket(&receivedPacket{}) } close(done) }, 0.5) Context("getting streams", func() { It("returns a new stream", func() { str, err := sess.GetOrOpenStream(11) Expect(err).ToNot(HaveOccurred()) Expect(str).ToNot(BeNil()) Expect(str.StreamID()).To(Equal(protocol.StreamID(11))) }) It("returns a nil-value (not an interface with value nil) for closed streams", func() { _, err := sess.GetOrOpenStream(9) Expect(err).ToNot(HaveOccurred()) sess.streamsMap.RemoveStream(9) sess.garbageCollectStreams() Expect(sess.streamsMap.GetOrOpenStream(9)).To(BeNil()) str, err := sess.GetOrOpenStream(9) Expect(err).ToNot(HaveOccurred()) Expect(str).To(BeNil()) // make sure that the returned value is a plain nil, not an Stream with value nil _, ok := str.(Stream) Expect(ok).To(BeFalse()) }) // all relevant tests for this are in the streamsMap It("opens streams synchronously", func() { str, err := sess.OpenStreamSync() Expect(err).ToNot(HaveOccurred()) Expect(str).ToNot(BeNil()) }) }) Context("counting streams", func() { It("errors when too many streams are opened", func() { for i := 0; i < 110; i++ { _, err := sess.GetOrOpenStream(protocol.StreamID(i*2 + 1)) Expect(err).NotTo(HaveOccurred()) } _, err := sess.GetOrOpenStream(protocol.StreamID(301)) Expect(err).To(MatchError(qerr.TooManyOpenStreams)) }) It("does not error when many streams are opened and closed", func() { for i := 2; i <= 1000; i++ { s, err := sess.GetOrOpenStream(protocol.StreamID(i*2 + 1)) Expect(err).NotTo(HaveOccurred()) err = s.Close() Expect(err).NotTo(HaveOccurred()) s.(*stream).sentFin() s.(*stream).CloseRemote(0) _, err = 
s.Read([]byte("a")) Expect(err).To(MatchError(io.EOF)) sess.garbageCollectStreams() } }) }) Context("ignoring errors", func() { It("ignores duplicate acks", func() { sess.sentPacketHandler.SentPacket(&ackhandler.Packet{ PacketNumber: 1, Length: 1, }) err := sess.handleFrames([]frames.Frame{&frames.AckFrame{ LargestAcked: 1, }}) Expect(err).NotTo(HaveOccurred()) err = sess.handleFrames([]frames.Frame{&frames.AckFrame{ LargestAcked: 1, }}) Expect(err).NotTo(HaveOccurred()) }) }) Context("window updates", func() { It("gets stream level window updates", func() { err := sess.flowControlManager.AddBytesRead(1, protocol.ReceiveStreamFlowControlWindow) Expect(err).NotTo(HaveOccurred()) frames := sess.getWindowUpdateFrames() Expect(frames).To(HaveLen(1)) Expect(frames[0].StreamID).To(Equal(protocol.StreamID(1))) Expect(frames[0].ByteOffset).To(Equal(protocol.ReceiveStreamFlowControlWindow * 2)) }) It("gets connection level window updates", func() { _, err := sess.GetOrOpenStream(5) Expect(err).NotTo(HaveOccurred()) err = sess.flowControlManager.AddBytesRead(5, protocol.ReceiveConnectionFlowControlWindow) Expect(err).NotTo(HaveOccurred()) frames := sess.getWindowUpdateFrames() Expect(frames).To(HaveLen(1)) Expect(frames[0].StreamID).To(Equal(protocol.StreamID(0))) Expect(frames[0].ByteOffset).To(Equal(protocol.ReceiveConnectionFlowControlWindow * 2)) }) }) It("returns the local address", func() { addr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1337} mconn.localAddr = addr Expect(sess.LocalAddr()).To(Equal(addr)) }) It("returns the remote address", func() { addr := &net.UDPAddr{IP: net.IPv4(1, 2, 7, 1), Port: 7331} mconn.remoteAddr = addr Expect(sess.RemoteAddr()).To(Equal(addr)) }) }) var _ = Describe("Client Session", func() { var ( sess *session mconn *mockConnection aeadChanged chan<- protocol.EncryptionLevel cryptoSetup *mockCryptoSetup ) BeforeEach(func() { Eventually(areSessionsRunning).Should(BeFalse()) cryptoSetup = &mockCryptoSetup{} newCryptoSetupClient = func( _ string, _ protocol.ConnectionID, _ protocol.VersionNumber, _ io.ReadWriter, _ *tls.Config, _ handshake.ConnectionParametersManager, aeadChangedP chan<- protocol.EncryptionLevel, _ *handshake.TransportParameters, _ []protocol.VersionNumber, ) (handshake.CryptoSetup, error) { aeadChanged = aeadChangedP return cryptoSetup, nil } mconn = &mockConnection{ remoteAddr: &net.UDPAddr{}, } sessP, _, err := newClientSession( mconn, "hostname", protocol.Version35, 0, populateClientConfig(&Config{}), nil, ) sess = sessP.(*session) Expect(err).ToNot(HaveOccurred()) Expect(sess.streamsMap.openStreams).To(HaveLen(1)) // Crypto stream }) AfterEach(func() { newCryptoSetupClient = handshake.NewCryptoSetupClient }) Context("receiving packets", func() { var hdr *PublicHeader BeforeEach(func() { hdr = &PublicHeader{PacketNumberLen: protocol.PacketNumberLen6} sess.unpacker = &mockUnpacker{} }) It("passes the diversification nonce to the cryptoSetup", func() { go sess.run() hdr.PacketNumber = 5 hdr.DiversificationNonce = []byte("foobar") err := sess.handlePacketImpl(&receivedPacket{publicHeader: hdr}) Expect(err).ToNot(HaveOccurred()) Eventually(func() []byte { return cryptoSetup.divNonce }).Should(Equal(hdr.DiversificationNonce)) Expect(sess.Close(nil)).To(Succeed()) }) }) It("does not block if an error occurs", func(done Done) { // this test basically tests that the handshakeChan has a capacity of 3 // The session needs to run (and close) properly, even if no one is receiving from the handshakeChan go sess.run() aeadChanged <- 
protocol.EncryptionSecure aeadChanged <- protocol.EncryptionForwardSecure Expect(sess.Close(nil)).To(Succeed()) close(done) }) })
1
6,313
Please use an atomic bool.
lucas-clemente-quic-go
go
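The review comment above ("Please use an atomic bool.") targets flags such as waitReturned in the handshake tests, which are set from one goroutine and polled from another with Eventually(), a data race on a plain bool. A minimal sketch of the suggested pattern, assuming a Go release that ships sync/atomic's atomic.Bool (Go 1.19+); older code would typically use an int32 with atomic.StoreInt32/LoadInt32 or a mutex-guarded bool. The program below is a standalone illustration, not the test itself.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	// Hypothetical stand-in for the test's waitReturned flag: an atomic.Bool
	// can be stored in one goroutine and loaded in another without a data race.
	var waitReturned atomic.Bool

	go func() {
		time.Sleep(10 * time.Millisecond) // simulate WaitUntilHandshakeComplete returning
		waitReturned.Store(true)
	}()

	// Poll the flag safely, the way the Eventually() assertion in the test does.
	for !waitReturned.Load() {
		time.Sleep(time.Millisecond)
	}
	fmt.Println("wait returned:", waitReturned.Load())
}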
@@ -0,0 +1,19 @@
+namespace Datadog.Trace.ClrProfiler
+{
+    internal static class WebHelpers
+    {
+        public static void DecorateWebSpan(
+            this Span span,
+            string resourceName,
+            string method,
+            string host,
+            string httpUrl)
+        {
+            span.Type = SpanTypes.Web;
+            span.ResourceName = resourceName;
+            span.SetTag(Tags.HttpMethod, method);
+            span.SetTag(Tags.HttpRequestHeadersHost, host);
+            span.SetTag(Tags.HttpUrl, httpUrl);
+        }
+    }
+}
1
1
14,952
Consider naming this class `SpanExtensions` to follow C# conventions.
DataDog-dd-trace-dotnet
.cs
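The naming suggestion above follows the common C# convention that a static class holding extension methods for a type is named <Type>Extensions. A minimal sketch of the rename, reusing the Span, SpanTypes and Tags types already referenced by the patch; nothing but the class name changes, and whether the project keeps the DecorateWebSpan method name is up to its authors.

namespace Datadog.Trace.ClrProfiler
{
    // Hypothetical rename of WebHelpers suggested by the reviewer.
    internal static class SpanExtensions
    {
        public static void DecorateWebSpan(
            this Span span,
            string resourceName,
            string method,
            string host,
            string httpUrl)
        {
            span.Type = SpanTypes.Web;
            span.ResourceName = resourceName;
            span.SetTag(Tags.HttpMethod, method);
            span.SetTag(Tags.HttpRequestHeadersHost, host);
            span.SetTag(Tags.HttpUrl, httpUrl);
        }
    }
}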
@@ -81,11 +81,6 @@ namespace Datadog.Trace.Logging
             {
                 // Don't let this exception bubble up as this logger is for debugging and is non-critical
             }
-            finally
-            {
-                // Log some information to correspond with the app domain
-                SharedLogger.Information(FrameworkDescription.Create().ToString());
-            }
         }
 
         public static ILogger GetLogger(Type classType)
1
using System; using System.Diagnostics; using System.IO; using System.Runtime.InteropServices; using Datadog.Trace.Configuration; using Datadog.Trace.Vendors.Serilog; using Datadog.Trace.Vendors.Serilog.Core; using Datadog.Trace.Vendors.Serilog.Events; using Datadog.Trace.Vendors.Serilog.Sinks.File; namespace Datadog.Trace.Logging { internal static class DatadogLogging { private const string NixDefaultDirectory = "/var/log/datadog/dotnet"; private static readonly long? MaxLogFileSize = 10 * 1024 * 1024; private static readonly LoggingLevelSwitch LoggingLevelSwitch = new LoggingLevelSwitch(LogEventLevel.Information); private static readonly ILogger SharedLogger = null; static DatadogLogging() { // No-op for if we fail to construct the file logger SharedLogger = new LoggerConfiguration() .WriteTo.Sink<NullSink>() .CreateLogger(); try { if (GlobalSettings.Source.DebugEnabled) { LoggingLevelSwitch.MinimumLevel = LogEventLevel.Verbose; } var maxLogSizeVar = Environment.GetEnvironmentVariable(ConfigurationKeys.MaxLogFileSize); if (long.TryParse(maxLogSizeVar, out var maxLogSize)) { // No verbose or debug logs MaxLogFileSize = maxLogSize; } var logDirectory = GetLogDirectory(); // ReSharper disable once ConditionIsAlwaysTrueOrFalse if (logDirectory == null) { return; } var currentProcess = Process.GetCurrentProcess(); // Ends in a dash because of the date postfix var managedLogPath = Path.Combine(logDirectory, $"dotnet-tracer-{currentProcess.ProcessName}-.log"); var loggerConfiguration = new LoggerConfiguration() .Enrich.FromLogContext() .MinimumLevel.ControlledBy(LoggingLevelSwitch) .WriteTo.File( managedLogPath, outputTemplate: "{Timestamp:yyyy-MM-dd HH:mm:ss.fff zzz} [{Level:u3}] {Message:lj}{NewLine}{Exception}{Properties}{NewLine}", rollingInterval: RollingInterval.Day, rollOnFileSizeLimit: true, fileSizeLimitBytes: MaxLogFileSize); try { var currentAppDomain = AppDomain.CurrentDomain; loggerConfiguration.Enrich.WithProperty("MachineName", currentProcess.MachineName); loggerConfiguration.Enrich.WithProperty("Process", $"[{currentProcess.Id} {currentProcess.ProcessName}]"); loggerConfiguration.Enrich.WithProperty("AppDomain", $"[{currentAppDomain.Id} {currentAppDomain.FriendlyName}]"); loggerConfiguration.Enrich.WithProperty("TracerVersion", TracerConstants.AssemblyVersion); } catch { // At all costs, make sure the logger works when possible. } SharedLogger = loggerConfiguration.CreateLogger(); } catch { // Don't let this exception bubble up as this logger is for debugging and is non-critical } finally { // Log some information to correspond with the app domain SharedLogger.Information(FrameworkDescription.Create().ToString()); } } public static ILogger GetLogger(Type classType) { // Tells us which types are loaded, when, and how often. 
SharedLogger.Debug($"Logger retrieved for: {classType.AssemblyQualifiedName}"); return SharedLogger; } public static ILogger For<T>() { return GetLogger(typeof(T)); } internal static void SetLogLevel(LogEventLevel logLevel) { LoggingLevelSwitch.MinimumLevel = logLevel; } internal static void UseDefaultLevel() { SetLogLevel(LogEventLevel.Information); } private static string GetLogDirectory() { var nativeLogFile = Environment.GetEnvironmentVariable(ConfigurationKeys.ProfilerLogPath); string logDirectory = null; if (!string.IsNullOrEmpty(nativeLogFile)) { logDirectory = Path.GetDirectoryName(nativeLogFile); } // This entire block may throw a SecurityException if not granted the System.Security.Permissions.FileIOPermission // because of the following API calls // - Directory.Exists // - Environment.GetFolderPath // - Path.GetTempPath if (logDirectory == null) { if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) { var windowsDefaultDirectory = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData), @"Datadog .NET Tracer", "logs"); if (Directory.Exists(windowsDefaultDirectory)) { logDirectory = windowsDefaultDirectory; } } else { // either Linux or OS X if (Directory.Exists(NixDefaultDirectory)) { logDirectory = NixDefaultDirectory; } else { try { var di = Directory.CreateDirectory(NixDefaultDirectory); logDirectory = NixDefaultDirectory; } catch { // Unable to create the directory meaning that the user // will have to create it on their own. } } } } if (logDirectory == null) { // Last effort at writing logs logDirectory = Path.GetTempPath(); } return logDirectory; } } }
1
16,872
FYI @lucaspimentel and @colin-higgins since you have already approved, I wanted to point out this required change. By fixing `FrameworkDescription` to use the correct logger, it created a cycle between these two static constructors, so I'm removing this log line and delaying it to when it's actually constructed later in the Tracer. Let me know if you find that acceptable or have any suggestions.
DataDog-dd-trace-dotnet
.cs
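The comment above reasons about a cycle between two static constructors: DatadogLogging's initializer logged FrameworkDescription, while FrameworkDescription now resolves its logger through DatadogLogging. A self-contained sketch (not Datadog code; the types A and B are hypothetical) of how such a type-initializer cycle behaves in C#: the runtime does not deadlock on a same-thread cycle, it simply lets the re-entered type be observed before its fields are set.

using System;

static class A
{
    public static readonly int Value;

    static A()
    {
        // Runs B's static constructor, which re-enters A before Value is assigned.
        Value = B.Value + 1;
    }
}

static class B
{
    public static readonly int Value;

    static B()
    {
        // A.Value is still 0 here, because A's static constructor has not finished.
        Value = A.Value + 1;
    }
}

class Program
{
    static void Main()
    {
        // When A is touched first this prints "A=2 B=1".
        Console.WriteLine($"A={A.Value} B={B.Value}");
    }
}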
@@ -34,6 +34,8 @@ module Beaker
          Beaker::Fusion
        when /blimpy/
          Beaker::Blimper
+      when /ec2/
+        Beaker::AwsSdk
        when /vcloud/
          if options['pooling_api']
            Beaker::VcloudPooled
1
%w( host_prebuilt_steps ).each do |lib| begin require lib rescue LoadError require File.expand_path(File.join(File.dirname(__FILE__), lib)) end end module Beaker #The Beaker class that interacts to all the supported hypervisors class Hypervisor include HostPrebuiltSteps #Generates an array with all letters a thru z and numbers 0 thru 9 CHARMAP = ('a'..'z').to_a + ('0'..'9').to_a #Hypervisor creator method. Creates the appropriate hypervisor class object based upon #the provided hypervisor type selected, then provisions hosts with hypervisor. #@param [String] type The type of hypervisor to create - one of aix, solaris, vsphere, fusion, # blimpy, vcloud or vagrant #@param [Array<Host>] hosts_to_provision The hosts to be provisioned with the selected hypervisor #@param [Hash] options options Options to alter execution def self.create(type, hosts_to_provision, options) @logger = options[:logger] @logger.notify("Beaker::Hypervisor, found some #{type} boxes to create") hyper_class = case type when /aix/ Beaker::Aixer when /solaris/ Beaker::Solaris when /vsphere/ Beaker::Vsphere when /fusion/ Beaker::Fusion when /blimpy/ Beaker::Blimper when /vcloud/ if options['pooling_api'] Beaker::VcloudPooled else Beaker::Vcloud end when /vagrant/ Beaker::Vagrant when /google/ Beaker::GoogleCompute when /none/ Beaker::Hypervisor else raise "Invalid hypervisor: #{type}" end hypervisor = hyper_class.new(hosts_to_provision, options) hypervisor.provision hypervisor end def initialize(hosts, options) @hosts = hosts @options = options end #Provisioning steps for be run for a given hypervisor. Default is nil. def provision nil end #Cleanup steps to be run for a given hypervisor. Default is nil. def cleanup nil end #Default configuration steps to be run for a given hypervisor def configure if @options[:timesync] timesync(@hosts, @options) end if @options[:root_keys] sync_root_keys(@hosts, @options) end if @options[:add_el_extras] add_el_extras(@hosts, @options) end if @options[:add_master_entry] add_master_entry(@hosts, @options) end end #Default validation steps to be run for a given hypervisor def validate if @options[:validate] validate_host(@hosts, @options) end end #Generate a random straing composted of letter and numbers def generate_host_name CHARMAP[rand(25)] + (0...14).map{CHARMAP[rand(CHARMAP.length)]}.join end end end %w( vsphere_helper vagrant fusion blimper vsphere vcloud vcloud_pooled aixer solaris google_compute_helper google_compute).each do |lib| begin require "hypervisor/#{lib}" rescue LoadError require File.expand_path(File.join(File.dirname(__FILE__), "hypervisor", lib)) end end
1
5,441
Ah, so we are going to need to update node/host files for this to work?
voxpupuli-beaker
rb
@@ -35,7 +35,7 @@ public class Container {
   private final ContainerId id;
 
   public Container(Function<HttpRequest, HttpResponse> client, ContainerId id) {
-    LOG.info("Created container " + id);
+    LOG.finest("Created container " + id);
     this.client = Objects.requireNonNull(client);
     this.id = Objects.requireNonNull(id);
   }
1
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.openqa.selenium.docker;

import static org.openqa.selenium.remote.http.HttpMethod.DELETE;
import static org.openqa.selenium.remote.http.HttpMethod.POST;

import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;

import java.time.Duration;
import java.util.Objects;
import java.util.function.Function;
import java.util.logging.Logger;

public class Container {

  public static final Logger LOG = Logger.getLogger(Container.class.getName());
  private final Function<HttpRequest, HttpResponse> client;
  private final ContainerId id;

  public Container(Function<HttpRequest, HttpResponse> client, ContainerId id) {
    LOG.info("Created container " + id);
    this.client = Objects.requireNonNull(client);
    this.id = Objects.requireNonNull(id);
  }

  public ContainerId getId() {
    return id;
  }

  public void start() {
    LOG.info("Starting " + getId());
    client.apply(new HttpRequest(POST, String.format("/containers/%s/start", id)));
  }

  public void stop(Duration timeout) {
    Objects.requireNonNull(timeout);
    LOG.info("Stopping " + getId());
    String seconds = String.valueOf(timeout.toMillis() / 1000);
    HttpRequest request = new HttpRequest(POST, String.format("/containers/%s/stop", id));
    request.addQueryParameter("t", seconds);
    client.apply(request);
  }

  public void delete() {
    LOG.info("Removing " + getId());
    HttpRequest request = new HttpRequest(DELETE, "/containers/" + id);
    client.apply(request);
  }
}
1
16,458
This code is new and not tested well. While we may drop the log level before we ship 4.0, right now this is extremely helpful to users.
SeleniumHQ-selenium
java
@@ -11,9 +11,15 @@ class ObservationsController < ApplicationController
   end
 
   def destroy
-    self.observation.destroy
-    flash[:success] = "Deleted Observation"
-    redirect_to proposal_path(self.observation.proposal_id)
+    proposal = observation.proposal
+    if current_user == observation.user
+      redirect_path = "/proposals"
+    else
+      redirect_path = proposal_path(proposal)
+    end
+    observation.destroy
+    flash[:success] = "Deleted Observation for #{proposal.public_id}"
+    redirect_to redirect_path
   end
 
   protected
1
class ObservationsController < ApplicationController
  before_action :authenticate_user!
  before_action :find_proposal
  before_action -> { authorize self.observation_for_auth }
  rescue_from Pundit::NotAuthorizedError, with: :auth_errors

  def create
    obs = @proposal.add_observer(observer_email, current_user, params[:observation][:reason])
    flash[:success] = "#{obs.user.full_name} has been added as an observer"
    redirect_to proposal_path(@proposal)
  end

  def destroy
    self.observation.destroy
    flash[:success] = "Deleted Observation"
    redirect_to proposal_path(self.observation.proposal_id)
  end

  protected

  def find_proposal
    @proposal ||= Proposal.find(params[:proposal_id])
  end

  def observation_for_auth
    if params[:action] == 'create'
      Observation.new(proposal: @proposal)
    else
      self.observation
    end
  end

  def observation
    @cached_observation ||= Observation.find(params[:id])
  end

  def observer_email
    params.permit(observation: { user: [:email_address] })
          .require(:observation).require(:user).require(:email_address)
  end

  def auth_errors(exception)
    render 'communicarts/authorization_error', status: 403, locals: {
      msg: "You are not allowed to add observers to that proposal. #{exception.message}"
    }
  end
end
1
15,354
used named path (`proposals_path`) instead?
18F-C2
rb
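The reviewer's question above points at the "/proposals" string literal in the new destroy action; Rails generates named route helpers for declared resources, so the same redirect can use proposals_path. A minimal sketch of the destroy action with that substitution, assuming a standard `resources :proposals` route is defined; this is a controller-action fragment, not a standalone script.

def destroy
  proposal = observation.proposal
  redirect_path =
    if current_user == observation.user
      proposals_path          # named route helper instead of the "/proposals" literal
    else
      proposal_path(proposal)
    end
  observation.destroy
  flash[:success] = "Deleted Observation for #{proposal.public_id}"
  redirect_to redirect_path
end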
@@ -107,7 +107,10 @@ class Sierra extends AbstractBase implements TranslatorAwareInterface
             . "FROM sierra_view.bib_view "
             . "LEFT JOIN sierra_view.bib_record_item_record_link ON "
             . "(bib_view.id = bib_record_item_record_link.bib_record_id) "
-            . "WHERE bib_view.record_num = $1;";
+            . "INNER JOIN sierra_view.item_view ON "
+            . "(bib_record_item_record_link.item_record_id = item_view.id "
+            . "WHERE bib_view.record_num = $1 "
+            . "AND item_view.is_suppressed = false;";
         $record_ids = pg_query_params(
             $this->db, $get_record_ids_query, [$this->idStrip($id)]
         );
1
<?php /** * Sierra (III) ILS Driver for VuFind * * PHP version 5 * * Copyright (C) 2013 Julia Bauder * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * @category VuFind * @package ILS_Drivers * @author Julia Bauder <[email protected]> * @license http://opensource.org/licenses/GPL-3.0 GNU General Public License * @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki */ namespace VuFind\ILS\Driver; use VuFind\Exception\ILS as ILSException, VuFind\I18n\Translator\TranslatorAwareInterface; /** * Sierra (III) ILS Driver for VuFind * * @category VuFind * @package ILS_Drivers * @author Julia Bauder <[email protected]> * @license http://opensource.org/licenses/GPL-3.0 GNU General Public License * @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki */ class Sierra extends AbstractBase implements TranslatorAwareInterface { use \VuFind\I18n\Translator\TranslatorAwareTrait; /** * Database connection * * @var resource */ protected $db; /** * Removes leading ".b" and trailing check digit from id numbers before querying * the database with them * * @param string $id ID string * * @return string */ protected function idStrip($id) { $id = preg_replace('/\.b/', '', $id); $id = substr($id, 0, -1); return $id; } /** * Converts the record numbers in the Postgres database to the record numbers in * VuFind * * @param string $bareNumber Record number from database * * @return string */ protected function createFullId($bareNumber) { $digitArray = str_split($bareNumber); $numberLength = count($digitArray); $partialCheck = 0; // see Millennium manual page #105781 for the logic behind this for ($i = $numberLength; $i > 0; $i--) { $j = $numberLength - $i; $partialCheck = $partialCheck + ($digitArray[$j] * ($i + 1)); } $checkdigit = $partialCheck % 11; if ($checkdigit == 10) { $checkdigit = "x"; } $fullNumber = ".b" . $bareNumber . $checkdigit; return $fullNumber; } /** * Uses the bib number in VuFind to look up the database ids for the associated * items * * @param string $id VuFind bib number * * @return array */ protected function getIds($id) { $get_record_ids_query = "SELECT bib_record_item_record_link.item_record_id, bib_view.id " . "FROM sierra_view.bib_view " . "LEFT JOIN sierra_view.bib_record_item_record_link ON " . "(bib_view.id = bib_record_item_record_link.bib_record_id) " . "WHERE bib_view.record_num = $1;"; $record_ids = pg_query_params( $this->db, $get_record_ids_query, [$this->idStrip($id)] ); while ($record = pg_fetch_row($record_ids)) { $itemRecords[] = $record[0]; } return $itemRecords; } /** * Modify location string to add status information, if necessary * * @param string $location Original location string * @param string $cattime Date and time item record was created * * @return string */ protected function getLocationText($location, $cattime) { // No "just cataloged" setting? Default to unmodified location. 
if (!isset($this->config['Catalog']['just_cataloged_time'])) { return $location; } // Convert hours to seconds: $seconds = 60 * 60 * $this->config['Catalog']['just_cataloged_time']; // Was this a recently cataloged item? If so, return a special string // based on the append setting.... if (time() - $seconds < strtotime($cattime)) { if (isset($this->config['Catalog']['just_cataloged_append']) && $this->config['Catalog']['just_cataloged_append'] == 'Y' ) { return $location . ' ' . $this->translate('just_cataloged'); } return $this->translate('just_cataloged'); } // Default case: return the location unmodified: return $location; } /** * Some call number processing used for both getStatus and getHoldings * * @param string $callnumber Call number * @param string $id ID * * @return string */ protected function processCallNumber($callnumber, $id) { // If there's no item-specific call number from the item-level queries // in getStatus/getHoldings, get the bib-level call number if ($callnumber == null) { $query = "SELECT varfield_view.field_content " . "FROM sierra_view.varfield_view " . "WHERE varfield_view.record_type_code = 'b' AND " . "varfield_view.varfield_type_code = 'c' and " . "varfield_view.record_num = $1;"; $results = pg_query_params( $this->db, $query, [$this->idStrip($id)] ); if (pg_num_rows($results) > 0) { $callnumberarray = pg_fetch_array($results, 0, PGSQL_NUM); $callnumber = $callnumberarray[0]; // stripping subfield codes from call numbers $callnumber = preg_replace('/\|(a|b)/', ' ', $callnumber); } else { $callnumber = ''; } } return $callnumber; } /** * Initialize the driver. * * Validate configuration and perform all resource-intensive tasks needed to * make the driver active. * * @throws ILSException * @return void */ public function init() { if (empty($this->config)) { throw new ILSException('Configuration needs to be set.'); } try { $conn_string = "host=" . $this->config['Catalog']['dna_url'] . " port=" . $this->config['Catalog']['dna_port'] . " dbname=" . $this->config['Catalog']['dna_db'] . " user=" . $this->config['Catalog']['dna_user'] . " password=" . $this->config['Catalog']['dna_password']; $this->db = pg_connect($conn_string); } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get Courses * * Obtain a list of courses for use in limiting the reserves list. * * @throws ILSException * @return array An associative array with key = ID, value = name. */ public function getCourses() { try { // Sierra allows for multiple names for a course. Only the first name // will be included here; all others will be ignored. If you absolutely // need to allow multiple course names to be listed here, duplicate the // hack that's currently in getInstructors and findReserves to allow for // multiple instructors for a course. $query = "SELECT field_content, record_num " . "FROM sierra_view.varfield_view WHERE record_type_code = 'r' AND " . "varfield_type_code = 'r' AND occ_num = '0' " . "ORDER BY field_content;"; $results = pg_query($query); while ($row = pg_fetch_row($results)) { $courses[$row[1]] = $row[0]; } return $courses; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get Departments * * Obtain a list of departments for use in limiting the reserves list. * * @throws ILSException * @return array An associative array with key = dept. ID, value = dept. name. */ public function getDepartments() { try { // Sierra does not allow for searching for reserves by departments. 
$departments = []; return $departments; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get Instructors * * Obtain a list of instructors for use in limiting the reserves list. * * @throws ILSException * @return array An associative array with key = ID, value = name. */ public function getInstructors() { // This function contains some hacks. To wit: // Each instructor will be listed once for each course they are teaching, // with each course name in parentheses after the instructor name. This is // because Sierra doesn't actually have an "instructor ID" -- reserve books // can only be looked up by course, not by instructor. // To deal with cases where a given course may have multiple instructors, // "fake IDs" are constructed for each instance of a course after the first. // The findReserves function can work back from these fake IDs to the actual // course ID. (Otherwise a course could only be listed under one instructor.) try { $query = "SELECT t1.field_content, t2.field_content, t1.record_num " . "FROM sierra_view.varfield_view AS t1 " . "INNER JOIN sierra_view.varfield_view AS t2 ON " . "t1.record_num = t2.record_num WHERE " . "t1.record_type_code = 'r' AND t1.varfield_type_code = 'p' AND " . "t2.record_type_code = 'r' AND t2.varfield_type_code = 'r' AND " . "t2.occ_num = '0' ORDER BY t1.field_content;"; $results = pg_query($query); $instructors = []; $j = 0; while ($row = pg_fetch_row($results)) { if ($instructors[$row[2]] != null) { $fakeId = $row[2] . "-" . $j; $instructors[$fakeId] = $row[0] . " (" . $row[1] . ")"; $j++; } else { $instructors[$row[2]] = $row[0] . " (" . $row[1] . ")"; } } return $instructors; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Find Reserves * * Obtain information on course reserves. * * @param string $course ID from getCourses (empty string to match all) * @param string $instructor ID from getInstructors (empty string to match all) * @param string $department ID from getDepartments (empty string to match all) * * @throws ILSException * @return array An array of associative arrays representing reserve items. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function findReserves($course, $instructor, $department) { try { if ($course != null) { $coursenum = $course; } elseif ($instructor != null) { // This deals with the "fake ID" hack explained in the getInstructors // function $instructor = explode("-", $instructor); $coursenum = $instructor[0]; } $query = "SELECT DISTINCT bib_view.record_num " . "FROM sierra_view.bib_view " . "INNER JOIN sierra_view.bib_record_item_record_link " . "ON (bib_view.id = bib_record_item_record_link.bib_record_id) " . "INNER JOIN sierra_view.course_record_item_record_link " . "ON (course_record_item_record_link.item_record_id = " . "bib_record_item_record_link.item_record_id) " . "INNER JOIN sierra_view.varfield_view " . "ON (course_record_item_record_link.course_record_id = " . "varfield_view.record_id) " . "WHERE varfield_view.record_num = $1;"; $results = pg_query_params($this->db, $query, [$coursenum]); while ($resultArray = pg_fetch_row($results)) { $bareNumber = $resultArray[0]; $fullNumber = $this->createFullId($bareNumber); $reserves[]['BIB_ID'] = $fullNumber; } return $reserves; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get Funds * * Return a list of funds which may be used to limit the getNewItems list. * * @throws ILSException * @return array An associative array with key = fund ID, value = fund name. 
*/ public function getFunds() { try { $funds = []; $query = "SELECT DISTINCT fund_master.code_num, fund_master.code FROM sierra_view.fund_master;"; $results = pg_query($this->db, $query); while ($resultArray = pg_fetch_row($results)) { $funds[$resultArray[0]] = $resultArray[1]; } return $funds; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get Status * * This is responsible for retrieving the status information of a certain * record. * * @param string $id The record id to retrieve the holdings for * * @throws ILSException * @return mixed On success, an associative array with the following keys: * id, availability (boolean), status, location, reserve, callnumber. */ public function getStatus($id) { try { $status = []; $itemIds = $this->getIds($id); // Use the database ids to get the item-level information (status, // location, and potentially call number) associated with that bib record $query1 = "SELECT item_view.item_status_code, " . "location_name.name, " . "varfield_view.field_content, " . "varfield_view.varfield_type_code, " . "checkout.due_gmt, " . "item_view.record_creation_date_gmt " . "FROM sierra_view.item_view " . "LEFT JOIN sierra_view.varfield_view " . "ON (item_view.id = varfield_view.record_id) " . "LEFT JOIN sierra_view.location " . "ON (item_view.location_code = location.code) " . "LEFT JOIN sierra_view.location_name " . "ON (location.id = location_name.location_id) " . "LEFT JOIN sierra_view.checkout " . "ON (item_view.id = checkout.item_record_id) " . "WHERE item_view.id = $1 " . "AND varfield_view.record_type_code = 'i' " . "AND location_name.iii_language_id = '1';"; pg_prepare($this->db, "prep_query", $query1); foreach ($itemIds as $item) { $callnumber = null; $results1 = pg_execute($this->db, "prep_query", [$item]); while ($resultArray = pg_fetch_row($results1)) { if ($resultArray[3] == "c") { $callnumber = $resultArray[2]; } } $finalcallnumber = $this->processCallNumber($callnumber, $id); $resultArray = pg_fetch_array($results1, 0); if (($resultArray[0] == "-" && $resultArray[4] == null) || ($resultArray[0] == "o" && $resultArray[4] == null) ) { $availability = true; } else { $availability = false; } $location = $this->getLocationText($resultArray[1], $resultArray[5]); $itemInfo = [ "id" => $id, "status" => $resultArray[0], "location" => $location, "reserve" => "N", "callnumber" => $finalcallnumber, "availability" => $availability ]; $status[] = $itemInfo; } return $status; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get Holding * * This is responsible for retrieving the holding information of a certain * record. * * @param string $id The record id to retrieve the holdings for * @param array $patron Patron data * * @throws DateException * @throws ILSException * @return array On success, an associative array with the following * keys: id, availability (boolean), status, location, reserve, callnumber, * duedate, number, barcode. 
*/ public function getHolding($id, array $patron = null) { try { $holdings = []; $itemIds = $this->getIds($id); // Use the database ids to get the item-level information (status, // location, and potentially call number) associated with that bib record $query1 = "SELECT item_view.item_status_code, location_name.name, checkout.due_gmt, varfield_view.field_content, varfield_view.varfield_type_code, item_view.record_creation_date_gmt FROM sierra_view.item_view LEFT JOIN sierra_view.location ON (item_view.location_code = location.code) LEFT JOIN sierra_view.location_name ON (location.id = location_name.location_id) LEFT JOIN sierra_view.checkout ON (item_view.id = checkout.item_record_id) LEFT JOIN sierra_view.varfield_view ON (item_view.id = varfield_view.record_id) WHERE item_view.id = $1 AND varfield_view.record_type_code = 'i' AND location_name.iii_language_id = '1';"; pg_prepare($this->db, "prep_query", $query1); foreach ($itemIds as $item) { $callnumber = null; $barcode = null; $results1 = pg_execute($this->db, "prep_query", [$item]); $number = null; while ($row1 = pg_fetch_row($results1)) { if ($row1[4] == "b") { $barcode = $row1[3]; } elseif ($row1[4] == "c") { $callnumber = $row1[3]; } elseif ($row1[4] == "v") { $number = $row1[3]; } } $finalcallnumber = $this->processCallNumber($callnumber, $id); $resultArray = pg_fetch_array($results1, 0); if (($resultArray[0] == "-" && $resultArray[2] == null) || ($resultArray[0] == "o" && $resultArray[2] == null) ) { $availability = true; } else { $availability = false; } $location = $this->getLocationText($resultArray[1], $resultArray[5]); $itemInfo = [ "id" => $id, "availability" => $availability, "status" => $resultArray[0], "location" => $location, "reserve" => "N", "callnumber" => $finalcallnumber, "duedate" => $resultArray[2], "returnDate" => false, "number" => $number, "barcode" => $barcode ]; $holdings[] = $itemInfo; } return $holdings; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get New Items * * Retrieve the IDs of items recently added to the catalog. * * @param int $page Page number of results to retrieve (counting starts at 1) * @param int $limit The size of each page of results to retrieve * @param int $daysOld The maximum age of records to retrieve in days (max. 30) * @param int $fundID optional fund ID to use for limiting results (use a value * returned by getFunds, or exclude for no limit); note that "fund" may be a * misnomer - if funds are not an appropriate way to limit your new item * results, you can return a different set of values from getFunds. The * important thing is that this parameter supports an ID returned by getFunds, * whatever that may mean. * * @throws ILSException * @return array Associative array with 'count' and 'results' keys */ public function getNewItems($page, $limit, $daysOld, $fundID) { try { $newItems = []; $offset = $limit * ($page - 1); $daysOld = (int) $daysOld; if (is_int($daysOld) == false || $daysOld > 30) { $daysOld = "30"; } $query = "SELECT bib_view.record_num FROM sierra_view.bib_view "; if ($fundID != null) { $query .= "INNER JOIN sierra_view.bib_record_order_record_link " . "ON (bib_view.id = bib_record_order_record_link.bib_record_id)" . " INNER JOIN sierra_view.order_record_cmf " . "ON (bib_record_order_record_link.order_record_id = " . "order_record_cmf.order_record_id) " . "INNER JOIN sierra_view.fund_master " . "ON (CAST (order_record_cmf.fund_code AS integer) = " . "fund_master.code_num) " . 
"WHERE fund_master.code_num = CAST ($3 AS integer) AND "; } else { $query .= "WHERE "; } if ($this->config['Catalog']['new_by_cat_date'] == "Y") { $query .= "bib_view.cataloging_date_gmt BETWEEN " . "date_trunc('day', (now() - interval '" . $daysOld . " days')) AND now() "; } else { $query .= "bib_view.record_creation_date_gmt BETWEEN " . "date_trunc('day', (now() - interval '" . $daysOld . " days')) AND now() "; } $query .= "ORDER BY cataloging_date_gmt LIMIT CAST ($1 AS integer) " . "OFFSET CAST ($2 AS integer);"; if ($fundID != null) { $results = pg_query_params( $this->db, $query, [$limit, $offset, $fundID] ); } else { $results = pg_query_params( $this->db, $query, [$limit, $offset] ); } $newItems['count'] = (string) pg_num_rows($results); if (pg_num_rows($results) != 0) { while ($record = pg_fetch_row($results)) { $bareNumber = $record[0]; $fullNumber = $this->createFullId($bareNumber); $newItems['results'][]['id'] = $fullNumber; } } else { $newItems['results'] = []; $newItems['results'][0]['id'] = null; } return $newItems; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get Purchase History * * This is responsible for retrieving the acquisitions history data for the * specific record (usually recently received issues of a serial). * * @param string $id The record id to retrieve the info for * * @throws ILSException * @return array An array with the acquisitions data on success. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getPurchaseHistory($id) { try { // TODO $history = []; return $history; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get Statuses * * This is responsible for retrieving the status information for a * collection of records. * * @param array $ids The array of record ids to retrieve the status for * * @throws ILSException * @return array An array of getStatus() return values on success. */ public function getStatuses($ids) { try { $statuses = []; foreach ($ids as $id) { $statuses[] = $this->getStatus($id); } return $statuses; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get suppressed authority records * * @return array ID numbers of suppressed authority records in the system. */ public function getSuppressedAuthorityRecords() { try { $authRecords = []; $query = "SELECT record_metadata.record_num FROM " . "sierra_view.authority_record LEFT JOIN " . "sierra_view.record_metadata ON " . "(authority_record.record_id = record_metadata.id) " . "where authority_record.is_suppressed = 't';"; $record_ids = pg_query($this->db, $query); while ($record = pg_fetch_row($record_ids)) { $authRecords[] = $record[0]; } return $authRecords; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } /** * Get suppressed records. * * @throws ILSException * @return array ID numbers of suppressed records in the system. */ public function getSuppressedRecords() { try { $suppRecords = []; $query = "SELECT record_metadata.record_num FROM " . "sierra_view.bib_record LEFT JOIN sierra_view.record_metadata " . "ON (bib_record.record_id = record_metadata.id) " . "where bib_record.is_suppressed = 't';"; $record_ids = pg_query($this->db, $query); while ($record = pg_fetch_row($record_ids)) { $suppRecords[] = $record[0]; } return $suppRecords; } catch (\Exception $e) { throw new ILSException($e->getMessage()); } } }
1
25,223
Am I misreading something, or is there a mismatched parenthesis here? Please let me know whether or not this is cause for concern -- just wanted to be totally sure before merging, since I can't test this from here. Thanks!
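The reviewer's reading appears correct as far as the quoted hunk shows: the parenthesis opened for the INNER JOIN's ON clause is never closed before the WHERE. A hedged sketch of a balanced version of the same concatenation, illustrative only and not the committed change:

<?php
// Illustrative only: the same query concatenation as in the proposed patch,
// with the ON clause's opening parenthesis closed before WHERE
// (a ")" added after item_view.id).
$get_record_ids_query = "SELECT bib_record_item_record_link.item_record_id, bib_view.id "
    . "FROM sierra_view.bib_view "
    . "LEFT JOIN sierra_view.bib_record_item_record_link ON "
    . "(bib_view.id = bib_record_item_record_link.bib_record_id) "
    . "INNER JOIN sierra_view.item_view ON "
    . "(bib_record_item_record_link.item_record_id = item_view.id) "
    . "WHERE bib_view.record_num = $1 "
    . "AND item_view.is_suppressed = false;";
echo $get_record_ids_query . PHP_EOL;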
vufind-org-vufind
php
@@ -256,6 +256,13 @@ func (o *Outbound) call(ctx context.Context, treq *transport.Request) (*transpor span.SetTag("http.status_code", response.StatusCode) + // Service name match validation, return yarpcerrors.CodeInternal error if not match + if match, resSvcName := checkServiceMatch(treq.Service, response.Header); !match { + return nil, transport.UpdateSpanWithErr(span, + yarpcerrors.InternalErrorf("service name sent from the request "+ + "does not match the service name received in the response, sent %q, got: %q", treq.Service, resSvcName)) + } + tres := &transport.Response{ Headers: applicationHeaders.FromHTTPHeaders(response.Header, transport.NewHeaders()), Body: response.Body,
1
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package http import ( "context" "fmt" "io/ioutil" "log" "net/http" "net/url" "strings" "time" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" opentracinglog "github.com/opentracing/opentracing-go/log" "go.uber.org/yarpc" "go.uber.org/yarpc/api/peer" "go.uber.org/yarpc/api/transport" "go.uber.org/yarpc/internal/introspection" intyarpcerrors "go.uber.org/yarpc/internal/yarpcerrors" peerchooser "go.uber.org/yarpc/peer" "go.uber.org/yarpc/peer/hostport" "go.uber.org/yarpc/pkg/lifecycle" "go.uber.org/yarpc/yarpcerrors" ) // this ensures the HTTP outbound implements both transport.Outbound interfaces var ( _ transport.UnaryOutbound = (*Outbound)(nil) _ transport.OnewayOutbound = (*Outbound)(nil) _ introspection.IntrospectableOutbound = (*Outbound)(nil) ) var defaultURLTemplate, _ = url.Parse("http://localhost") // OutboundOption customizes an HTTP Outbound. type OutboundOption func(*Outbound) func (OutboundOption) httpOption() {} // URLTemplate specifies the URL this outbound makes requests to. For // peer.Chooser-based outbounds, the peer (host:port) spection of the URL may // vary from call to call but the rest will remain unchanged. For single-peer // outbounds, the URL will be used as-is. func URLTemplate(template string) OutboundOption { return func(o *Outbound) { o.setURLTemplate(template) } } // AddHeader specifies that an HTTP outbound should always include the given // header in outgoung requests. // // httpTransport.NewOutbound(chooser, http.AddHeader("X-Token", "TOKEN")) // // Note that headers starting with "Rpc-" are reserved by YARPC. This function // will panic if the header starts with "Rpc-". func AddHeader(key, value string) OutboundOption { if strings.HasPrefix(strings.ToLower(key), "rpc-") { panic(fmt.Errorf( "invalid header name %q: "+ `headers starting with "Rpc-" are reserved by YARPC`, key)) } return func(o *Outbound) { if o.headers == nil { o.headers = make(http.Header) } o.headers.Add(key, value) } } // NewOutbound builds an HTTP outbound that sends requests to peers supplied // by the given peer.Chooser. The URL template for used for the different // peers may be customized using the URLTemplate option. // // The peer chooser and outbound must share the same transport, in this case // the HTTP transport. 
// The peer chooser must use the transport's RetainPeer to obtain peer // instances and return those peers to the outbound when it calls Choose. // The concrete peer type is private and intrinsic to the HTTP transport. func (t *Transport) NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound { o := &Outbound{ once: lifecycle.NewOnce(), chooser: chooser, urlTemplate: defaultURLTemplate, tracer: t.tracer, transport: t, bothResponseError: true, } for _, opt := range opts { opt(o) } return o } // NewOutbound builds an HTTP outbound that sends requests to peers supplied // by the given peer.Chooser. The URL template for used for the different // peers may be customized using the URLTemplate option. // // The peer chooser and outbound must share the same transport, in this case // the HTTP transport. // The peer chooser must use the transport's RetainPeer to obtain peer // instances and return those peers to the outbound when it calls Choose. // The concrete peer type is private and intrinsic to the HTTP transport. func NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound { return NewTransport().NewOutbound(chooser, opts...) } // NewSingleOutbound builds an outbound that sends YARPC requests over HTTP // to the specified URL. // // The URLTemplate option has no effect in this form. func (t *Transport) NewSingleOutbound(uri string, opts ...OutboundOption) *Outbound { parsedURL, err := url.Parse(uri) if err != nil { panic(err.Error()) } chooser := peerchooser.NewSingle(hostport.PeerIdentifier(parsedURL.Host), t) o := t.NewOutbound(chooser) for _, opt := range opts { opt(o) } o.setURLTemplate(uri) return o } // Outbound sends YARPC requests over HTTP. It may be constructed using the // NewOutbound function or the NewOutbound or NewSingleOutbound methods on the // HTTP Transport. It is recommended that services use a single HTTP transport // to construct all HTTP outbounds, ensuring efficient sharing of resources // across the different outbounds. type Outbound struct { chooser peer.Chooser urlTemplate *url.URL tracer opentracing.Tracer transport *Transport // Headers to add to all outgoing requests. headers http.Header once *lifecycle.Once // should only be false in testing bothResponseError bool } // setURLTemplate configures an alternate URL template. // The host:port portion of the URL template gets replaced by the chosen peer's // identifier for each outbound request. func (o *Outbound) setURLTemplate(URL string) { parsedURL, err := url.Parse(URL) if err != nil { log.Fatalf("failed to configure HTTP outbound: invalid URL template %q: %s", URL, err) } o.urlTemplate = parsedURL } // Transports returns the outbound's HTTP transport. func (o *Outbound) Transports() []transport.Transport { return []transport.Transport{o.transport} } // Chooser returns the outbound's peer chooser. func (o *Outbound) Chooser() peer.Chooser { return o.chooser } // Start the HTTP outbound func (o *Outbound) Start() error { return o.once.Start(o.chooser.Start) } // Stop the HTTP outbound func (o *Outbound) Stop() error { return o.once.Stop(o.chooser.Stop) } // IsRunning returns whether the Outbound is running. 
func (o *Outbound) IsRunning() bool { return o.once.IsRunning() } // Call makes a HTTP request func (o *Outbound) Call(ctx context.Context, treq *transport.Request) (*transport.Response, error) { if treq == nil { return nil, yarpcerrors.InvalidArgumentErrorf("request for http unary outbound was nil") } return o.call(ctx, treq) } // CallOneway makes a oneway request func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (transport.Ack, error) { if treq == nil { return nil, yarpcerrors.InvalidArgumentErrorf("request for http oneway outbound was nil") } _, err := o.call(ctx, treq) if err != nil { return nil, err } return time.Now(), nil } func (o *Outbound) call(ctx context.Context, treq *transport.Request) (*transport.Response, error) { start := time.Now() deadline, ok := ctx.Deadline() if !ok { return nil, yarpcerrors.Newf(yarpcerrors.CodeInvalidArgument, "missing context deadline") } ttl := deadline.Sub(start) hreq, err := o.createRequest(treq) if err != nil { return nil, err } hreq.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil) ctx, hreq, span, err := o.withOpentracingSpan(ctx, hreq, treq, start) if err != nil { return nil, err } defer span.Finish() hreq = o.withCoreHeaders(hreq, treq, ttl) hreq = hreq.WithContext(ctx) response, err := o.roundTrip(hreq, treq, start) if err != nil { span.SetTag("error", true) span.LogFields(opentracinglog.String("event", err.Error())) return nil, err } span.SetTag("http.status_code", response.StatusCode) tres := &transport.Response{ Headers: applicationHeaders.FromHTTPHeaders(response.Header, transport.NewHeaders()), Body: response.Body, ApplicationError: response.Header.Get(ApplicationStatusHeader) == ApplicationErrorStatus, } bothResponseError := response.Header.Get(BothResponseErrorHeader) == AcceptTrue if bothResponseError && o.bothResponseError { if response.StatusCode >= 300 { return tres, getYARPCErrorFromResponse(response, true) } return tres, nil } if response.StatusCode >= 200 && response.StatusCode < 300 { return tres, nil } return nil, getYARPCErrorFromResponse(response, false) } func (o *Outbound) getPeerForRequest(ctx context.Context, treq *transport.Request) (*httpPeer, func(error), error) { p, onFinish, err := o.chooser.Choose(ctx, treq) if err != nil { return nil, nil, err } hpPeer, ok := p.(*httpPeer) if !ok { return nil, nil, peer.ErrInvalidPeerConversion{ Peer: p, ExpectedType: "*httpPeer", } } return hpPeer, onFinish, nil } func (o *Outbound) createRequest(treq *transport.Request) (*http.Request, error) { newURL := *o.urlTemplate return http.NewRequest("POST", newURL.String(), treq.Body) } func (o *Outbound) withOpentracingSpan(ctx context.Context, req *http.Request, treq *transport.Request, start time.Time) (context.Context, *http.Request, opentracing.Span, error) { // Apply HTTP Context headers for tracing and baggage carried by tracing. 
tracer := o.tracer var parent opentracing.SpanContext // ok to be nil if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { parent = parentSpan.Context() } tags := opentracing.Tags{ "rpc.caller": treq.Caller, "rpc.service": treq.Service, "rpc.encoding": treq.Encoding, "rpc.transport": "http", } for k, v := range yarpc.OpentracingTags { tags[k] = v } span := tracer.StartSpan( treq.Procedure, opentracing.StartTime(start), opentracing.ChildOf(parent), tags, ) ext.PeerService.Set(span, treq.Service) ext.SpanKindRPCClient.Set(span) ext.HTTPUrl.Set(span, req.URL.String()) ctx = opentracing.ContextWithSpan(ctx, span) err := tracer.Inject( span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header), ) return ctx, req, span, err } func (o *Outbound) withCoreHeaders(req *http.Request, treq *transport.Request, ttl time.Duration) *http.Request { // Add default headers to all requests. for k, vs := range o.headers { for _, v := range vs { req.Header.Add(k, v) } } req.Header.Set(CallerHeader, treq.Caller) req.Header.Set(ServiceHeader, treq.Service) req.Header.Set(ProcedureHeader, treq.Procedure) if ttl != 0 { req.Header.Set(TTLMSHeader, fmt.Sprintf("%d", ttl/time.Millisecond)) } if treq.ShardKey != "" { req.Header.Set(ShardKeyHeader, treq.ShardKey) } if treq.RoutingKey != "" { req.Header.Set(RoutingKeyHeader, treq.RoutingKey) } if treq.RoutingDelegate != "" { req.Header.Set(RoutingDelegateHeader, treq.RoutingDelegate) } encoding := string(treq.Encoding) if encoding != "" { req.Header.Set(EncodingHeader, encoding) } if o.bothResponseError { req.Header.Set(AcceptsBothResponseErrorHeader, AcceptTrue) } return req } func getYARPCErrorFromResponse(response *http.Response, bothResponseError bool) error { var contents string if bothResponseError { contents = response.Header.Get(ErrorMessageHeader) } else { contentsBytes, err := ioutil.ReadAll(response.Body) if err != nil { return yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error()) } contents = string(contentsBytes) if err := response.Body.Close(); err != nil { return yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error()) } } // use the status code if we can't get a code from the headers code := statusCodeToBestCode(response.StatusCode) if errorCodeText := response.Header.Get(ErrorCodeHeader); errorCodeText != "" { var errorCode yarpcerrors.Code // TODO: what to do with error? if err := errorCode.UnmarshalText([]byte(errorCodeText)); err == nil { code = errorCode } } return intyarpcerrors.NewWithNamef( code, response.Header.Get(ErrorNameHeader), strings.TrimSuffix(contents, "\n"), ) } // RoundTrip implements the http.RoundTripper interface, making a YARPC HTTP outbound suitable as a // Transport when constructing an HTTP Client. An HTTP client is suitable only for relative paths to // a single outbound service. The HTTP outbound overrides the host:port portion of the URL of the // provided request. // // Sample usage: // // client := http.Client{Transport: outbound} // // Thereafter use the Golang standard library HTTP to send requests with this client. // // ctx, cancel := context.WithTimeout(context.Background(), time.Second) // defer cancel() // req, err := http.NewRequest("GET", "http://example.com/", nil /* body */) // req = req.WithContext(ctx) // res, err := client.Do(req) // // All requests must have a deadline on the context. // The peer chooser for raw HTTP requests will receive a YARPC transport.Request with no body. 
// // OpenTracing information must be added manually, before this call, to support context propagation. func (o *Outbound) RoundTrip(hreq *http.Request) (*http.Response, error) { return o.roundTrip(hreq, nil /* treq */, time.Now()) } func (o *Outbound) roundTrip(hreq *http.Request, treq *transport.Request, start time.Time) (*http.Response, error) { ctx := hreq.Context() deadline, ok := ctx.Deadline() if !ok { return nil, yarpcerrors.Newf( yarpcerrors.CodeInvalidArgument, "missing context deadline") } ttl := deadline.Sub(start) // When sending requests through the RoundTrip method, we construct the // transport request from the HTTP headers as if it were an inbound // request. // The API for setting transport metadata for an outbound request when // using the go stdlib HTTP client is to use headers as the YAPRC HTTP // transport header conventions. if treq == nil { treq = &transport.Request{ Caller: hreq.Header.Get(CallerHeader), Service: hreq.Header.Get(ServiceHeader), Encoding: transport.Encoding(hreq.Header.Get(EncodingHeader)), Procedure: hreq.Header.Get(ProcedureHeader), ShardKey: hreq.Header.Get(ShardKeyHeader), RoutingKey: hreq.Header.Get(RoutingKeyHeader), RoutingDelegate: hreq.Header.Get(RoutingDelegateHeader), Headers: applicationHeaders.FromHTTPHeaders(hreq.Header, transport.Headers{}), } } if err := o.once.WaitUntilRunning(ctx); err != nil { return nil, intyarpcerrors.AnnotateWithInfo( yarpcerrors.FromError(err), "error waiting for HTTP outbound to start for service: %s", treq.Service) } p, onFinish, err := o.getPeerForRequest(ctx, treq) if err != nil { return nil, err } hres, err := o.doWithPeer(ctx, hreq, treq, start, ttl, p) // Call the onFinish method before returning (with the error from call with peer) onFinish(err) return hres, err } func (o *Outbound) doWithPeer( ctx context.Context, hreq *http.Request, treq *transport.Request, start time.Time, ttl time.Duration, p *httpPeer, ) (*http.Response, error) { hreq.URL.Host = p.HostPort() response, err := o.transport.client.Do(hreq.WithContext(ctx)) if err != nil { // Workaround borrowed from ctxhttp until // https://github.com/golang/go/issues/17711 is resolved. select { case <-ctx.Done(): err = ctx.Err() default: } if err == context.DeadlineExceeded { end := time.Now() return nil, yarpcerrors.Newf( yarpcerrors.CodeDeadlineExceeded, "client timeout for procedure %q of service %q after %v", treq.Procedure, treq.Service, end.Sub(start)) } // Note that the connection may have been lost so the peer connection // maintenance loop resumes probing for availability. p.OnDisconnected() return nil, yarpcerrors.Newf(yarpcerrors.CodeUnknown, "unknown error from http client: %s", err.Error()) } return response, nil } // Introspect returns basic status about this outbound. func (o *Outbound) Introspect() introspection.OutboundStatus { state := "Stopped" if o.IsRunning() { state = "Running" } var chooser introspection.ChooserStatus if i, ok := o.chooser.(introspection.IntrospectableChooser); ok { chooser = i.Introspect() } else { chooser = introspection.ChooserStatus{ Name: "Introspection not available", } } return introspection.OutboundStatus{ Transport: "http", Endpoint: o.urlTemplate.String(), State: state, Chooser: chooser, } }
1
16,797
Might we want a hook to allow emitting metrics or logs when an empty service header in the response is permitted (or, similarly, an UpdateSpanWithErr on an empty response service header if the caller wants strict enforcement)?
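One possible shape for such a hook, sketched under the assumption that it sits in the same package as the outbound (so the ServiceHeader constant, net/http, and yarpcerrors are already in scope); the helper name and the onEmpty callback are hypothetical, and the mismatch message simply mirrors the one in the proposed patch:

// validateResponseService is a hypothetical helper, not part of the patch: it
// tolerates a missing service header by invoking a caller-supplied hook, and
// returns an error only on a genuine mismatch.
func validateResponseService(reqService string, header http.Header, onEmpty func(service string)) error {
    resSvcName := header.Get(ServiceHeader)
    if resSvcName == "" {
        if onEmpty != nil {
            onEmpty(reqService) // emit a metric or log line for the permitted empty header
        }
        return nil
    }
    if resSvcName != reqService {
        return yarpcerrors.InternalErrorf(
            "service name sent from the request does not match the service name "+
                "received in the response, sent %q, got: %q", reqService, resSvcName)
    }
    return nil
}

At the call site in call(), the returned error could still be wrapped with transport.UpdateSpanWithErr(span, err) as the patch does, so strict enforcement remains available to callers who want it.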
yarpc-yarpc-go
go
@@ -63,6 +63,7 @@ class AzureBlobClient(FileSystem): * `token_credential` - A token credential used to authenticate HTTPS requests. The token value should be updated before its expiration. """ self.options = {"account_name": account_name, "account_key": account_key, "sas_token": sas_token} + kwargs["protocol"] = kwargs.get("protocol") or "https" # Default protocol to https if it's not set self.kwargs = kwargs @property
1
# -*- coding: utf-8 -*- # # Copyright (c) 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # import os import tempfile import logging import datetime from azure.storage.blob import blockblobservice from luigi.format import get_default_format from luigi.target import FileAlreadyExists, FileSystem, AtomicLocalFile, FileSystemTarget logger = logging.getLogger('luigi-interface') class AzureBlobClient(FileSystem): """ Create an Azure Blob Storage client for authentication. Users can create multiple storage account, each of which acts like a silo. Under each storage account, we can create a container. Inside each container, the user can create multiple blobs. For each account, there should be an account key. This account key cannot be changed and one can access all the containers and blobs under this account using the account key. Usually using an account key might not always be the best idea as the key can be leaked and cannot be revoked. The solution to this issue is to create Shared `Access Signatures` aka `sas`. A SAS can be created for an entire container or just a single blob. SAS can be revoked. """ def __init__(self, account_name=None, account_key=None, sas_token=None, **kwargs): """ :param str account_name: The storage account name. This is used to authenticate requests signed with an account key\ and to construct the storage endpoint. It is required unless a connection string is given,\ or if a custom domain is used with anonymous authentication. :param str account_key: The storage account key. This is used for shared key authentication. :param str sas_token: A shared access signature token to use to authenticate requests instead of the account key. :param dict kwargs: A key-value pair to provide additional connection options. * `protocol` - The protocol to use for requests. Defaults to https. * `connection_string` - If specified, this will override all other parameters besides request session.\ See http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ for the connection string format * `endpoint_suffix` - The host base component of the url, minus the account name. Defaults to Azure\ (core.windows.net). Override this to use the China cloud (core.chinacloudapi.cn). * `custom_domain` - The custom domain to use. This can be set in the Azure Portal. For example, ‘www.mydomain.com’. * `token_credential` - A token credential used to authenticate HTTPS requests. The token value should be updated before its expiration. 
""" self.options = {"account_name": account_name, "account_key": account_key, "sas_token": sas_token} self.kwargs = kwargs @property def connection(self): return blockblobservice.BlockBlobService(account_name=self.options.get("account_name"), account_key=self.options.get("account_key"), sas_token=self.options.get("sas_token"), protocol=self.kwargs.get("protocol"), connection_string=self.kwargs.get("connection_string"), endpoint_suffix=self.kwargs.get("endpoint_suffix"), custom_domain=self.kwargs.get("custom_domain"), is_emulated=self.kwargs.get("is_emulated") or False) def upload(self, tmp_path, container, blob, **kwargs): logging.debug("Uploading file '{tmp_path}' to container '{container}' and blob '{blob}'".format( tmp_path=tmp_path, container=container, blob=blob)) self.create_container(container) lease_id = self.connection.acquire_blob_lease(container, blob)\ if self.exists("{container}/{blob}".format(container=container, blob=blob)) else None try: self.connection.create_blob_from_path(container, blob, tmp_path, lease_id=lease_id, progress_callback=kwargs.get("progress_callback")) finally: if lease_id is not None: self.connection.release_blob_lease(container, blob, lease_id) def download_as_bytes(self, container, blob, bytes_to_read=None): start_range, end_range = (0, bytes_to_read-1) if bytes_to_read is not None else (None, None) logging.debug("Downloading from container '{container}' and blob '{blob}' as bytes".format( container=container, blob=blob)) return self.connection.get_blob_to_bytes(container, blob, start_range=start_range, end_range=end_range).content def download_as_file(self, container, blob, location): logging.debug("Downloading from container '{container}' and blob '{blob}' to {location}".format( container=container, blob=blob, location=location)) return self.connection.get_blob_to_path(container, blob, location) def create_container(self, container_name): return self.connection.create_container(container_name) def delete_container(self, container_name): lease_id = self.connection.acquire_container_lease(container_name) self.connection.delete_container(container_name, lease_id=lease_id) def exists(self, path): container, blob = self.splitfilepath(path) return self.connection.exists(container, blob) def remove(self, path, recursive=True, skip_trash=True): container, blob = self.splitfilepath(path) if not self.exists(path): return False lease_id = self.connection.acquire_blob_lease(container, blob) self.connection.delete_blob(container, blob, lease_id=lease_id) return True def mkdir(self, path, parents=True, raise_if_exists=False): container, blob = self.splitfilepath(path) if raise_if_exists and self.exists(path): raise FileAlreadyExists("The Azure blob path '{blob}' already exists under container '{container}'".format( blob=blob, container=container)) def isdir(self, path): """ Azure Blob Storage has no concept of directories. It always returns False :param str path: Path of the Azure blob storage :return: False """ return False def move(self, path, dest): try: return self.copy(path, dest) and self.remove(path) except IOError: self.remove(dest) return False def copy(self, path, dest): source_container, source_blob = self.splitfilepath(path) dest_container, dest_blob = self.splitfilepath(dest) if source_container != dest_container: raise Exception( "Can't copy blob from '{source_container}' to '{dest_container}'. 
File can be moved within container".format( source_container=source_container, dest_container=dest_container )) source_lease_id = self.connection.acquire_blob_lease(source_container, source_blob) destination_lease_id = self.connection.acquire_blob_lease(dest_container, dest_blob) if self.exists(dest) else None try: return self.connection.copy_blob(source_container, dest_blob, self.connection.make_blob_url( source_container, source_blob), destination_lease_id=destination_lease_id, source_lease_id=source_lease_id) finally: self.connection.release_blob_lease(source_container, source_blob, source_lease_id) if destination_lease_id is not None: self.connection.release_blob_lease(dest_container, dest_blob, destination_lease_id) def rename_dont_move(self, path, dest): self.move(path, dest) @staticmethod def splitfilepath(filepath): splitpath = filepath.split("/") container = splitpath[0] blobsplit = splitpath[1:] blob = None if not blobsplit else "/".join(blobsplit) return container, blob class ReadableAzureBlobFile: def __init__(self, container, blob, client, download_when_reading, **kwargs): self.container = container self.blob = blob self.client = client self.closed = False self.download_when_reading = download_when_reading self.azure_blob_options = kwargs self.download_file_location = os.path.join(tempfile.mkdtemp(prefix=str(datetime.datetime.utcnow())), blob) self.fid = None def read(self, n=None): return self.client.download_as_bytes(self.container, self.blob, n) def __enter__(self): if self.download_when_reading: self.client.download_as_file(self.container, self.blob, self.download_file_location) self.fid = open(self.download_file_location) return self.fid else: return self def __exit__(self, exc_type, exc, traceback): self.close() def __del__(self): self.close() if os._exists(self.download_file_location): os.remove(self.download_file_location) def close(self): if self.download_when_reading: if self.fid is not None and not self.fid.closed: self.fid.close() self.fid = None def readable(self): return True def writable(self): return False def seekable(self): return False def seek(self, offset, whence=None): pass class AtomicAzureBlobFile(AtomicLocalFile): def __init__(self, container, blob, client, **kwargs): super(AtomicAzureBlobFile, self).__init__(os.path.join(container, blob)) self.container = container self.blob = blob self.client = client self.azure_blob_options = kwargs def move_to_final_destination(self): self.client.upload(self.tmp_path, self.container, self.blob, **self.azure_blob_options) class AzureBlobTarget(FileSystemTarget): """ Create an Azure Blob Target for storing data on Azure Blob Storage """ def __init__(self, container, blob, client=None, format=None, download_when_reading=True, **kwargs): """ :param str account_name: The storage account name. This is used to authenticate requests signed with an account key and to construct the storage endpoint. It is required unless a connection string is given, or if a custom domain is used with anonymous authentication. :param str container: The azure container in which the blob needs to be stored :param str blob: The name of the blob under container specified :param str client: An instance of :class:`.AzureBlobClient`. If none is specified, anonymous access would be used :param str format: An instance of :class:`luigi.format`. :param bool download_when_reading: Determines whether the file has to be downloaded to temporary location on disk. Defaults to `True`. 
Pass the argument **progress_callback** with signature *(func(current, total))* to get real time progress of upload """ super(AzureBlobTarget, self).__init__(os.path.join(container, blob)) if format is None: format = get_default_format() self.container = container self.blob = blob self.client = client or AzureBlobClient() self.format = format self.download_when_reading = download_when_reading self.azure_blob_options = kwargs @property def fs(self): """ The :py:class:`FileSystem` associated with :class:`.AzureBlobTarget` """ return self.client def open(self, mode): """ Open the target for reading or writing :param char mode: 'r' for reading and 'w' for writing. 'b' is not supported and will be stripped if used. For binary mode, use `format` :return: * :class:`.ReadableAzureBlobFile` if 'r' * :class:`.AtomicAzureBlobFile` if 'w' """ if mode not in ('r', 'w'): raise ValueError("Unsupported open mode '%s'" % mode) if mode == 'r': return self.format.pipe_reader(ReadableAzureBlobFile(self.container, self.blob, self.client, self.download_when_reading, **self.azure_blob_options)) else: return self.format.pipe_writer(AtomicAzureBlobFile(self.container, self.blob, self.client, **self.azure_blob_options))
1
19,938
If using `dict.get`, this could just be `kwargs.get('protocol', 'https')`.
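A small sketch of that suggestion (the _default_protocol wrapper is hypothetical, added only so the snippet runs standalone), with a note on the one behavioural difference from the patch's `or` fallback:

def _default_protocol(kwargs):
    # Suggested simplification: one dict.get call with a default, instead of
    # `kwargs.get("protocol") or "https"` as in the patch. The two only differ
    # when the caller explicitly passes protocol=None or protocol="": the `or`
    # form replaces those with "https", while dict.get with a default keeps them.
    kwargs["protocol"] = kwargs.get("protocol", "https")
    return kwargs


print(_default_protocol({}))                    # {'protocol': 'https'}
print(_default_protocol({"protocol": "http"}))  # {'protocol': 'http'}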
spotify-luigi
py
@@ -443,6 +443,14 @@ public class SurfaceNamer extends NameFormatterDelegator { return getNotImplementedString("SurfaceNamer.getAsyncApiMethodExampleName"); } + public String getGrpcStreamingApiMethodName(Method method) { + return getApiMethodName(method); + } + + public String getGrpcStreamingApiMethodExampleName(Interface interfaze, Method method) { + return getGrpcStreamingApiMethodName(method); + } + /** * The name of a variable to hold a value for the given proto message field * (such as a flattened parameter).
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer; import com.google.api.codegen.CollectionConfig; import com.google.api.codegen.MethodConfig; import com.google.api.codegen.util.CommonRenderingUtil; import com.google.api.codegen.util.Name; import com.google.api.codegen.util.NameFormatter; import com.google.api.codegen.util.NameFormatterDelegator; import com.google.api.codegen.util.NamePath; import com.google.api.codegen.util.SymbolTable; import com.google.api.codegen.util.TypeNameConverter; import com.google.api.tools.framework.aspects.documentation.model.DocumentationUtil; import com.google.api.tools.framework.model.Field; import com.google.api.tools.framework.model.Interface; import com.google.api.tools.framework.model.Method; import com.google.api.tools.framework.model.ProtoElement; import com.google.api.tools.framework.model.TypeRef; import io.grpc.Status; import java.util.ArrayList; import java.util.List; /** * A SurfaceNamer provides language-specific names for specific components of a view for a surface. * * Naming is composed of two steps: * * 1. Composing a Name instance with the name pieces * 2. Formatting the Name for the particular type of identifier needed. * * This class delegates step 2 to the provided name formatter, which generally * would be a language-specific namer. */ public class SurfaceNamer extends NameFormatterDelegator { private ModelTypeFormatter modelTypeFormatter; private TypeNameConverter typeNameConverter; public SurfaceNamer( NameFormatter languageNamer, ModelTypeFormatter modelTypeFormatter, TypeNameConverter typeNameConverter) { super(languageNamer); this.modelTypeFormatter = modelTypeFormatter; this.typeNameConverter = typeNameConverter; } public ModelTypeFormatter getModelTypeFormatter() { return modelTypeFormatter; } public TypeNameConverter getTypeNameConverter() { return typeNameConverter; } public String getNotImplementedString(String feature) { return "$ NOT IMPLEMENTED: " + feature + " $"; } /** The full path to the source file */ public String getSourceFilePath(String path, String className) { return getNotImplementedString("SurfaceNamer.getSourceFilePath"); } /** The name of the class that implements a particular proto interface. */ public String getApiWrapperClassName(Interface interfaze) { return className(Name.upperCamel(interfaze.getSimpleName(), "Api")); } /** The name of the implementation class that implements a particular proto interface. */ public String getApiWrapperClassImplName(Interface interfaze) { return getNotImplementedString("SurfaceNamer.getApiWrapperClassImplName"); } /** * The name of the constructor for the service client. * The client is VKit generated, not GRPC. */ public String getApiWrapperClassConstructorName(Interface interfaze) { return className(Name.upperCamel(interfaze.getSimpleName(), "Api")); } /** * The name of example of the constructor for the service client. * The client is VKit generated, not GRPC. 
*/ public String getApiWrapperClassConstructorExampleName(Interface interfaze) { return getApiWrapperClassConstructorName(interfaze); } /** * Constructor name for the type with the given nickname. */ public String getTypeConstructor(String typeNickname) { return typeNickname; } /** * The name of a variable that holds an instance of the class that implements * a particular proto interface. */ public String getApiWrapperVariableName(Interface interfaze) { return localVarName(Name.upperCamel(interfaze.getSimpleName(), "Api")); } /** * The name of a variable that holds an instance of the module that contains * the implementation of a particular proto interface. So far it is used by * just NodeJS. */ public String getApiWrapperModuleName(Interface interfaze) { return getNotImplementedString("SurfaceNamer.getApiWrapperModuleName"); } /** * The name of the settings class for a particular proto interface; * not used in most languages. */ public String getApiSettingsClassName(Interface interfaze) { return className(Name.upperCamel(interfaze.getSimpleName(), "Settings")); } /** The function name to retrieve default client option */ public String getDefaultApiSettingsFunctionName(Interface service) { return getNotImplementedString("SurfaceNamer.getDefaultClientOptionFunctionName"); } /** * The name of a variable that holds the settings class for a particular * proto interface; not used in most languages. */ public String getApiSettingsVariableName(Interface interfaze) { return localVarName(Name.upperCamel(interfaze.getSimpleName(), "Settings")); } /** * The name of the builder class for the settings class for a particular * proto interface; not used in most languages. */ public String getApiSettingsBuilderVarName(Interface interfaze) { return localVarName(Name.upperCamel(interfaze.getSimpleName(), "SettingsBuilder")); } /** The variable name for the given identifier that is formatted. */ public String getFormattedVariableName(Name identifier) { return localVarName(Name.from("formatted").join(identifier)); } /** The name of the field. */ public String getFieldName(Field field) { return publicFieldName(Name.from(field.getSimpleName())); } /** The function name to set the given proto field. */ public String getFieldSetFunctionName(FeatureConfig featureConfig, Field field) { if (featureConfig.useResourceNameFormatOption(field)) { return getResourceNameFieldSetFunctionName(field.getType(), Name.from(field.getSimpleName())); } else { return getFieldSetFunctionName(field.getType(), Name.from(field.getSimpleName())); } } /** The function name to set a field having the given type and name. */ public String getFieldSetFunctionName(TypeRef type, Name identifier) { if (type.isMap()) { return publicMethodName(Name.from("put", "all").join(identifier)); } else if (type.isRepeated()) { return publicMethodName(Name.from("add", "all").join(identifier)); } else { return publicMethodName(Name.from("set").join(identifier)); } } public String getResourceNameFieldSetFunctionName(TypeRef type, Name identifier) { if (type.isMap()) { return getNotImplementedString("SurfaceNamer.getResourceNameFieldSetFunctionName:map-type"); } else if (type.isRepeated()) { return publicMethodName(Name.from("add", "all").join(identifier).join("with_resources")); } else { return publicMethodName(Name.from("set").join(identifier).join("with_resource")); } } /** The function name to get the given proto field. 
*/ public String getFieldGetFunctionName(FeatureConfig featureConfig, Field field) { if (featureConfig.useResourceNameFormatOption(field)) { return getResourceNameFieldGetFunctionName(field.getType(), Name.from(field.getSimpleName())); } else { return getFieldGetFunctionName(field.getType(), Name.from(field.getSimpleName())); } } /** The function name to get a field having the given type and name. */ public String getFieldGetFunctionName(TypeRef type, Name identifier) { if (type.isRepeated() && !type.isMap()) { return publicMethodName(Name.from("get").join(identifier).join("list")); } else { return publicMethodName(Name.from("get").join(identifier)); } } public String getResourceNameFieldGetFunctionName(TypeRef type, Name identifier) { if (type.isMap()) { return getNotImplementedString("SurfaceNamer.getResourceNameFieldGetFunctionName:map-type"); } else if (type.isRepeated()) { return publicMethodName(Name.from("get").join(identifier).join("list").join("as_resources")); } else { return publicMethodName(Name.from("get").join(identifier).join("as_resource")); } } /** * The function name to get the count of elements in the given field. * * @throws IllegalArgumentException if the field is not a repeated field. */ public String getFieldCountGetFunctionName(Field field) { if (field.isRepeated()) { return publicMethodName(Name.from("get", field.getSimpleName(), "count")); } else { throw new IllegalArgumentException( "Non-repeated field " + field.getSimpleName() + " has no count function."); } } /** * The function name to get an element by index from the given field. * * @throws IllegalArgumentException if the field is not a repeated field. */ public String getByIndexGetFunctionName(Field field) { if (field.isRepeated()) { return publicMethodName(Name.from("get", field.getSimpleName())); } else { throw new IllegalArgumentException( "Non-repeated field " + field.getSimpleName() + " has no get-by-index function."); } } /** * The name of the package */ public String getLocalPackageName() { return getNotImplementedString("SurfaceNamer.getLocalPackageName"); } /** * The name of the example package */ public String getExamplePackageName() { return getNotImplementedString("SurfaceNamer.getExamplePackageName"); } /** * The name of a path template constant for the given collection, * to be held in an API wrapper class. */ public String getPathTemplateName(Interface service, CollectionConfig collectionConfig) { return inittedConstantName(Name.from(collectionConfig.getEntityName(), "path", "template")); } /** The name of a getter function to get a particular path template for the given collection. */ public String getPathTemplateNameGetter(Interface service, CollectionConfig collectionConfig) { return publicMethodName(Name.from("get", collectionConfig.getEntityName(), "name", "template")); } /** The name of the path template resource, in human format. */ public String getPathTemplateResourcePhraseName(CollectionConfig collectionConfig) { return Name.from(collectionConfig.getEntityName()).toPhrase(); } /** The function name to format the entity for the given collection. */ public String getFormatFunctionName(CollectionConfig collectionConfig) { return staticFunctionName(Name.from("format", collectionConfig.getEntityName(), "name")); } /** * The function name to parse a variable from the string representing the entity for * the given collection. 
*/ public String getParseFunctionName(String var, CollectionConfig collectionConfig) { return staticFunctionName( Name.from("parse", var, "from", collectionConfig.getEntityName(), "name")); } /** The entity name for the given collection. */ public String getEntityName(CollectionConfig collectionConfig) { return localVarName(Name.from(collectionConfig.getEntityName())); } /** The parameter name for the entity for the given collection config. */ public String getEntityNameParamName(CollectionConfig collectionConfig) { return localVarName(Name.from(collectionConfig.getEntityName(), "name")); } /** The parameter name for the given lower-case field name. */ public String getParamName(String var) { return localVarName(Name.from(var)); } /** The documentation name of a parameter for the given lower-case field name. */ public String getParamDocName(String var) { return localVarName(Name.from(var)); } /** The method name of the retry filter for the given key */ public String retryFilterMethodName(String key) { return privateMethodName(Name.from(key).join("retry").join("filter")); } /** The method name of the retry backoff for the given key */ public String retryBackoffMethodName(String key) { return privateMethodName(Name.from("get").join(key).join("retry").join("backoff")); } /** The method name of the timeout backoff for the given key */ public String timeoutBackoffMethodName(String key) { return privateMethodName(Name.from("get").join(key).join("timeout").join("backoff")); } /** The page streaming descriptor name for the given method. */ public String getPageStreamingDescriptorName(Method method) { return privateFieldName(Name.upperCamel(method.getSimpleName(), "PageStreamingDescriptor")); } /** The name of the constant to hold the page streaming descriptor for the given method. */ public String getPageStreamingDescriptorConstName(Method method) { return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("page_str_desc")); } /** The name of the constant to hold the bundling descriptor for the given method. */ public String getBundlingDescriptorConstName(Method method) { return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("bundling_desc")); } /** Adds the imports used in the implementation of page streaming descriptors. */ public void addPageStreamingDescriptorImports(ModelTypeTable typeTable) { // do nothing } /** Adds the imports used in the implementation of bundling descriptors. */ public void addBundlingDescriptorImports(ModelTypeTable typeTable) { // do nothing } /** Adds the imports used for page streaming call settings. */ public void addPageStreamingCallSettingsImports(ModelTypeTable typeTable) { // do nothing } /** Adds the imports used for bundling call settings. */ public void addBundlingCallSettingsImports(ModelTypeTable typeTable) { // do nothing } /** The key to use in a dictionary for the given method. */ public String getMethodKey(Method method) { return keyName(Name.upperCamel(method.getSimpleName())); } /** The path to the client config for the given interface. */ public String getClientConfigPath(Interface service) { return getNotImplementedString("SurfaceNamer.getClientConfigPath"); } /** * Human-friendly name of this service */ public String getServicePhraseName(Interface service) { return Name.upperCamel(service.getSimpleName()).toPhrase(); } /** * The type name of the Grpc client class. * This needs to match what Grpc generates for the particular language. 
*/ public String getGrpcClientTypeName(Interface service) { NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service)); String className = className(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Client")); return qualifiedName(namePath.withHead(className)); } /** * The type name of the Grpc container class. * This needs to match what Grpc generates for the particular language. */ public String getGrpcContainerTypeName(Interface service) { NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service)); String className = className(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Grpc")); return qualifiedName(namePath.withHead(className)); } /** * The type name of the Grpc service class * This needs to match what Grpc generates for the particular language. */ public String getGrpcServiceClassName(Interface service) { NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service)); String grpcContainerName = className(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Grpc")); String serviceClassName = className(Name.upperCamelKeepUpperAcronyms(service.getSimpleName(), "ImplBase")); return qualifiedName(namePath.withHead(grpcContainerName).append(serviceClassName)); } /** * The type name of the method constant in the Grpc container class. * This needs to match what Grpc generates for the particular language. */ public String getGrpcMethodConstant(Method method) { return inittedConstantName( Name.from("method").join(Name.upperCamelKeepUpperAcronyms(method.getSimpleName()))); } /** The name of the surface method which can call the given API method. */ public String getApiMethodName(Method method) { return publicMethodName(Name.upperCamel(method.getSimpleName())); } /** The name of the example for the method. */ public String getApiMethodExampleName(Interface interfaze, Method method) { return getApiMethodName(method); } /** The name of the async surface method which can call the given API method. */ public String getAsyncApiMethodName(Method method) { return getNotImplementedString("SurfaceNamer.getAsyncApiMethodName"); } public String getAsyncApiMethodExampleName(Method method) { return getNotImplementedString("SurfaceNamer.getAsyncApiMethodExampleName"); } /** * The name of a variable to hold a value for the given proto message field * (such as a flattened parameter). */ public String getVariableName(Field field) { return localVarName(Name.from(field.getSimpleName())); } /** * The name of a field as a method. */ public String getFieldAsMethodName(Field field) { return privateMethodName(Name.from(field.getSimpleName())); } /** * Returns true if the request object param type for the given field should be imported. */ public boolean shouldImportRequestObjectParamType(Field field) { return true; } /** * Returns true if the request object param element type for the given field should be imported. */ public boolean shouldImportRequestObjectParamElementType(Field field) { return true; } /** Converts the given text to doc lines in the format of the current language. */ public List<String> getDocLines(String text) { return CommonRenderingUtil.getDocLines(text); } /** Provides the doc lines for the given proto element in the current language. */ public List<String> getDocLines(ProtoElement element) { return getDocLines(DocumentationUtil.getDescription(element)); } /** The doc lines that declare what exception(s) are thrown for an API method. 
*/ public List<String> getThrowsDocLines() { return new ArrayList<>(); } /** The doc lines that describe the return value for an API method. */ public List<String> getReturnDocLines( SurfaceTransformerContext context, MethodConfig methodConfig, Synchronicity synchronicity) { return new ArrayList<>(); } /** The public access modifier for the current language. */ public String getPublicAccessModifier() { return "public"; } /** The private access modifier for the current language. */ public String getPrivateAccessModifier() { return "private"; } /** * The name used in Grpc for the given API method. * This needs to match what Grpc generates. */ public String getGrpcMethodName(Method method) { // This might seem silly, but it makes clear what we're dealing with (upper camel). // This is language-independent because of gRPC conventions. return Name.upperCamelKeepUpperAcronyms(method.getSimpleName()).toUpperCamel(); } /** The type name for retry settings. */ public String getRetrySettingsTypeName() { return getNotImplementedString("SurfaceNamer.getRetrySettingsClassName"); } /** The type name for an optional array argument; not used in most languages. */ public String getOptionalArrayTypeName() { return getNotImplementedString("SurfaceNamer.getOptionalArrayTypeName"); } /** The return type name in a dynamic language for the given method. */ public String getDynamicLangReturnTypeName(Method method, MethodConfig methodConfig) { return getNotImplementedString("SurfaceNamer.getDynamicReturnTypeName"); } /** The return type name in a static language for the given method. */ public String getStaticLangReturnTypeName(Method method, MethodConfig methodConfig) { return getNotImplementedString("SurfaceNamer.getStaticLangReturnTypeName"); } /** The async return type name in a static language for the given method. */ public String getStaticLangAsyncReturnTypeName(Method method, MethodConfig methodConfig) { return getNotImplementedString("SurfaceNamer.getStaticLangAsyncReturnTypeName"); } /** The name of the paged callable variant of the given method. */ public String getPagedCallableMethodName(Method method) { return publicMethodName(Name.upperCamel(method.getSimpleName(), "PagedCallable")); } /** The name of the example for the paged callable variant. */ public String getPagedCallableMethodExampleName(Interface interfaze, Method method) { return getPagedCallableMethodName(method); } /** The name of the callable for the paged callable variant of the given method. */ public String getPagedCallableName(Method method) { return privateFieldName(Name.upperCamel(method.getSimpleName(), "PagedCallable")); } /** The name of the plain callable variant of the given method. */ public String getCallableMethodName(Method method) { return publicMethodName(Name.upperCamel(method.getSimpleName(), "Callable")); } /** The name of the example for the plain callable variant. */ public String getCallableMethodExampleName(Interface interfaze, Method method) { return getCallableMethodName(method); } /** The name of the plain callable for the given method. */ public String getCallableName(Method method) { return privateFieldName(Name.upperCamel(method.getSimpleName(), "Callable")); } /** The name of the settings member name for the given method. */ public String getSettingsMemberName(Method method) { return publicMethodName(Name.upperCamel(method.getSimpleName(), "Settings")); } /** The getter function name for the settings for the given method. 
*/ public String getSettingsFunctionName(Method method) { return getSettingsMemberName(method); } /** The type name of call options */ public String getCallSettingsTypeName(Interface service) { return className(Name.upperCamel(service.getSimpleName(), "Settings")); } /** The function name to retrieve default call option */ public String getDefaultCallSettingsFunctionName(Interface service) { return publicMethodName(Name.upperCamel(service.getSimpleName(), "Settings")); } /** * The generic-aware response type name for the given type. * For example, in Java, this will be the type used for ListenableFuture&lt;...&gt;. */ public String getGenericAwareResponseTypeName(TypeRef outputType) { return getNotImplementedString("SurfaceNamer.getGenericAwareResponseType"); } /** * The function name to get the given proto field as a list. * * @throws IllegalArgumentException if the field is not a repeated field. */ public String getGetResourceListCallName(Field resourcesField) { if (resourcesField.isRepeated()) { return publicMethodName(Name.from("get", resourcesField.getSimpleName(), "list")); } else { throw new IllegalArgumentException( "Non-repeated field " + resourcesField.getSimpleName() + " cannot be accessed as a list."); } } /** * Computes the nickname of the response type name for the given input and output types and * resources field, saves it in the given type table, and returns it. */ public String getAndSavePagedResponseTypeName( FeatureConfig featureConfig, ModelTypeTable typeTable, TypeRef inputTypeName, TypeRef outputTypeName, Field resourcesField) { return getNotImplementedString("SurfaceNamer.getAndSavePagedResponseTypeName"); } public String getAndSaveFieldTypeName( FeatureConfig featureConfig, ModelTypeTable typeTable, Field resourceField) { return typeTable.getAndSaveNicknameFor(resourceField.getType()); } public String getAndSaveElementFieldTypeName( FeatureConfig featureConfig, ModelTypeTable typeTable, Field resourceField) { return typeTable.getAndSaveNicknameForElementType(resourceField.getType()); } /** * The test case name for the given method. */ public String getTestCaseName(SymbolTable symbolTable, Method method) { Name testCaseName = symbolTable.getNewSymbol(Name.upperCamel(method.getSimpleName(), "Test")); return publicMethodName(testCaseName); } /** The unit test class name for the given API service. */ public String getUnitTestClassName(Interface service) { return className(Name.upperCamel(service.getSimpleName(), "Test")); } /** The smoke test class name for the given API service. */ public String getSmokeTestClassName(Interface service) { return className(Name.upperCamel(service.getSimpleName(), "Smoke", "Test")); } /** The class name of the mock gRPC service for the given API service. */ public String getMockServiceClassName(Interface service) { return className(Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName())); } /** The class name of a variable to hold the mock gRPC service for the given API service. */ public String getMockServiceVarName(Interface service) { return localVarName(Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName())); } /** The class name of the mock gRPC service implementation for the given API service. */ public String getMockGrpcServiceImplName(Interface service) { return className(Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName(), "Impl")); } /** The file name for an API service. 
*/ public String getServiceFileName(Interface service, String packageName) { return getNotImplementedString("SurfaceNamer.getServiceFileName"); } /** The file name for the example of an API service. */ public String getExampleFileName(Interface service, String packageName) { return getNotImplementedString("SurfaceNamer.getExampleFileName"); } /** * The fully qualified class name of a an API service. * TODO: Support the general pattern of package + class name in NameFormatter. */ public String getFullyQualifiedApiWrapperClassName(Interface interfaze, String packageName) { return getNotImplementedString("SurfaceNamer.getFullyQualifiedApiWrapperClassName"); } /** The name of the variable that will hold the stub for a service. */ public String getStubName(Interface service) { return privateFieldName(Name.upperCamel(service.getSimpleName(), "Stub")); } /** The name of the function that will create a stub. */ public String getCreateStubFunctionName(Interface service) { return privateMethodName( Name.upperCamel("Create", service.getSimpleName(), "Stub", "Function")); } /** The name of the array which will hold the methods for a given stub. */ public String getStubMethodsArrayName(Interface service) { return privateMethodName(Name.upperCamel(service.getSimpleName(), "Stub", "Methods")); } /** The name of the import for a specific grpcClient */ public String getGrpcClientImportName(Interface service) { return getNotImplementedString("SurfaceNamer.getGrpcClientImportName"); } /** The fully qualified type name for the stub of a service. */ public String getFullyQualifiedStubType(Interface service) { return getNotImplementedString("SurfaceNamer.getFullyQualifiedStubType"); } /** The name of the variable to hold the grpc client of a service. */ public String getGrpcClientVariableName(Interface service) { return localVarName(Name.upperCamel(service.getSimpleName(), "Client")); } /** The qualified namespace of a service. */ public String getNamespace(Interface service) { NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service)); return qualifiedName(namePath.withoutHead()); } public String getProtoFileImportFromService(Interface service) { return getNotImplementedString("SurfaceNamer.getProtoFileImportFromService"); } /** * Returns the service name with common suffixes removed. * * For example: * "LoggingServiceV2" becomes Name("Logging") */ public Name getReducedServiceName(Interface service) { String name = service.getSimpleName().replaceAll("V[0-9]+$", ""); name = name.replaceAll("Service$", ""); return Name.upperCamel(name); } /** The name of an RPC status code */ public String getStatusCodeName(Status.Code code) { return privateMethodName(Name.upperUnderscore(code.toString())); } /* The name of a retry definition */ public String getRetryDefinitionName(String retryDefinitionKey) { return privateMethodName(Name.from(retryDefinitionKey)); } }
1
18,247
Add docs to the new methods here
googleapis-gapic-generator
java
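The review message for this record ("Add docs to the new methods here") points at the undocumented getAndSaveFieldTypeName and getAndSaveElementFieldTypeName helpers in the SurfaceNamer source above. A minimal sketch of how those methods might look once documented; the javadoc wording below is illustrative only and is not taken from the repository:

  /** The type name of the given resource field, saved in the given type table. */
  public String getAndSaveFieldTypeName(
      FeatureConfig featureConfig, ModelTypeTable typeTable, Field resourceField) {
    return typeTable.getAndSaveNicknameFor(resourceField.getType());
  }

  /** The element type name of the given repeated resource field, saved in the given type table. */
  public String getAndSaveElementFieldTypeName(
      FeatureConfig featureConfig, ModelTypeTable typeTable, Field resourceField) {
    return typeTable.getAndSaveNicknameForElementType(resourceField.getType());
  }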
@@ -28,9 +28,11 @@ namespace Nethermind.Trie.Pruning _memoryLimit = memoryLimit; } + public bool Enabled => true; + public bool ShouldPrune(in long currentMemory) { return currentMemory >= _memoryLimit; } } -} +}
1
// Copyright (c) 2020 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System.Diagnostics; namespace Nethermind.Trie.Pruning { [DebuggerDisplay("{_memoryLimit/(1024*1024)}MB")] public class MemoryLimit : IPruningStrategy { private readonly long _memoryLimit; public MemoryLimit(long memoryLimit) { _memoryLimit = memoryLimit; } public bool ShouldPrune(in long currentMemory) { return currentMemory >= _memoryLimit; } } }
1
24,779
What does Enabled mean here?
NethermindEth-nethermind
.cs
@@ -128,7 +128,7 @@ public class SmartStore { public static synchronized void changeKey(SQLiteDatabase db, String oldKey, String newKey) { synchronized(db) { if (newKey != null && !newKey.trim().equals("")) { - db.execSQL("PRAGMA rekey = '" + newKey + "'"); + db.query("PRAGMA rekey = '" + newKey + "'"); DBOpenHelper.reEncryptAllFiles(db, oldKey, newKey); } }
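For context on the patch above: SmartStore.changeKey rotates the SQLCipher encryption key with PRAGMA rekey and then asks DBOpenHelper to re-encrypt externally stored soup blobs. A minimal usage sketch, assuming the database was opened with the old key the same way the file itself does via getWritableDatabase(encryptionKey); the surrounding class and method names are hypothetical and error handling is omitted:

import com.salesforce.androidsdk.smartstore.store.SmartStore;
import net.sqlcipher.database.SQLiteDatabase;
import net.sqlcipher.database.SQLiteOpenHelper;

public class KeyRotationExample {

    // Rotate the store's encryption key from oldKey to newKey.
    static void rotateKey(SQLiteOpenHelper dbOpenHelper, String oldKey, String newKey) {
        // Open (or reuse) the database with the current key, mirroring SmartStore.getDatabase().
        SQLiteDatabase db = dbOpenHelper.getWritableDatabase(oldKey);

        // Rekey the database and re-encrypt any external blobs in one call.
        SmartStore.changeKey(db, oldKey, newKey);
    }
}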
1
/* * Copyright (c) 2012-present, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.smartstore.store; import android.content.ContentValues; import android.database.Cursor; import android.text.TextUtils; import androidx.annotation.NonNull; import com.salesforce.androidsdk.analytics.EventBuilderHelper; import com.salesforce.androidsdk.app.SalesforceSDKManager; import com.salesforce.androidsdk.smartstore.store.LongOperation.LongOperationType; import com.salesforce.androidsdk.smartstore.store.QuerySpec.QueryType; import com.salesforce.androidsdk.smartstore.util.SmartStoreLogger; import com.salesforce.androidsdk.util.JSONObjectHelper; import net.sqlcipher.database.SQLiteDatabase; import net.sqlcipher.database.SQLiteOpenHelper; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.io.File; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; /** * Smart store * * Provides a secure means for SalesforceMobileSDK Container-based applications to store objects in a persistent * and searchable manner. Similar in some ways to CouchDB, SmartStore stores documents as JSON values. * SmartStore is inspired by the Apple Newton OS Soup/Store model. * The main challenge here is how to effectively store documents with dynamic fields, and still allow indexing and searching. */ public class SmartStore { private static final String TAG = "SmartStore"; // Table to keep track of soup names and attributes. 
public static final String SOUP_ATTRS_TABLE = "soup_attrs"; // Fts table suffix public static final String FTS_SUFFIX = "_fts"; // Table to keep track of soup's index specs public static final String SOUP_INDEX_MAP_TABLE = "soup_index_map"; // Table to keep track of status of long operations in flight protected static final String LONG_OPERATIONS_STATUS_TABLE = "long_operations_status"; // Columns of the soup index map table public static final String SOUP_NAME_COL = "soupName"; public static final String PATH_COL = "path"; protected static final String COLUMN_NAME_COL = "columnName"; public static final String COLUMN_TYPE_COL = "columnType"; // Columns of a soup table protected static final String ID_COL = "id"; protected static final String CREATED_COL = "created"; protected static final String LAST_MODIFIED_COL = "lastModified"; protected static final String SOUP_COL = "soup"; // Column of a fts soup table protected static final String ROWID_COL = "rowid"; // Columns of long operations status table protected static final String TYPE_COL = "type"; protected static final String DETAILS_COL = "details"; protected static final String STATUS_COL = "status"; // JSON fields added to soup element on insert/update public static final String SOUP_ENTRY_ID = "_soupEntryId"; public static final String SOUP_LAST_MODIFIED_DATE = "_soupLastModifiedDate"; public static final String SOUP_CREATED_DATE = "_soupCreatedDate"; // Predicates protected static final String SOUP_NAME_PREDICATE = SOUP_NAME_COL + " = ?"; protected static final String ID_PREDICATE = ID_COL + " = ?"; protected static final String ROWID_PREDICATE = ROWID_COL + " =?"; // Backing database protected SQLiteDatabase dbLocal; protected SQLiteOpenHelper dbOpenHelper; protected String encryptionKey; // FTS extension to use protected FtsExtension ftsExtension = FtsExtension.fts5; // background executor private final ExecutorService threadPool = Executors.newFixedThreadPool(1); /** * Changes the encryption key on the smartstore. * * @param db Database object. * @param oldKey Old encryption key. * @param newKey New encryption key. 
*/ public static synchronized void changeKey(SQLiteDatabase db, String oldKey, String newKey) { synchronized(db) { if (newKey != null && !newKey.trim().equals("")) { db.execSQL("PRAGMA rekey = '" + newKey + "'"); DBOpenHelper.reEncryptAllFiles(db, oldKey, newKey); } } } /** * Create soup index map table to keep track of soups' index specs * Create soup name map table to keep track of soup name to table name mappings * Called when the database is first created * * @param db */ public static void createMetaTables(SQLiteDatabase db) { synchronized(db) { // Create soup_index_map table StringBuilder sb = new StringBuilder(); sb.append("CREATE TABLE ").append(SOUP_INDEX_MAP_TABLE).append(" (") .append(SOUP_NAME_COL).append(" TEXT") .append(",").append(PATH_COL).append(" TEXT") .append(",").append(COLUMN_NAME_COL).append(" TEXT") .append(",").append(COLUMN_TYPE_COL).append(" TEXT") .append(")"); db.execSQL(sb.toString()); // Add index on soup_name column db.execSQL(String.format("CREATE INDEX %s on %s ( %s )", SOUP_INDEX_MAP_TABLE + "_0", SOUP_INDEX_MAP_TABLE, SOUP_NAME_COL)); // Create soup_names table // The table name for the soup will simply be table_<soupId> sb = new StringBuilder(); sb.append("CREATE TABLE ").append(SOUP_ATTRS_TABLE).append(" (") .append(ID_COL).append(" INTEGER PRIMARY KEY AUTOINCREMENT") .append(",").append(SOUP_NAME_COL).append(" TEXT"); // Create columns for all possible soup features for (String feature : SoupSpec.ALL_FEATURES) { sb.append(",").append(feature).append(" INTEGER DEFAULT 0"); } sb.append(")"); db.execSQL(sb.toString()); // Add index on soup_name column db.execSQL(String.format("CREATE INDEX %s on %s ( %s )", SOUP_ATTRS_TABLE + "_0", SOUP_ATTRS_TABLE, SOUP_NAME_COL)); // Create alter_soup_status table createLongOperationsStatusTable(db); } } /** * Create long_operations_status table * @param db */ public static void createLongOperationsStatusTable(SQLiteDatabase db) { synchronized(db) { StringBuilder sb = new StringBuilder(); sb.append("CREATE TABLE IF NOT EXISTS ").append(LONG_OPERATIONS_STATUS_TABLE).append(" (") .append(ID_COL).append(" INTEGER PRIMARY KEY AUTOINCREMENT") .append(",").append(TYPE_COL).append(" TEXT") .append(",").append(DETAILS_COL).append(" TEXT") .append(",").append(STATUS_COL).append(" TEXT") .append(", ").append(CREATED_COL).append(" INTEGER") .append(", ").append(LAST_MODIFIED_COL).append(" INTEGER") .append(")"); db.execSQL(sb.toString()); } } /** * Relies on SQLiteOpenHelper for database handling. * * @param dbOpenHelper DB open helper. * @param encryptionKey Encryption key. */ public SmartStore(SQLiteOpenHelper dbOpenHelper, String encryptionKey) { this.dbOpenHelper = dbOpenHelper; this.encryptionKey = encryptionKey; } /** * Package-level constructor. Should be used in tests only. * * @param db Database. 
*/ SmartStore(SQLiteDatabase db) { this.dbLocal = db; } /** * Return db */ public SQLiteDatabase getDatabase() { if (dbLocal != null) { return dbLocal; } else { return this.dbOpenHelper.getWritableDatabase(encryptionKey); } } /** * If turned on, explain query plan is run before executing a query and stored in lastExplainQueryPlan * and also get logged * @param captureExplainQueryPlan true to turn capture on and false to turn off */ public void setCaptureExplainQueryPlan(boolean captureExplainQueryPlan) { DBHelper.getInstance(getDatabase()).setCaptureExplainQueryPlan(captureExplainQueryPlan); } /** * @return explain query plan for last query run (if captureExplainQueryPlan is true) */ public JSONObject getLastExplainQueryPlan() { return DBHelper.getInstance(getDatabase()).getLastExplainQueryPlan(); } /** * Get database size */ public int getDatabaseSize() { int size = (int) (new File(getDatabase().getPath()).length()); // XXX That cast will be trouble if the file is more than 2GB if (dbOpenHelper instanceof DBOpenHelper) { size += ((DBOpenHelper) dbOpenHelper).getSizeOfDir(null); } return size; } /** * Start transaction * NB: to avoid deadlock, caller should have synchronized(store.getDatabase()) around the whole transaction */ public void beginTransaction() { getDatabase().beginTransaction(); } /** * End transaction (commit or rollback) */ public void endTransaction() { getDatabase().endTransaction(); } /** * Mark transaction as successful (next call to endTransaction will be a commit) */ public void setTransactionSuccessful() { getDatabase().setTransactionSuccessful(); } /** * Register a soup without any features. Use {@link #registerSoupWithSpec(SoupSpec, IndexSpec[])} to enable features such as external storage, etc. * * Create table for soupName with a column for the soup itself and columns for paths specified in indexSpecs * Create indexes on the new table to make lookup faster * Create rows in soup index map table for indexSpecs * @param soupName * @param indexSpecs */ public void registerSoup(String soupName, IndexSpec[] indexSpecs) { registerSoupWithSpec(new SoupSpec(soupName), indexSpecs); } /** * Register a soup using the given soup specifications. This allows the soup to use extra features such as external storage. 
* * Create table for soupName with a column for the soup itself and columns for paths specified in indexSpecs * Create indexes on the new table to make lookup faster * Create rows in soup index map table for indexSpecs * @param soupSpec * @param indexSpecs */ public void registerSoupWithSpec(final SoupSpec soupSpec, final IndexSpec[] indexSpecs) { final SQLiteDatabase db = getDatabase(); synchronized (db) { String soupName = soupSpec.getSoupName(); if (soupName == null) throw new SmartStoreException("Bogus soup name:" + soupName); if (indexSpecs.length == 0) throw new SmartStoreException("No indexSpecs specified for soup: " + soupName); if (IndexSpec.hasJSON1(indexSpecs) && soupSpec.getFeatures().contains(SoupSpec.FEATURE_EXTERNAL_STORAGE)) throw new SmartStoreException("Can't have JSON1 index specs in externally stored soup:" + soupName); if (hasSoup(soupName)) return; // soup already exist - do nothing // First get a table name String soupTableName = null; ContentValues soupMapValues = new ContentValues(); soupMapValues.put(SOUP_NAME_COL, soupName); // Register features from soup spec for (String feature : soupSpec.getFeatures()) { soupMapValues.put(feature, 1); } try { db.beginTransaction(); long soupId = DBHelper.getInstance(db).insert(db, SOUP_ATTRS_TABLE, soupMapValues); soupTableName = getSoupTableName(soupId); // Do the rest - create table / indexes registerSoupUsingTableName(soupSpec, indexSpecs, soupTableName); db.setTransactionSuccessful(); } finally { db.endTransaction(); } if (SalesforceSDKManager.getInstance().getIsTestRun()) { logRegisterSoupEvent(soupSpec, indexSpecs); } else { threadPool.execute(new Runnable() { @Override public void run() { logRegisterSoupEvent(soupSpec, indexSpecs); } }); } } } /** * Log the soup event. * @param soupSpec * @param indexSpecs */ private void logRegisterSoupEvent(final SoupSpec soupSpec, final IndexSpec[] indexSpecs) { final JSONArray features = new JSONArray(); if (IndexSpec.hasJSON1(indexSpecs)) { features.put("JSON1"); } if (IndexSpec.hasFTS(indexSpecs)) { features.put("FTS"); } if (soupSpec.getFeatures().contains(SoupSpec.FEATURE_EXTERNAL_STORAGE)) { features.put("ExternalStorage"); } final JSONObject attributes = new JSONObject(); try { attributes.put("features", features); } catch (JSONException e) { SmartStoreLogger.e(TAG, "Exception thrown while building page object", e); } EventBuilderHelper.createAndStoreEventSync("registerSoup", null, TAG, attributes); } /** * Helper method for registerSoup using soup spec * * @param soupSpec * @param indexSpecs * @param soupTableName */ protected void registerSoupUsingTableName(SoupSpec soupSpec, IndexSpec[] indexSpecs, String soupTableName) { // Prepare SQL for creating soup table and its indices StringBuilder createTableStmt = new StringBuilder(); // to create new soup table StringBuilder createFtsStmt = new StringBuilder(); // to create fts table List<String> createIndexStmts = new ArrayList<String>(); // to create indices on new soup table List<ContentValues> soupIndexMapInserts = new ArrayList<ContentValues>(); // to be inserted in soup index map table IndexSpec[] indexSpecsToCache = new IndexSpec[indexSpecs.length]; List<String> columnsForFts = new ArrayList<String>(); String soupName = soupSpec.getSoupName(); createTableStmt.append("CREATE TABLE ").append(soupTableName).append(" (") .append(ID_COL).append(" INTEGER PRIMARY KEY AUTOINCREMENT"); if (!usesExternalStorage(soupName)) { // If external storage is used, do not add column for soup in the db since it will be empty. 
createTableStmt.append(", ").append(SOUP_COL).append(" TEXT"); } createTableStmt.append(", ").append(CREATED_COL).append(" INTEGER") .append(", ").append(LAST_MODIFIED_COL).append(" INTEGER"); final String createIndexFormat = "CREATE INDEX %s_%s_idx on %s ( %s )"; for (String col : new String[]{CREATED_COL, LAST_MODIFIED_COL}) { createIndexStmts.add(String.format(createIndexFormat, soupTableName, col, soupTableName, col)); } int i = 0; for (IndexSpec indexSpec : indexSpecs) { // Column name or expression the db index is on String columnName = soupTableName + "_" + i; if (TypeGroup.value_indexed_with_json_extract.isMember(indexSpec.type)) { columnName = "json_extract(" + SOUP_COL + ", '$." + indexSpec.path + "')"; } // for create table if (TypeGroup.value_extracted_to_column.isMember(indexSpec.type)) { String columnType = indexSpec.type.getColumnType(); createTableStmt.append(", ").append(columnName).append(" ").append(columnType); } // for fts if (indexSpec.type == Type.full_text) { columnsForFts.add(columnName); } // for insert ContentValues values = new ContentValues(); values.put(SOUP_NAME_COL, soupName); values.put(PATH_COL, indexSpec.path); values.put(COLUMN_NAME_COL, columnName); values.put(COLUMN_TYPE_COL, indexSpec.type.toString()); soupIndexMapInserts.add(values); // for create index createIndexStmts.add(String.format(createIndexFormat, soupTableName, "" + i, soupTableName, columnName));; // for the cache indexSpecsToCache[i] = new IndexSpec(indexSpec.path, indexSpec.type, columnName); i++; } createTableStmt.append(")"); // fts if (columnsForFts.size() > 0) { createFtsStmt.append(String.format("CREATE VIRTUAL TABLE %s%s USING %s(%s)", soupTableName, FTS_SUFFIX, ftsExtension, TextUtils.join(",", columnsForFts))); } // Run SQL for creating soup table and its indices final SQLiteDatabase db = getDatabase(); db.execSQL(createTableStmt.toString()); if (columnsForFts.size() > 0) { db.execSQL(createFtsStmt.toString()); } for (String createIndexStmt : createIndexStmts) { db.execSQL(createIndexStmt.toString()); } try { db.beginTransaction(); for (ContentValues values : soupIndexMapInserts) { DBHelper.getInstance(db).insert(db, SOUP_INDEX_MAP_TABLE, values); } if (usesExternalStorage(soupName) && dbOpenHelper instanceof DBOpenHelper) { ((DBOpenHelper) dbOpenHelper).createExternalBlobsDirectory(soupTableName); } db.setTransactionSuccessful(); // Add to soupNameToTableNamesMap DBHelper.getInstance(db).cacheTableName(soupName, soupTableName); // Add to soupNameToIndexSpecsMap DBHelper.getInstance(db).cacheIndexSpecs(soupName, indexSpecsToCache); } finally { db.endTransaction(); } } /** * Finish long operations that were interrupted */ public void resumeLongOperations() { final SQLiteDatabase db = getDatabase(); synchronized(db) { for (LongOperation longOperation : getLongOperations()) { try { longOperation.run(); } catch (Exception e) { SmartStoreLogger.e(TAG, "Unexpected error", e); } } } } /** * @return unfinished long operations */ public LongOperation[] getLongOperations() { final SQLiteDatabase db = getDatabase(); List<LongOperation> longOperations = new ArrayList<LongOperation>(); synchronized(db) { Cursor cursor = null; try { cursor = DBHelper.getInstance(db).query(db, LONG_OPERATIONS_STATUS_TABLE, new String[] {ID_COL, TYPE_COL, DETAILS_COL, STATUS_COL}, null, null, null); if (cursor.moveToFirst()) { do { try { long rowId = cursor.getLong(0); LongOperationType operationType = LongOperationType.valueOf(cursor.getString(1)); JSONObject details = new JSONObject(cursor.getString(2)); 
String statusStr = cursor.getString(3); longOperations.add(operationType.getOperation(this, rowId, details, statusStr)); } catch (Exception e) { SmartStoreLogger.e(TAG, "Unexpected error", e); } } while (cursor.moveToNext()); } } finally { safeClose(cursor); } } return longOperations.toArray(new LongOperation[0]); } /** * Alter soup using only soup name without extra soup features. * * @param soupName * @param indexSpecs array of index specs * @param reIndexData * @throws JSONException */ public void alterSoup(String soupName, IndexSpec[] indexSpecs, boolean reIndexData) throws JSONException { alterSoup(soupName, new SoupSpec(soupName, new String[0]), indexSpecs, reIndexData); } /** * Alter soup with new soup spec. * * @param soupName name of soup to alter * @param soupSpec * @param indexSpecs array of index specs * @param reIndexData * @throws JSONException */ public void alterSoup(String soupName, SoupSpec soupSpec, IndexSpec[] indexSpecs, boolean reIndexData) throws JSONException { AlterSoupLongOperation operation = new AlterSoupLongOperation(this, soupName, soupSpec, indexSpecs, reIndexData); operation.run(); } /** * Re-index all soup elements for passed indexPaths * NB: only indexPath that have IndexSpec on them will be indexed * * @param soupName * @param indexPaths * @param handleTx */ public void reIndexSoup(String soupName, String[] indexPaths, boolean handleTx) { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); // Getting index specs from indexPaths skipping json1 index specs Map<String, IndexSpec> mapAllSpecs = IndexSpec.mapForIndexSpecs(getSoupIndexSpecs(soupName)); List<IndexSpec> indexSpecsList = new ArrayList<IndexSpec>(); for (String indexPath : indexPaths) { if (mapAllSpecs.containsKey(indexPath)) { IndexSpec indexSpec = mapAllSpecs.get(indexPath); if (TypeGroup.value_extracted_to_column.isMember(indexSpec.type)) { indexSpecsList.add(indexSpec); } } else { SmartStoreLogger.w(TAG, "Can not re-index " + indexPath + " - it does not have an index"); } } IndexSpec[] indexSpecs = indexSpecsList.toArray(new IndexSpec[0]); if (indexSpecs.length == 0) { // Nothing to do return; } boolean hasFts = IndexSpec.hasFTS(indexSpecs); if (handleTx) { db.beginTransaction(); } Cursor cursor = null; try { String[] projection; if (usesExternalStorage(soupName)) { projection = new String[] {ID_COL}; } else { projection = new String[] {ID_COL, SOUP_COL}; } cursor = DBHelper.getInstance(db).query(db, soupTableName, projection, null, null, null); if (cursor.moveToFirst()) { do { String soupEntryId = cursor.getString(0); try { JSONObject soupElt; if (usesExternalStorage(soupName) && dbOpenHelper instanceof DBOpenHelper) { soupElt = ((DBOpenHelper) dbOpenHelper).loadSoupBlob(soupTableName, Long.parseLong(soupEntryId), encryptionKey); } else { String soupRaw = cursor.getString(1); soupElt = new JSONObject(soupRaw); } ContentValues contentValues = new ContentValues(); projectIndexedPaths(soupElt, contentValues, indexSpecs, TypeGroup.value_extracted_to_column); DBHelper.getInstance(db).update(db, soupTableName, contentValues, ID_PREDICATE, soupEntryId + ""); // Fts if (hasFts) { String soupTableNameFts = soupTableName + FTS_SUFFIX; ContentValues contentValuesFts = new ContentValues(); projectIndexedPaths(soupElt, contentValuesFts, indexSpecs, TypeGroup.value_extracted_to_fts_column); 
DBHelper.getInstance(db).update(db, soupTableNameFts, contentValuesFts, ROWID_PREDICATE, soupEntryId + ""); } } catch (JSONException e) { SmartStoreLogger.w(TAG, "Could not parse soup element " + soupEntryId, e); // Should not have happen - just keep going } } while (cursor.moveToNext()); } } finally { if (handleTx) { db.setTransactionSuccessful(); db.endTransaction(); } safeClose(cursor); } } } /** * Return indexSpecs of soup * * @param soupName * @return */ public IndexSpec[] getSoupIndexSpecs(String soupName) { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); return DBHelper.getInstance(db).getIndexSpecs(db, soupName); } } /** * Clear all rows from a soup * @param soupName */ public void clearSoup(String soupName) { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); db.beginTransaction(); try { DBHelper.getInstance(db).delete(db, soupTableName, null); if (hasFTS(soupName)) { DBHelper.getInstance(db).delete(db, soupTableName + FTS_SUFFIX, null); } if (dbOpenHelper instanceof DBOpenHelper) { ((DBOpenHelper) dbOpenHelper).removeExternalBlobsDirectory(soupTableName); } } finally { db.setTransactionSuccessful(); db.endTransaction(); } } } /** * Check if soup exists * * @param soupName * @return true if soup exists, false otherwise */ public boolean hasSoup(String soupName) { final SQLiteDatabase db = getDatabase(); synchronized(db) { return DBHelper.getInstance(db).getSoupTableName(db, soupName) != null; } } /** * Destroy a soup * * Drop table for soupName * Cleanup entries in soup index map table * @param soupName */ public void dropSoup(String soupName) { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName != null) { db.execSQL("DROP TABLE IF EXISTS " + soupTableName); if (hasFTS(soupName)) { db.execSQL("DROP TABLE IF EXISTS " + soupTableName + FTS_SUFFIX); } try { db.beginTransaction(); DBHelper.getInstance(db).delete(db, SOUP_ATTRS_TABLE, SOUP_NAME_PREDICATE, soupName); DBHelper.getInstance(db).delete(db, SOUP_INDEX_MAP_TABLE, SOUP_NAME_PREDICATE, soupName); if (dbOpenHelper instanceof DBOpenHelper) { ((DBOpenHelper) dbOpenHelper).removeExternalBlobsDirectory(soupTableName); } db.setTransactionSuccessful(); // Remove from cache DBHelper.getInstance(db).removeFromCache(soupName); } finally { db.endTransaction(); } } } } /** * Destroy all the soups in the smartstore */ public void dropAllSoups() { final SQLiteDatabase db = getDatabase(); synchronized(db) { List<String> soupNames = getAllSoupNames(); for(String soupName : soupNames) { dropSoup(soupName); } } } /** * @return all soup names in the smartstore */ public List<String> getAllSoupNames() { final SQLiteDatabase db = getDatabase(); synchronized(db) { List<String> soupNames = new ArrayList<String>(); Cursor cursor = null; try { cursor = DBHelper.getInstance(db).query(db, SOUP_ATTRS_TABLE, new String[]{SOUP_NAME_COL}, SOUP_NAME_COL, null, null); if (cursor.moveToFirst()) { do { soupNames.add(cursor.getString(0)); } while (cursor.moveToNext()); } } finally { safeClose(cursor); } return soupNames; } } /** * Returns the entire SoupSpec of the given soup. 
* @param soupName * @return SoupSpec for given soup name. */ public SoupSpec getSoupSpec(String soupName) { final SQLiteDatabase db = getDatabase(); List<String> features = DBHelper.getInstance(db).getFeatures(db, soupName); return new SoupSpec(soupName, features.toArray(new String[features.size()])); } /** * Run a query given by its query Spec, only returned results from selected page * @param querySpec * @param pageIndex * @throws JSONException */ public JSONArray query(QuerySpec querySpec, int pageIndex) throws JSONException { JSONArray resultAsArray = new JSONArray(); runQuery(resultAsArray, null, querySpec, pageIndex); return resultAsArray; } /** * Run a query given by its query Spec, only returned results from selected page * without deserializing any JSON * * @param resultBuilder string builder to which results are appended * @param querySpec * @param pageIndex */ public void queryAsString(StringBuilder resultBuilder, QuerySpec querySpec, int pageIndex) { try { runQuery(null, resultBuilder, querySpec, pageIndex); } catch (JSONException e) { // shouldn't happen since we call runQuery with a string builder throw new SmartStoreException("Unexpected json exception", e); } } private void runQuery(JSONArray resultAsArray, StringBuilder resultAsStringBuilder, QuerySpec querySpec, int pageIndex) throws JSONException { boolean computeResultAsString = resultAsStringBuilder != null; final SQLiteDatabase db = getDatabase(); synchronized(db) { QueryType qt = querySpec.queryType; String sql = convertSmartSql(querySpec.smartSql); // Page int offsetRows = querySpec.pageSize * pageIndex; int numberRows = querySpec.pageSize; String limit = offsetRows + "," + numberRows; Cursor cursor = null; try { cursor = DBHelper.getInstance(db).limitRawQuery(db, sql, limit, querySpec.getArgs()); if (computeResultAsString) { resultAsStringBuilder.append("["); } int currentRow = 0; if (cursor.moveToFirst()) { do { if (computeResultAsString && currentRow > 0) { resultAsStringBuilder.append(", "); } currentRow++; // Smart queries if (qt == QueryType.smart || querySpec.selectPaths != null) { if (computeResultAsString) { getDataFromRow(null, resultAsStringBuilder, cursor); } else { JSONArray rowArray = new JSONArray(); getDataFromRow(rowArray, null, cursor); resultAsArray.put(rowArray); } } // Exact/like/range queries else { String rowAsString = null; if (cursor.getColumnIndex(SoupSpec.FEATURE_EXTERNAL_STORAGE) >= 0) { // Presence of external storage column implies we must fetch from storage. 
Soup name and entry id values can be extracted String soupTableName = cursor.getString(cursor.getColumnIndex(SoupSpec.FEATURE_EXTERNAL_STORAGE)); Long soupEntryId = cursor.getLong(cursor.getColumnIndex(SmartStore.SOUP_ENTRY_ID)); rowAsString = ((DBOpenHelper) dbOpenHelper).loadSoupBlobAsString(soupTableName, soupEntryId, encryptionKey); } else { rowAsString = cursor.getString(0); } if (computeResultAsString) { resultAsStringBuilder.append(rowAsString); } else { resultAsArray.put(new JSONObject(rowAsString)); } } } while (cursor.moveToNext()); } if (computeResultAsString) { resultAsStringBuilder.append("]"); } } finally { safeClose(cursor); } } } private void getDataFromRow(JSONArray resultAsArray, StringBuilder resultAsStringBuilder, Cursor cursor) throws JSONException { boolean computeResultAsString = resultAsStringBuilder != null; int columnCount = cursor.getColumnCount(); if (computeResultAsString) { resultAsStringBuilder.append("["); } for (int i=0; i<columnCount; i++) { if (computeResultAsString && i > 0) { resultAsStringBuilder.append(","); } int valueType = cursor.getType(i); String columnName = cursor.getColumnName(i); if (valueType == Cursor.FIELD_TYPE_NULL) { if (computeResultAsString) { resultAsStringBuilder.append("null"); } else { resultAsArray.put(null); } } else if (valueType == Cursor.FIELD_TYPE_STRING) { String raw = cursor.getString(i); if (columnName.equals(SoupSpec.FEATURE_EXTERNAL_STORAGE)) { // Presence of external storage column implies we must fetch from storage. Soup name and entry id values can be extracted String soupTableName = cursor.getString(i); Long soupEntryId = cursor.getLong(i + 1); if (computeResultAsString) { resultAsStringBuilder.append(((DBOpenHelper) dbOpenHelper).loadSoupBlobAsString(soupTableName, soupEntryId, encryptionKey)); } else { resultAsArray.put(((DBOpenHelper) dbOpenHelper).loadSoupBlob(soupTableName, soupEntryId, encryptionKey)); } i++; // skip next column (_soupEntryId) } else if (columnName.equals(SOUP_COL) || columnName.startsWith(SOUP_COL + ":") /* :num is appended to column name when result set has more than one column with same name */) { if (computeResultAsString) { resultAsStringBuilder.append(raw); } else { resultAsArray.put(new JSONObject(raw)); } // Note: we could end up returning a string if you aliased the column } else { if (computeResultAsString) { raw = escapeStringValue(raw); resultAsStringBuilder.append("\"").append(raw).append("\""); } else { resultAsArray.put(raw); } } } else if (valueType == Cursor.FIELD_TYPE_INTEGER) { if (computeResultAsString) { resultAsStringBuilder.append(cursor.getLong(i)); } else { resultAsArray.put(cursor.getLong(i)); } } else if (valueType == Cursor.FIELD_TYPE_FLOAT) { if (computeResultAsString) { resultAsStringBuilder.append(cursor.getDouble(i)); } else { resultAsArray.put(cursor.getDouble(i)); } } } if (computeResultAsString) { resultAsStringBuilder.append("]"); } } private String escapeStringValue(String raw) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < raw.length(); i ++) { char c = raw.charAt(i); switch (c) { case '\\': case '"': sb.append('\\'); sb.append(c); break; case '/': sb.append('\\'); sb.append(c); break; case '\b': sb.append("\\b"); break; case '\t': sb.append("\\t"); break; case '\n': sb.append("\\n"); break; case '\f': sb.append("\\f"); break; case '\r': sb.append("\\r"); break; default: if (c < ' ') { String t = "000" + Integer.toHexString(c); sb.append("\\u" + t.substring(t.length() - 4)); } else { sb.append(c); } } } return sb.toString(); } /** * @param 
querySpec * @return count of results for a query */ public int countQuery(QuerySpec querySpec) { final SQLiteDatabase db = getDatabase(); synchronized(db) { String countSql = convertSmartSql(querySpec.countSmartSql); return DBHelper.getInstance(db).countRawCountQuery(db, countSql, querySpec.getArgs()); } } /** * @param smartSql * @return */ public String convertSmartSql(String smartSql) { final SQLiteDatabase db = getDatabase(); synchronized (db) { return SmartSqlHelper.getInstance(db).convertSmartSql(db, smartSql); } } /** * Create (and commits) * Note: Passed soupElt is modified (last modified date and soup entry id fields) * @param soupName * @param soupElt * @return soupElt created or null if creation failed * @throws JSONException */ public JSONObject create(String soupName, JSONObject soupElt) throws JSONException { final SQLiteDatabase db = getDatabase(); synchronized(db) { return create(soupName, soupElt, true); } } /** * Create * Note: Passed soupElt is modified (last modified date and soup entry id fields) * @param soupName * @param soupElt * @return * @throws JSONException */ public JSONObject create(String soupName, JSONObject soupElt, boolean handleTx) throws JSONException { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); IndexSpec[] indexSpecs = DBHelper.getInstance(db).getIndexSpecs(db, soupName); try { if (handleTx) { db.beginTransaction(); } long now = System.currentTimeMillis(); long soupEntryId = DBHelper.getInstance(db).getNextId(db, soupTableName); // Adding fields to soup element soupElt.put(SOUP_ENTRY_ID, soupEntryId); soupElt.put(SOUP_LAST_MODIFIED_DATE, now); ContentValues contentValues = new ContentValues(); contentValues.put(ID_COL, soupEntryId); contentValues.put(CREATED_COL, now); contentValues.put(LAST_MODIFIED_COL, now); if (!usesExternalStorage(soupName)) { contentValues.put(SOUP_COL, soupElt.toString()); } projectIndexedPaths(soupElt, contentValues, indexSpecs, TypeGroup.value_extracted_to_column); // Inserting into database boolean success = DBHelper.getInstance(db).insert(db, soupTableName, contentValues) == soupEntryId; // Fts if (success && hasFTS(soupName)) { String soupTableNameFts = soupTableName + FTS_SUFFIX; ContentValues contentValuesFts = new ContentValues(); contentValuesFts.put(ROWID_COL, soupEntryId); projectIndexedPaths(soupElt, contentValuesFts, indexSpecs, TypeGroup.value_extracted_to_fts_column); // InsertHelper not working against virtual fts table db.insert(soupTableNameFts, null, contentValuesFts); } // Add to external storage if applicable if (success && usesExternalStorage(soupName) && dbOpenHelper instanceof DBOpenHelper) { success = ((DBOpenHelper) dbOpenHelper).saveSoupBlob(soupTableName, soupEntryId, soupElt, encryptionKey); } // Commit if successful if (success) { if (handleTx) { db.setTransactionSuccessful(); } return soupElt; } else { return null; } } finally { if (handleTx) { db.endTransaction(); } } } } /** * @soupName * @return true if soup has at least one full-text search index */ private boolean hasFTS(String soupName) { SQLiteDatabase db = getDatabase(); synchronized (db) { return DBHelper.getInstance(db).hasFTS(db, soupName); } } /** * Populate content values by projecting index specs that have a type in typeGroup * @param soupElt * @param contentValues * @param indexSpecs * @param typeGroup */ private void 
projectIndexedPaths(JSONObject soupElt, ContentValues contentValues, IndexSpec[] indexSpecs, TypeGroup typeGroup) { for (IndexSpec indexSpec : indexSpecs) { if (typeGroup.isMember(indexSpec.type)) { projectIndexedPath(soupElt, contentValues, indexSpec); } } } /** * @param soupElt * @param contentValues * @param indexSpec */ private void projectIndexedPath(JSONObject soupElt, ContentValues contentValues, IndexSpec indexSpec) { Object value = project(soupElt, indexSpec.path); contentValues.put(indexSpec.columnName, (String) null); // fall back if (value != null) { try { switch (indexSpec.type) { case integer: contentValues.put(indexSpec.columnName, ((Number) value).longValue()); break; case string: case full_text: contentValues.put(indexSpec.columnName, value.toString()); break; case floating: contentValues.put(indexSpec.columnName, ((Number) value).doubleValue()); break; } } catch (Exception e) { // Ignore (will use the null value) SmartStoreLogger.e(TAG, "Unexpected error", e); } } } /** * Retrieve * @param soupName * @param soupEntryIds * @return JSONArray of JSONObject's with the given soupEntryIds * @throws JSONException */ public JSONArray retrieve(String soupName, Long... soupEntryIds) throws JSONException { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); JSONArray result = new JSONArray(); if (usesExternalStorage(soupName) && dbOpenHelper instanceof DBOpenHelper) { for (long soupEntryId : soupEntryIds) { JSONObject raw = ((DBOpenHelper) dbOpenHelper).loadSoupBlob(soupTableName, soupEntryId, encryptionKey); if (raw != null) { result.put(raw); } } } else { Cursor cursor = null; try { cursor = DBHelper.getInstance(db).query(db, soupTableName, new String[] { SOUP_COL }, null, null, getSoupEntryIdsPredicate(soupEntryIds), (String[]) null); if (!cursor.moveToFirst()) { return result; } do { String raw = cursor.getString(cursor.getColumnIndex(SOUP_COL)); result.put(new JSONObject(raw)); } while (cursor.moveToNext()); } finally { safeClose(cursor); } } return result; } } /** * Update (and commits) * Note: Passed soupElt is modified (last modified date and soup entry id fields) * @param soupName * @param soupElt * @param soupEntryId * @return soupElt updated or null if update failed * @throws JSONException */ public JSONObject update(String soupName, JSONObject soupElt, long soupEntryId) throws JSONException { final SQLiteDatabase db = getDatabase(); synchronized(db) { return update(soupName, soupElt, soupEntryId, true); } } /** * Update * Note: Passed soupElt is modified (last modified date and soup entry id fields) * @param soupName * @param soupElt * @param soupEntryId * @param handleTx * @return * @throws JSONException */ public JSONObject update(String soupName, JSONObject soupElt, long soupEntryId, boolean handleTx) throws JSONException { final SQLiteDatabase db = getDatabase(); synchronized(db) { try { if (handleTx) { db.beginTransaction(); } String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); IndexSpec[] indexSpecs = DBHelper.getInstance(db).getIndexSpecs(db, soupName); long now = System.currentTimeMillis(); // In the case of an upsert with external id, _soupEntryId won't be in soupElt soupElt.put(SOUP_ENTRY_ID, soupEntryId); // Updating last modified field in soup element 
soupElt.put(SOUP_LAST_MODIFIED_DATE, now); // Preparing data for row ContentValues contentValues = new ContentValues(); contentValues.put(LAST_MODIFIED_COL, now); projectIndexedPaths(soupElt, contentValues, indexSpecs, TypeGroup.value_extracted_to_column); if (!usesExternalStorage(soupName)) { contentValues.put(SOUP_COL, soupElt.toString()); } // Updating database boolean success = DBHelper.getInstance(db).update(db, soupTableName, contentValues, ID_PREDICATE, soupEntryId + "") == 1; // Fts if (success && hasFTS(soupName)) { String soupTableNameFts = soupTableName + FTS_SUFFIX; ContentValues contentValuesFts = new ContentValues(); projectIndexedPaths(soupElt, contentValuesFts, indexSpecs, TypeGroup.value_extracted_to_fts_column); success = DBHelper.getInstance(db).update(db, soupTableNameFts, contentValuesFts, ROWID_PREDICATE, soupEntryId + "") == 1; } // Add to external storage if applicable if (success && usesExternalStorage(soupName) && dbOpenHelper instanceof DBOpenHelper) { success = ((DBOpenHelper) dbOpenHelper).saveSoupBlob(soupTableName, soupEntryId, soupElt, encryptionKey); } if (success) { if (handleTx) { db.setTransactionSuccessful(); } return soupElt; } else { return null; } } finally { if (handleTx) { db.endTransaction(); } } } } /** * Upsert (and commits) * @param soupName * @param soupElt * @param externalIdPath * @return soupElt upserted or null if upsert failed * @throws JSONException */ public JSONObject upsert(String soupName, JSONObject soupElt, String externalIdPath) throws JSONException { final SQLiteDatabase db = getDatabase(); synchronized(db) { return upsert(soupName, soupElt, externalIdPath, true); } } /** * Upsert (and commits) expecting _soupEntryId in soupElt for updates * @param soupName * @param soupElt * @return * @throws JSONException */ public JSONObject upsert(String soupName, JSONObject soupElt) throws JSONException { final SQLiteDatabase db = getDatabase(); synchronized(db) { return upsert(soupName, soupElt, SOUP_ENTRY_ID); } } /** * Upsert * @param soupName * @param soupElt * @param externalIdPath * @param handleTx * @return * @throws JSONException */ public JSONObject upsert(String soupName, JSONObject soupElt, String externalIdPath, boolean handleTx) throws JSONException { final SQLiteDatabase db = getDatabase(); synchronized(db) { long entryId = -1; if (externalIdPath.equals(SOUP_ENTRY_ID)) { if (soupElt.has(SOUP_ENTRY_ID)) { entryId = soupElt.getLong(SOUP_ENTRY_ID); } } else { Object externalIdObj = project(soupElt, externalIdPath); if (externalIdObj != null) { entryId = lookupSoupEntryId(soupName, externalIdPath, externalIdObj + ""); } else { // Cannot have empty values for user-defined external ID upsert. 
throw new SmartStoreException(String.format("For upsert with external ID path '%s', value cannot be empty for any entries.", externalIdPath)); } } // If we have an entryId, let's do an update, otherwise let's do a create if (entryId != -1) { return update(soupName, soupElt, entryId, handleTx); } else { return create(soupName, soupElt, handleTx); } } } /** * Look for a soup element where fieldPath's value is fieldValue * Return its soupEntryId * Return -1 if not found * Throw an exception if fieldName is not indexed * Throw an exception if more than one soup element are found * * @param soupName * @param fieldPath * @param fieldValue */ public long lookupSoupEntryId(String soupName, String fieldPath, String fieldValue) { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); String columnName = DBHelper.getInstance(db).getColumnNameForPath(db, soupName, fieldPath); Cursor cursor = null; try { cursor = db.query(soupTableName, new String[] {ID_COL}, columnName + " = ?", new String[] { fieldValue }, null, null, null); if (cursor.getCount() > 1) { throw new SmartStoreException(String.format("There are more than one soup elements where %s is %s", fieldPath, fieldValue)); } if (cursor.moveToFirst()) { return cursor.getLong(0); } else { return -1; // not found } } finally { safeClose(cursor); } } } /** * Delete soup elements given by their ids (and commits) * @param soupName * @param soupEntryIds */ public void delete(String soupName, Long... soupEntryIds) { final SQLiteDatabase db = getDatabase(); synchronized(db) { delete(soupName, soupEntryIds, true); } } /** * Delete soup elements given by their ids * @param soupName * @param soupEntryIds * @param handleTx */ public void delete(String soupName, Long[] soupEntryIds, boolean handleTx) { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); if (handleTx) { db.beginTransaction(); } try { db.delete(soupTableName, getSoupEntryIdsPredicate(soupEntryIds), (String []) null); if (hasFTS(soupName)) { db.delete(soupTableName + FTS_SUFFIX, getRowIdsPredicate(soupEntryIds), (String[]) null); } if (usesExternalStorage(soupName) && dbOpenHelper instanceof DBOpenHelper) { ((DBOpenHelper) dbOpenHelper).removeSoupBlob(soupTableName, soupEntryIds); } if (handleTx) { db.setTransactionSuccessful(); } } finally { if (handleTx) { db.endTransaction(); } } } } /** * Delete soup elements selected by querySpec (and commits) * @param soupName * @param querySpec Query returning entries to delete (if querySpec uses smartSQL, it must select soup entry ids) */ public void deleteByQuery(String soupName, QuerySpec querySpec) { final SQLiteDatabase db = getDatabase(); synchronized(db) { deleteByQuery(soupName, querySpec, true); } } /** * Delete soup elements selected by querySpec * @param soupName * @param querySpec * @param handleTx */ public void deleteByQuery(String soupName, QuerySpec querySpec, boolean handleTx) { final SQLiteDatabase db = getDatabase(); synchronized(db) { String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName); if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist"); if (handleTx) { db.beginTransaction(); } try { String 
subQuerySql = String.format("SELECT %s FROM (%s) LIMIT %d", ID_COL, convertSmartSql(querySpec.idsSmartSql), querySpec.pageSize); String[] args = querySpec.getArgs(); if (usesExternalStorage(soupName) && dbOpenHelper instanceof DBOpenHelper) { // Query list of ids and remove them from external storage Cursor c = null; try { c = db.query(soupTableName, new String[] { ID_COL }, buildInStatement(ID_COL, subQuerySql), args, null, null, null); if (c.moveToFirst()) { Long[] ids = new Long[c.getCount()]; int counter = 0; do { ids[counter++] = c.getLong(0); } while (c.moveToNext()); ((DBOpenHelper) dbOpenHelper).removeSoupBlob(soupTableName, ids); } } finally { if (c != null) { c.close(); } } } db.delete(soupTableName, buildInStatement(ID_COL, subQuerySql), args); if (hasFTS(soupName)) { db.delete(soupTableName + FTS_SUFFIX, buildInStatement(ROWID_COL, subQuerySql), args); } if (handleTx) { db.setTransactionSuccessful(); } } finally { if (handleTx) { db.endTransaction(); } } } } /** * @return predicate to match soup entries by id */ private String getSoupEntryIdsPredicate(Long[] soupEntryIds) { return buildInStatement(ID_COL, TextUtils.join(",", soupEntryIds)); } /** * @return predicate to match entries by rowid */ private String getRowIdsPredicate(Long[] rowids) { return buildInStatement(ROWID_COL, TextUtils.join(",", rowids)); } /** * @param col * @param inPredicate * @return in statement */ private String buildInStatement(String col, String inPredicate) { return String.format("%s IN (%s)", col, inPredicate); } /** * @return ftsX to be used when creating the virtual table to support full_text queries */ public FtsExtension getFtsExtension() { return ftsExtension; } /** * Sets the ftsX to be used when creating the virtual table to support full_text queries * NB: only used in tests * @param ftsExtension */ public void setFtsExtension(FtsExtension ftsExtension) { this.ftsExtension = ftsExtension; } /** * @param soupId * @return */ public static String getSoupTableName(long soupId) { return "TABLE_" + soupId; } /** * @param cursor */ private void safeClose(Cursor cursor) { if (cursor != null) { cursor.close(); } } /** * @param soup * @param path * @return object at path in soup * * Examples (in pseudo code): * * json = {"a": {"b": [{"c":"xx"}, {"c":"xy"}, {"d": [{"e":1}, {"e":2}]}, {"d": [{"e":3}, {"e":4}]}] }} * projectIntoJson(jsonObj, "a") = {"b": [{"c":"xx"}, {"c":"xy"}, {"d": [{"e":1}, {"e":2}]}, {"d": [{"e":3}, {"e":4}]} ]} * projectIntoJson(json, "a.b") = [{c:"xx"}, {c:"xy"}, {"d": [{"e":1}, {"e":2}]}, {"d": [{"e":3}, {"e":4}]}] * projectIntoJson(json, "a.b.c") = ["xx", "xy"] // new in 4.1 * projectIntoJson(json, "a.b.d") = [[{"e":1}, {"e":2}], [{"e":3}, {"e":4}]] // new in 4.1 * projectIntoJson(json, "a.b.d.e") = [[1, 2], [3, 4]] // new in 4.1 * */ public static Object project(JSONObject soup, String path) { if (soup == null) { return null; } if (path == null || path.equals("")) { return soup; } String[] pathElements = path.split("[.]"); return project(soup, pathElements, 0); } private static Object project(Object jsonObj, String[] pathElements, int index) { Object result = null; if (index == pathElements.length) { return jsonObj; } if (null != jsonObj) { String pathElement = pathElements[index]; if (jsonObj instanceof JSONObject) { JSONObject jsonDict = (JSONObject) jsonObj; Object dictVal = JSONObjectHelper.opt(jsonDict, pathElement); result = project(dictVal, pathElements, index+1); } else if (jsonObj instanceof JSONArray) { JSONArray jsonArr = (JSONArray) jsonObj; result = new 
JSONArray(); for (int i=0; i<jsonArr.length(); i++) { Object arrayElt = JSONObjectHelper.opt(jsonArr, i); Object resultPart = project(arrayElt, pathElements, index); if (resultPart != null) { ((JSONArray) result).put(resultPart); } } if (((JSONArray) result).length() == 0) { result = null; } } } return result; } /** * Enum for column type */ public enum Type { string("TEXT"), integer("INTEGER"), floating("REAL"), full_text("TEXT"), json1(null); private String columnType; private Type(String columnType) { this.columnType = columnType; } public String getColumnType() { return columnType; } } /** * Enum for type groups */ public enum TypeGroup { value_extracted_to_column { @Override public boolean isMember(Type type) { return type == Type.string || type == Type.integer || type == Type.floating || type == Type.full_text; } }, value_extracted_to_fts_column { @Override public boolean isMember(Type type) { return type == Type.full_text; } }, value_indexed_with_json_extract { @Override public boolean isMember(Type type) { return type == Type.json1; } }; public abstract boolean isMember(Type type); } /** * Enum for fts extensions */ public enum FtsExtension { fts4, fts5 } /** * Exception thrown by smart store * */ public static class SmartStoreException extends RuntimeException { public SmartStoreException(String message) { super(message); } public SmartStoreException(String message, Throwable t) { super(message, t); } private static final long serialVersionUID = -6369452803270075464L; } /** * Updates the given table with a new name and adds columns if any. * * @param db Database to update * @param oldName Old name of the table to be renamed, null if table should not be renamed. * @param newName New name of the table to be renamed, null if table should not be renamed. * @param columns Columns to add. Null if no new columns should be added. */ public static void updateTableNameAndAddColumns(SQLiteDatabase db, String oldName, String newName, String[] columns) { synchronized(SmartStore.class) { StringBuilder sb = new StringBuilder(); if (columns != null && columns.length > 0) { for (String column : columns) { sb.append("ALTER TABLE ").append(oldName).append(" ADD COLUMN ").append(column).append(" INTEGER DEFAULT 0;"); } db.execSQL(sb.toString()); } if (oldName != null && newName != null) { sb = new StringBuilder(); sb.append("ALTER TABLE ").append(oldName).append(" RENAME TO ").append(newName).append(';'); db.execSQL(sb.toString()); } } } /** * Determines if the given soup features external storage. * * @param soupName Name of the soup to determine external storage enablement. * * @return True if soup uses external storage; false otherwise. 
*/ public boolean usesExternalStorage(String soupName) { final SQLiteDatabase db = getDatabase(); synchronized (db) { return DBHelper.getInstance(db).getFeatures(db, soupName).contains(SoupSpec.FEATURE_EXTERNAL_STORAGE); } } /** * Get SQLCipher runtime settings * * @return list of SQLCipher runtime settings */ public List<String> getRuntimeSettings() { return queryPragma("cipher_settings"); } /** * Get SQLCipher compile options * * @return list of SQLCipher compile options */ public List<String> getCompileOptions() { return queryPragma("compile_options"); } /** * Get SQLCipher version * * @return SQLCipher version */ public String getSQLCipherVersion() { return TextUtils.join(" ", queryPragma("cipher_version")); } @NonNull private List<String> queryPragma(String pragma) { final SQLiteDatabase db = getDatabase(); ArrayList<String> results = new ArrayList<>(); Cursor c = null; try { c = db.rawQuery("PRAGMA " + pragma, null); while (c.moveToNext()) { results.add(c.getString(0)); } } finally { safeClose(c); } return results; } }
1
17,746
Getting an error when calling this with db.execSQL (in SQLCipher 4.3.0, the pragma returns `ok`).
forcedotcom-SalesforceMobileSDK-Android
java
@@ -83,11 +83,11 @@ func retrieveFeeds(ctx *middleware.Context, ctxUserID, userID, offset int64, isP } func Dashboard(ctx *middleware.Context) { - ctx.Data["Title"] = ctx.Tr("dashboard") + ctxUser := getDashboardContextUser(ctx) + ctx.Data["Title"] = ctxUser.DisplayName() + " " + ctx.Tr("dashboard") ctx.Data["PageIsDashboard"] = true ctx.Data["PageIsNews"] = true - ctxUser := getDashboardContextUser(ctx) if ctx.Written() { return }
1
// Copyright 2014 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package user import ( "bytes" "fmt" "github.com/Unknwon/com" "github.com/Unknwon/paginater" "github.com/gogits/gogs/models" "github.com/gogits/gogs/modules/base" "github.com/gogits/gogs/modules/middleware" "github.com/gogits/gogs/modules/setting" ) const ( DASHBOARD base.TplName = "user/dashboard/dashboard" ISSUES base.TplName = "user/dashboard/issues" PROFILE base.TplName = "user/profile" ORG_HOME base.TplName = "org/home" ) func getDashboardContextUser(ctx *middleware.Context) *models.User { ctxUser := ctx.User orgName := ctx.Params(":org") if len(orgName) > 0 { // Organization. org, err := models.GetUserByName(orgName) if err != nil { if models.IsErrUserNotExist(err) { ctx.Handle(404, "GetUserByName", err) } else { ctx.Handle(500, "GetUserByName", err) } return nil } ctxUser = org } ctx.Data["ContextUser"] = ctxUser if err := ctx.User.GetOrganizations(true); err != nil { ctx.Handle(500, "GetOrganizations", err) return nil } ctx.Data["Orgs"] = ctx.User.Orgs return ctxUser } func retrieveFeeds(ctx *middleware.Context, ctxUserID, userID, offset int64, isProfile bool) { actions, err := models.GetFeeds(ctxUserID, userID, offset, isProfile) if err != nil { ctx.Handle(500, "GetFeeds", err) return } // Check access of private repositories. feeds := make([]*models.Action, 0, len(actions)) unameAvatars := make(map[string]string) for _, act := range actions { // Cache results to reduce queries. _, ok := unameAvatars[act.ActUserName] if !ok { u, err := models.GetUserByName(act.ActUserName) if err != nil { if models.IsErrUserNotExist(err) { continue } ctx.Handle(500, "GetUserByName", err) return } unameAvatars[act.ActUserName] = u.AvatarLink() } act.ActAvatar = unameAvatars[act.ActUserName] feeds = append(feeds, act) } ctx.Data["Feeds"] = feeds } func Dashboard(ctx *middleware.Context) { ctx.Data["Title"] = ctx.Tr("dashboard") ctx.Data["PageIsDashboard"] = true ctx.Data["PageIsNews"] = true ctxUser := getDashboardContextUser(ctx) if ctx.Written() { return } if !ctxUser.IsOrganization() { collaborateRepos, err := ctx.User.GetAccessibleRepositories() if err != nil { ctx.Handle(500, "GetAccessibleRepositories", err) return } for i := range collaborateRepos { if err = collaborateRepos[i].GetOwner(); err != nil { ctx.Handle(500, "GetOwner: "+collaborateRepos[i].Name, err) return } } ctx.Data["CollaborateCount"] = len(collaborateRepos) ctx.Data["CollaborativeRepos"] = collaborateRepos } var repos []*models.Repository if ctxUser.IsOrganization() { if err := ctxUser.GetUserRepositories(ctx.User.Id); err != nil { ctx.Handle(500, "GetUserRepositories", err) return } repos = ctxUser.Repos } else { var err error repos, err = models.GetRepositories(ctxUser.Id, true) if err != nil { ctx.Handle(500, "GetRepositories", err) return } } ctx.Data["Repos"] = repos // Get mirror repositories. 
mirrors := make([]*models.Repository, 0, 5) for _, repo := range repos { if repo.IsMirror { if err := repo.GetMirror(); err != nil { ctx.Handle(500, "GetMirror: "+repo.Name, err) return } mirrors = append(mirrors, repo) } } ctx.Data["MirrorCount"] = len(mirrors) ctx.Data["Mirrors"] = mirrors retrieveFeeds(ctx, ctxUser.Id, ctx.User.Id, 0, false) if ctx.Written() { return } ctx.HTML(200, DASHBOARD) } func Issues(ctx *middleware.Context) { isPullList := ctx.Params(":type") == "pulls" if isPullList { ctx.Data["Title"] = ctx.Tr("pull_requests") ctx.Data["PageIsPulls"] = true } else { ctx.Data["Title"] = ctx.Tr("issues") ctx.Data["PageIsIssues"] = true } ctxUser := getDashboardContextUser(ctx) if ctx.Written() { return } // Organization does not have view type and filter mode. var ( viewType string sortType = ctx.Query("sort") filterMode = models.FM_ALL assigneeID int64 posterID int64 ) if ctxUser.IsOrganization() { viewType = "all" } else { viewType = ctx.Query("type") types := []string{"assigned", "created_by"} if !com.IsSliceContainsStr(types, viewType) { viewType = "all" } switch viewType { case "assigned": filterMode = models.FM_ASSIGN assigneeID = ctxUser.Id case "created_by": filterMode = models.FM_CREATE posterID = ctxUser.Id } } repoID := ctx.QueryInt64("repo") isShowClosed := ctx.Query("state") == "closed" // Get repositories. if ctxUser.IsOrganization() { if err := ctxUser.GetUserRepositories(ctx.User.Id); err != nil { ctx.Handle(500, "GetRepositories", err) return } } else { if err := ctxUser.GetRepositories(); err != nil { ctx.Handle(500, "GetRepositories", err) return } } repos := ctxUser.Repos allCount := 0 repoIDs := make([]int64, 0, len(repos)) showRepos := make([]*models.Repository, 0, len(repos)) for _, repo := range repos { if (isPullList && repo.NumPulls == 0) || (!isPullList && repo.NumIssues == 0) { continue } repoIDs = append(repoIDs, repo.ID) if isPullList { allCount += repo.NumOpenPulls repo.NumOpenIssues = repo.NumOpenPulls repo.NumClosedIssues = repo.NumClosedPulls } else { allCount += repo.NumOpenIssues } if filterMode != models.FM_ALL { // Calculate repository issue count with filter mode. numOpen, numClosed := repo.IssueStats(ctxUser.Id, filterMode, isPullList) repo.NumOpenIssues, repo.NumClosedIssues = int(numOpen), int(numClosed) } if repo.ID == repoID || (isShowClosed && repo.NumClosedIssues > 0) || (!isShowClosed && repo.NumOpenIssues > 0) { showRepos = append(showRepos, repo) } } ctx.Data["Repos"] = showRepos issueStats := models.GetUserIssueStats(repoID, ctxUser.Id, repoIDs, filterMode, isPullList) issueStats.AllCount = int64(allCount) page := ctx.QueryInt("page") if page <= 1 { page = 1 } var total int if !isShowClosed { total = int(issueStats.OpenCount) } else { total = int(issueStats.ClosedCount) } ctx.Data["Page"] = paginater.New(total, setting.IssuePagingNum, page, 5) // Get issues. issues, err := models.Issues(&models.IssuesOptions{ UserID: ctxUser.Id, AssigneeID: assigneeID, RepoID: repoID, PosterID: posterID, RepoIDs: repoIDs, Page: page, IsClosed: isShowClosed, IsPull: isPullList, SortType: sortType, }) if err != nil { ctx.Handle(500, "Issues: %v", err) return } // Get posters and repository. 
for i := range issues { issues[i].Repo, err = models.GetRepositoryByID(issues[i].RepoID) if err != nil { ctx.Handle(500, "GetRepositoryByID", fmt.Errorf("[#%d]%v", issues[i].ID, err)) return } if err = issues[i].Repo.GetOwner(); err != nil { ctx.Handle(500, "GetOwner", fmt.Errorf("[#%d]%v", issues[i].ID, err)) return } if err = issues[i].GetPoster(); err != nil { ctx.Handle(500, "GetPoster", fmt.Errorf("[#%d]%v", issues[i].ID, err)) return } } ctx.Data["Issues"] = issues ctx.Data["IssueStats"] = issueStats ctx.Data["ViewType"] = viewType ctx.Data["SortType"] = sortType ctx.Data["RepoID"] = repoID ctx.Data["IsShowClosed"] = isShowClosed if isShowClosed { ctx.Data["State"] = "closed" } else { ctx.Data["State"] = "open" } ctx.HTML(200, ISSUES) } func ShowSSHKeys(ctx *middleware.Context, uid int64) { keys, err := models.ListPublicKeys(uid) if err != nil { ctx.Handle(500, "ListPublicKeys", err) return } var buf bytes.Buffer for i := range keys { buf.WriteString(keys[i].OmitEmail()) buf.WriteString("\n") } ctx.PlainText(200, buf.Bytes()) } func showOrgProfile(ctx *middleware.Context) { ctx.SetParams(":org", ctx.Params(":username")) middleware.HandleOrgAssignment(ctx) if ctx.Written() { return } org := ctx.Org.Organization ctx.Data["Title"] = org.FullName if ctx.IsSigned { if err := org.GetUserRepositories(ctx.User.Id); err != nil { ctx.Handle(500, "GetUserRepositories", err) return } ctx.Data["Repos"] = org.Repos } else { repos, err := models.GetRepositories(org.Id, false) if err != nil { ctx.Handle(500, "GetRepositories", err) return } ctx.Data["Repos"] = repos } if err := org.GetMembers(); err != nil { ctx.Handle(500, "GetMembers", err) return } ctx.Data["Members"] = org.Members ctx.Data["Teams"] = org.Teams ctx.HTML(200, ORG_HOME) } func Email2User(ctx *middleware.Context) { u, err := models.GetUserByEmail(ctx.Query("email")) if err != nil { if models.IsErrUserNotExist(err) { ctx.Handle(404, "GetUserByEmail", err) } else { ctx.Handle(500, "GetUserByEmail", err) } return } ctx.Redirect(setting.AppSubUrl + "/user/" + u.Name) }
1
10,582
Maybe we could remove `" " + ctx.Tr("dashboard")` completely?
gogs-gogs
go
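To make the suggestion above concrete, here is a minimal, self-contained Go sketch contrasting the title built in the patch (display name plus the translated "dashboard" string) with the shorter form the reviewer floats (display name only). The `displayNamer` interface and `stubUser` type are stand-ins for gogs' `*models.User` and `ctx.Tr`, not part of the project; treat this as an illustration of the two options, not the actual handler code.

```go
package main

import "fmt"

// displayNamer stands in for gogs' *models.User; only DisplayName is needed here.
type displayNamer interface {
	DisplayName() string
}

// stubUser is a hypothetical user type used only for this illustration.
type stubUser struct{ name string }

func (u stubUser) DisplayName() string { return u.name }

// dashboardTitle contrasts the two options from the review: keeping the
// translated "dashboard" suffix after the user's display name, or dropping it.
func dashboardTitle(u displayNamer, trDashboard string, keepSuffix bool) string {
	if keepSuffix {
		return u.DisplayName() + " " + trDashboard
	}
	return u.DisplayName()
}

func main() {
	u := stubUser{name: "alice"}
	fmt.Println(dashboardTitle(u, "Dashboard", true))  // "alice Dashboard" (current patch)
	fmt.Println(dashboardTitle(u, "Dashboard", false)) // "alice" (reviewer's suggestion)
}
```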
@@ -1,13 +1,13 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX - License - Identifier: Apache - 2.0 +# SPDX-License-Identifier: Apache-2.0 + +# Purpose +# This code demonstrates how to copy an object from one Amazon Simple Storage Solution (Amazon S3) bucket to another, +# changing the object's server-side encryption state during the copy operation. # snippet-start:[s3.ruby.copy_object_encrypt_copy.rb] require 'aws-sdk-s3' -# Copies an object from one Amazon S3 bucket to another, -# changing the object's server-side encryption state during -# the copy operation. -# # Prerequisites: # # - A bucket containing an object to be copied.
1
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX - License - Identifier: Apache - 2.0 # snippet-start:[s3.ruby.copy_object_encrypt_copy.rb] require 'aws-sdk-s3' # Copies an object from one Amazon S3 bucket to another, # changing the object's server-side encryption state during # the copy operation. # # Prerequisites: # # - A bucket containing an object to be copied. # - A separate bucket to copy the object into. # # @param s3_client [Aws::S3::Client] An initialized Amazon S3 client. # @param source_bucket_name [String] The source bucket's name. # @param source_object_key [String] The name of the object to be copied. # @param target_bucket_name [String] The target bucket's name. # @param target_object_key [String] The name of the copied object. # @param encryption_type [String] The server-side encryption type for # the copied object. # @return [Boolean] true if the object was copied with the specified # server-side encryption; otherwise, false. # @example # s3_client = Aws::S3::Client.new(region: 'us-east-1') # if object_copied_with_encryption?( # s3_client, # 'doc-example-bucket1', # 'my-source-file.txt', # 'doc-example-bucket2', # 'my-target-file.txt', # 'AES256' # ) # puts 'Copied.' # else # puts 'Not copied.' # end def object_copied_with_encryption?( s3_client, source_bucket_name, source_object_key, target_bucket_name, target_object_key, encryption_type ) response = s3_client.copy_object( bucket: target_bucket_name, copy_source: source_bucket_name + '/' + source_object_key, key: target_object_key, server_side_encryption: encryption_type ) return true if response.copy_object_result rescue StandardError => e puts "Error while copying object: #{e.message}" end # snippet-end:[s3.ruby.copy_object_encrypt_copy.rb] # Full example call: def run_me s3_client = Aws::S3::Client.new(region: 'us-east-1') if object_copied_with_encryption?( s3_client, 'doc-example-bucket1', 'my-source-file.txt', 'doc-example-bucket2', 'my-target-file.txt', 'AES256' ) puts 'Copied.' else puts 'Not copied.' end end run_me if $PROGRAM_NAME == __FILE__
1
20,518
Simple Storage **Service**
awsdocs-aws-doc-sdk-examples
rb
@@ -42,6 +42,15 @@ type jsonHandler struct { handler reflect.Value } +type jsonHandler2 struct { + handler reflect.Value +} + +func (h jsonHandler2) Handle(ctx context.Context, reqBody interface{}) (interface{}, error) { + results := h.handler.Call([]reflect.Value{reflect.ValueOf(ctx), reflect.ValueOf(reqBody)}) + return results[0].Interface(), results[1].Interface().(error) +} + func (h jsonHandler) Handle(ctx context.Context, req *yarpc.Request, reqBuf *yarpc.Buffer) (*yarpc.Response, *yarpc.Buffer, error) { if err := yarpcencoding.ExpectEncodings(req, Encoding); err != nil { return nil, nil, err
1
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package yarpcjson import ( "context" "encoding/json" "reflect" yarpc "go.uber.org/yarpc/v2" "go.uber.org/yarpc/v2/yarpcencoding" ) var _ yarpc.UnaryTransportHandler = (*jsonHandler)(nil) // jsonHandler adapts a user-provided high-level handler into a transport-level // UnaryTransportHandler. // // The wrapped function must already be in the correct format: // // f(ctx context.Context, body $reqBody) ($resBody, error) type jsonHandler struct { reader requestReader handler reflect.Value } func (h jsonHandler) Handle(ctx context.Context, req *yarpc.Request, reqBuf *yarpc.Buffer) (*yarpc.Response, *yarpc.Buffer, error) { if err := yarpcencoding.ExpectEncodings(req, Encoding); err != nil { return nil, nil, err } ctx, call := yarpc.NewInboundCall(ctx) if err := call.ReadFromRequest(req); err != nil { return nil, nil, err } reqBody, err := h.reader.Read(json.NewDecoder(reqBuf)) if err != nil { return nil, nil, yarpcencoding.RequestBodyDecodeError(req, err) } results := h.handler.Call([]reflect.Value{reflect.ValueOf(ctx), reqBody}) res := &yarpc.Response{} resBuf := &yarpc.Buffer{} call.WriteToResponse(res) // we want to return the appErr if it exists as this is what // the previous behavior was so we deprioritize this error var encodeErr error if result := results[0].Interface(); result != nil { if err := json.NewEncoder(resBuf).Encode(result); err != nil { encodeErr = yarpcencoding.ResponseBodyEncodeError(req, err) } } if appErr, _ := results[1].Interface().(error); appErr != nil { res.ApplicationError = true // TODO(apeatsbond): now that we propogate a Response struct back, the // Response should hold the actual application error. Errors returned by the // handler (not through the Response) could be considered fatal. return res, resBuf, appErr } return res, resBuf, encodeErr } // requestReader is used to parse a JSON request argument from a JSON decoder. 
type requestReader interface { Read(*json.Decoder) (reflect.Value, error) } type structReader struct { // Type of the struct (not a pointer to the struct) Type reflect.Type } func (r structReader) Read(d *json.Decoder) (reflect.Value, error) { value := reflect.New(r.Type) err := d.Decode(value.Interface()) return value, err } type mapReader struct { Type reflect.Type // Type of the map } func (r mapReader) Read(d *json.Decoder) (reflect.Value, error) { value := reflect.New(r.Type) err := d.Decode(value.Interface()) return value.Elem(), err } type ifaceEmptyReader struct{} func (ifaceEmptyReader) Read(d *json.Decoder) (reflect.Value, error) { value := reflect.New(_interfaceEmptyType) err := d.Decode(value.Interface()) return value.Elem(), err }
1
17,951
Let's add some assertions here to ensure we don't panic: check the length of results, and conditionally cast the second result to error (see the sketch after this entry).
yarpc-yarpc-go
go
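As a follow-up to the review comment above, here is a minimal Go sketch of what those guards could look like for the `jsonHandler2.Handle` method introduced in the patch. The length check and the `fmt`-based error messages are assumptions about how one might report the failure; they are not the reviewer's exact wording or yarpc's actual error API.

```go
package yarpcjson

import (
	"context"
	"fmt"
	"reflect"
)

type jsonHandler2 struct {
	handler reflect.Value
}

// Handle calls the wrapped handler via reflection and converts the results
// back to (interface{}, error) without panicking on unexpected shapes.
func (h jsonHandler2) Handle(ctx context.Context, reqBody interface{}) (interface{}, error) {
	results := h.handler.Call([]reflect.Value{reflect.ValueOf(ctx), reflect.ValueOf(reqBody)})

	// Assert the handler returned exactly two values before indexing into them.
	if len(results) != 2 {
		return nil, fmt.Errorf("expected handler to return 2 values, got %d", len(results))
	}

	// A nil error comes back as a nil interface value; only type-assert when it
	// is non-nil, and use the comma-ok form so a non-error value cannot panic.
	var appErr error
	if errVal := results[1].Interface(); errVal != nil {
		if e, ok := errVal.(error); ok {
			appErr = e
		} else {
			appErr = fmt.Errorf("handler returned a non-error second value: %v", errVal)
		}
	}
	return results[0].Interface(), appErr
}
```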
@@ -23,8 +23,10 @@ namespace Microsoft.AspNetCore.Server.Kestrel // Matches the default LimitRequestFields in Apache httpd. private int _maxRequestHeaderCount = 100; - // Matches the default http.sys connection timeout. - private TimeSpan _connectionTimeout = TimeSpan.FromMinutes(2); + // Matches the default http.sys connectionTimeout. + private TimeSpan _keepAliveTimeout = TimeSpan.FromMinutes(2); + + private TimeSpan _requestHeadersTimeout = TimeSpan.FromSeconds(30); /// <summary> /// Gets or sets the maximum size of the response buffer before write
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; namespace Microsoft.AspNetCore.Server.Kestrel { public class KestrelServerLimits { // Matches the non-configurable default response buffer size for Kestrel in 1.0.0 private long? _maxResponseBufferSize = 64 * 1024; // Matches the default client_max_body_size in nginx. Also large enough that most requests // should be under the limit. private long? _maxRequestBufferSize = 1024 * 1024; // Matches the default large_client_header_buffers in nginx. private int _maxRequestLineSize = 8 * 1024; // Matches the default large_client_header_buffers in nginx. private int _maxRequestHeadersTotalSize = 32 * 1024; // Matches the default LimitRequestFields in Apache httpd. private int _maxRequestHeaderCount = 100; // Matches the default http.sys connection timeout. private TimeSpan _connectionTimeout = TimeSpan.FromMinutes(2); /// <summary> /// Gets or sets the maximum size of the response buffer before write /// calls begin to block or return tasks that don't complete until the /// buffer size drops below the configured limit. /// </summary> /// <remarks> /// When set to null, the size of the response buffer is unlimited. /// When set to zero, all write calls will block or return tasks that /// don't complete until the entire response buffer is flushed. /// Defaults to 65,536 bytes (64 KB). /// </remarks> public long? MaxResponseBufferSize { get { return _maxResponseBufferSize; } set { if (value.HasValue && value.Value < 0) { throw new ArgumentOutOfRangeException(nameof(value), "Value must be null or a non-negative integer."); } _maxResponseBufferSize = value; } } /// <summary> /// Gets or sets the maximum size of the request buffer. /// </summary> /// <remarks> /// When set to null, the size of the request buffer is unlimited. /// Defaults to 1,048,576 bytes (1 MB). /// </remarks> public long? MaxRequestBufferSize { get { return _maxRequestBufferSize; } set { if (value.HasValue && value.Value <= 0) { throw new ArgumentOutOfRangeException(nameof(value), "Value must be null or a positive integer."); } _maxRequestBufferSize = value; } } /// <summary> /// Gets or sets the maximum allowed size for the HTTP request line. /// </summary> /// <remarks> /// Defaults to 8,192 bytes (8 KB). /// </remarks> public int MaxRequestLineSize { get { return _maxRequestLineSize; } set { if (value <= 0) { throw new ArgumentOutOfRangeException(nameof(value), "Value must be a positive integer."); } _maxRequestLineSize = value; } } /// <summary> /// Gets or sets the maximum allowed size for the HTTP request headers. /// </summary> /// <remarks> /// Defaults to 32,768 bytes (32 KB). /// </remarks> public int MaxRequestHeadersTotalSize { get { return _maxRequestHeadersTotalSize; } set { if (value <= 0) { throw new ArgumentOutOfRangeException(nameof(value), "Value must a positive integer."); } _maxRequestHeadersTotalSize = value; } } /// <summary> /// Gets or sets the maximum allowed number of headers per HTTP request. /// </summary> /// <remarks> /// Defaults to 100. /// </remarks> public int MaxRequestHeaderCount { get { return _maxRequestHeaderCount; } set { if (value <= 0) { throw new ArgumentOutOfRangeException(nameof(value), "Value must a positive integer."); } _maxRequestHeaderCount = value; } } /// <summary> /// Gets or sets the keep-alive timeout. /// </summary> /// <remarks> /// Defaults to 2 minutes. 
/// </remarks> public TimeSpan KeepAliveTimeout { get { return _connectionTimeout; } set { _connectionTimeout = value; } } } }
1
10,123
Where did we take this default from?
aspnet-KestrelHttpServer
.cs
@@ -122,6 +122,8 @@ const baseSelectors = { * @param {Object} options Options for generating the report. * @param {string} options.startDate Required, unless dateRange is provided. Start date to query report data for as YYYY-mm-dd. * @param {string} options.endDate Required, unless dateRange is provided. End date to query report data for as YYYY-mm-dd. + * @param {string} [options.compareStartDate] Optional. Start date to compare report data for as YYYY-mm-dd. + * @param {string} [options.compareEndDate] Optional. End date to compare report data for as YYYY-mm-dd. * @param {string} options.dateRange Required, alternative to startDate and endDate. A date range string such as 'last-28-days'. * @param {boolean} [options.compareDateRanges] Optional. Only relevant with dateRange. Default false. * @param {boolean} [options.multiDateRange] Optional. Only relevant with dateRange. Default false.
1
/** * `modules/analytics` data store: report. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import invariant from 'invariant'; import isPlainObject from 'lodash/isPlainObject'; /** * Internal dependencies */ import API from 'googlesitekit-api'; import Data from 'googlesitekit-data'; import { STORE_NAME } from './constants'; import { stringifyObject } from '../../../util'; import { createFetchStore } from '../../../googlesitekit/data/create-fetch-store'; import { isValidDateRange, isValidOrders } from '../../../util/report-validation'; import { isValidDimensions, isValidMetrics } from '../util/report-validation'; import { actions as adsenseActions } from './adsense'; import { normalizeReportOptions } from '../util/report-normalization'; import { isRestrictedMetricsError } from '../util/error'; const fetchGetReportStore = createFetchStore( { baseName: 'getReport', controlCallback: ( { options } ) => { return API.get( 'modules', 'analytics', 'report', normalizeReportOptions( options ) ); }, reducerCallback: ( state, report, { options } ) => { return { ...state, reports: { ...state.reports, [ stringifyObject( options ) ]: report, }, }; }, argsToParams: ( options ) => { return { options }; }, validateParams: ( { options } = {} ) => { invariant( isPlainObject( options ), 'Options for Analytics report must be an object.' ); invariant( isValidDateRange( options ), 'Either date range or start/end dates must be provided for Analytics report.' ); const { metrics, dimensions, orderby } = normalizeReportOptions( options ); invariant( metrics.length, 'Requests must specify at least one metric for an Analytics report.' ); invariant( isValidMetrics( metrics ), 'Metrics for an Analytics report must be either a string, an array of strings, an object, an array of objects or a mix of strings and objects. If an object is used, it must have "expression" and "alias" properties.', ); if ( dimensions ) { invariant( isValidDimensions( dimensions ), 'Dimensions for an Analytics report must be either a string, an array of strings, an object, an array of objects or a mix of strings and objects. If an object is used, it must have "name" property.', ); } if ( orderby ) { invariant( isValidOrders( orderby ), 'Orders for an Analytics report must be either an object or an array of objects where each object should have "fieldName" and "sortOrder" properties.', ); } }, } ); const baseInitialState = { reports: {}, }; const baseResolvers = { *getReport( options = {} ) { const registry = yield Data.commonActions.getRegistry(); const existingReport = registry.select( STORE_NAME ).getReport( options ); // If there is already a report loaded in state, consider it fulfilled // and don't make an API request. if ( existingReport ) { return; } const { error } = yield fetchGetReportStore.actions.fetchGetReport( options ); // If the report was requested with AdSense metrics, set `adsenseLinked` accordingly. 
if ( normalizeReportOptions( options ).metrics.some( ( { expression } ) => /^ga:adsense/.test( expression ) ) ) { if ( isRestrictedMetricsError( error, 'ga:adsense' ) ) { // If the error is a restricted metrics error for AdSense metrics, the services are not linked. yield adsenseActions.setAdsenseLinked( false ); } else { // If there is no restricted metrics error OR the restricted metrics error // does not cite any AdSense metrics, then the services are linked. yield adsenseActions.setAdsenseLinked( true ); } } }, }; const baseSelectors = { /** * Gets an Analytics report for the given options. * * @since 1.13.0 * * @param {Object} state Data store's state. * @param {Object} options Options for generating the report. * @param {string} options.startDate Required, unless dateRange is provided. Start date to query report data for as YYYY-mm-dd. * @param {string} options.endDate Required, unless dateRange is provided. End date to query report data for as YYYY-mm-dd. * @param {string} options.dateRange Required, alternative to startDate and endDate. A date range string such as 'last-28-days'. * @param {boolean} [options.compareDateRanges] Optional. Only relevant with dateRange. Default false. * @param {boolean} [options.multiDateRange] Optional. Only relevant with dateRange. Default false. * @param {Array.<string>} options.metrics Required. List of metrics to query. * @param {Array.<string>} [options.dimensions] Optional. List of dimensions to group results by. Default an empty array. * @param {Array.<Object>} [options.orderby] Optional. An order definition object, or a list of order definition objects, each one containing 'fieldName' and 'sortOrder'. 'sortOrder' must be either 'ASCENDING' or 'DESCENDING'. Default empty array. * @param {string} [options.url] Optional. URL to get a report for only this URL. Default an empty string. * @param {number} [options.limit] Optional. Maximum number of entries to return. Default 1000. * @return {(Array.<Object>|undefined)} An Analytics report; `undefined` if not loaded. */ getReport( state, options ) { const { reports } = state; return reports[ stringifyObject( options ) ]; }, }; const store = Data.combineStores( fetchGetReportStore, { initialState: baseInitialState, resolvers: baseResolvers, selectors: baseSelectors, } ); export const initialState = store.initialState; export const actions = store.actions; export const controls = store.controls; export const reducer = store.reducer; export const resolvers = store.resolvers; export const selectors = store.selectors; export default store;
1
34,948
Let's move these down to be after all required arguments rather than in between.
google-site-kit-wp
js
@@ -90,6 +90,10 @@ namespace FlatBuffers _objectStart = 0; _numVtables = 0; _vectorNumElems = 0; + if (_sharedStringMap != null) + { + _sharedStringMap.Clear(); + } } /// <summary>
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Collections.Generic; using System.Text; /// @file /// @addtogroup flatbuffers_csharp_api /// @{ namespace FlatBuffers { /// <summary> /// Responsible for building up and accessing a FlatBuffer formatted byte /// array (via ByteBuffer). /// </summary> public class FlatBufferBuilder { private int _space; private ByteBuffer _bb; private int _minAlign = 1; // The vtable for the current table (if _vtableSize >= 0) private int[] _vtable = new int[16]; // The size of the vtable. -1 indicates no vtable private int _vtableSize = -1; // Starting offset of the current struct/table. private int _objectStart; // List of offsets of all vtables. private int[] _vtables = new int[16]; // Number of entries in `vtables` in use. private int _numVtables = 0; // For the current vector being built. private int _vectorNumElems = 0; // For CreateSharedString private Dictionary<string, StringOffset> _sharedStringMap = null; /// <summary> /// Create a FlatBufferBuilder with a given initial size. /// </summary> /// <param name="initialSize"> /// The initial size to use for the internal buffer. /// </param> public FlatBufferBuilder(int initialSize) { if (initialSize <= 0) throw new ArgumentOutOfRangeException("initialSize", initialSize, "Must be greater than zero"); _space = initialSize; _bb = new ByteBuffer(initialSize); } /// <summary> /// Create a FlatBufferBuilder backed by the pased in ByteBuffer /// </summary> /// <param name="buffer">The ByteBuffer to write to</param> public FlatBufferBuilder(ByteBuffer buffer) { _bb = buffer; _space = buffer.Length; buffer.Reset(); } /// <summary> /// Reset the FlatBufferBuilder by purging all data that it holds. /// </summary> public void Clear() { _space = _bb.Length; _bb.Reset(); _minAlign = 1; while (_vtableSize > 0) _vtable[--_vtableSize] = 0; _vtableSize = -1; _objectStart = 0; _numVtables = 0; _vectorNumElems = 0; } /// <summary> /// Gets and sets a Boolean to disable the optimization when serializing /// default values to a Table. /// /// In order to save space, fields that are set to their default value /// don't get serialized into the buffer. /// </summary> public bool ForceDefaults { get; set; } /// @cond FLATBUFFERS_INTERNAL public int Offset { get { return _bb.Length - _space; } } public void Pad(int size) { _bb.PutByte(_space -= size, 0, size); } // Doubles the size of the ByteBuffer, and copies the old data towards // the end of the new buffer (since we build the buffer backwards). void GrowBuffer() { _bb.GrowFront(_bb.Length << 1); } // Prepare to write an element of `size` after `additional_bytes` // have been written, e.g. if you write a string, you need to align // such the int length field is aligned to SIZEOF_INT, and the string // data follows it directly. // If all you need to do is align, `additional_bytes` will be 0. public void Prep(int size, int additionalBytes) { // Track the biggest thing we've ever aligned to. 
if (size > _minAlign) _minAlign = size; // Find the amount of alignment needed such that `size` is properly // aligned after `additional_bytes` var alignSize = ((~((int)_bb.Length - _space + additionalBytes)) + 1) & (size - 1); // Reallocate the buffer if needed. while (_space < alignSize + size + additionalBytes) { var oldBufSize = (int)_bb.Length; GrowBuffer(); _space += (int)_bb.Length - oldBufSize; } if (alignSize > 0) Pad(alignSize); } public void PutBool(bool x) { _bb.PutByte(_space -= sizeof(byte), (byte)(x ? 1 : 0)); } public void PutSbyte(sbyte x) { _bb.PutSbyte(_space -= sizeof(sbyte), x); } public void PutByte(byte x) { _bb.PutByte(_space -= sizeof(byte), x); } public void PutShort(short x) { _bb.PutShort(_space -= sizeof(short), x); } public void PutUshort(ushort x) { _bb.PutUshort(_space -= sizeof(ushort), x); } public void PutInt(int x) { _bb.PutInt(_space -= sizeof(int), x); } public void PutUint(uint x) { _bb.PutUint(_space -= sizeof(uint), x); } public void PutLong(long x) { _bb.PutLong(_space -= sizeof(long), x); } public void PutUlong(ulong x) { _bb.PutUlong(_space -= sizeof(ulong), x); } public void PutFloat(float x) { _bb.PutFloat(_space -= sizeof(float), x); } /// <summary> /// Puts an array of type T into this builder at the /// current offset /// </summary> /// <typeparam name="T">The type of the input data </typeparam> /// <param name="x">The array to copy data from</param> public void Put<T>(T[] x) where T : struct { _space = _bb.Put(_space, x); } #if ENABLE_SPAN_T && (UNSAFE_BYTEBUFFER || NETSTANDARD2_1) /// <summary> /// Puts a span of type T into this builder at the /// current offset /// </summary> /// <typeparam name="T">The type of the input data </typeparam> /// <param name="x">The span to copy data from</param> public void Put<T>(Span<T> x) where T : struct { _space = _bb.Put(_space, x); } #endif public void PutDouble(double x) { _bb.PutDouble(_space -= sizeof(double), x); } /// @endcond /// <summary> /// Add a `bool` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `bool` to add to the buffer.</param> public void AddBool(bool x) { Prep(sizeof(byte), 0); PutBool(x); } /// <summary> /// Add a `sbyte` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `sbyte` to add to the buffer.</param> public void AddSbyte(sbyte x) { Prep(sizeof(sbyte), 0); PutSbyte(x); } /// <summary> /// Add a `byte` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `byte` to add to the buffer.</param> public void AddByte(byte x) { Prep(sizeof(byte), 0); PutByte(x); } /// <summary> /// Add a `short` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `short` to add to the buffer.</param> public void AddShort(short x) { Prep(sizeof(short), 0); PutShort(x); } /// <summary> /// Add an `ushort` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `ushort` to add to the buffer.</param> public void AddUshort(ushort x) { Prep(sizeof(ushort), 0); PutUshort(x); } /// <summary> /// Add an `int` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `int` to add to the buffer.</param> public void AddInt(int x) { Prep(sizeof(int), 0); PutInt(x); } /// <summary> /// Add an `uint` to the buffer (aligns the data and grows if necessary). 
/// </summary> /// <param name="x">The `uint` to add to the buffer.</param> public void AddUint(uint x) { Prep(sizeof(uint), 0); PutUint(x); } /// <summary> /// Add a `long` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `long` to add to the buffer.</param> public void AddLong(long x) { Prep(sizeof(long), 0); PutLong(x); } /// <summary> /// Add an `ulong` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `ulong` to add to the buffer.</param> public void AddUlong(ulong x) { Prep(sizeof(ulong), 0); PutUlong(x); } /// <summary> /// Add a `float` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `float` to add to the buffer.</param> public void AddFloat(float x) { Prep(sizeof(float), 0); PutFloat(x); } /// <summary> /// Add an array of type T to the buffer (aligns the data and grows if necessary). /// </summary> /// <typeparam name="T">The type of the input data</typeparam> /// <param name="x">The array to copy data from</param> public void Add<T>(T[] x) where T : struct { if (x == null) { throw new ArgumentNullException("Cannot add a null array"); } if( x.Length == 0) { // don't do anything if the array is empty return; } if(!ByteBuffer.IsSupportedType<T>()) { throw new ArgumentException("Cannot add this Type array to the builder"); } int size = ByteBuffer.SizeOf<T>(); // Need to prep on size (for data alignment) and then we pass the // rest of the length (minus 1) as additional bytes Prep(size, size * (x.Length - 1)); Put(x); } #if ENABLE_SPAN_T && (UNSAFE_BYTEBUFFER || NETSTANDARD2_1) /// <summary> /// Add a span of type T to the buffer (aligns the data and grows if necessary). /// </summary> /// <typeparam name="T">The type of the input data</typeparam> /// <param name="x">The span to copy data from</param> public void Add<T>(Span<T> x) where T : struct { if (!ByteBuffer.IsSupportedType<T>()) { throw new ArgumentException("Cannot add this Type array to the builder"); } int size = ByteBuffer.SizeOf<T>(); // Need to prep on size (for data alignment) and then we pass the // rest of the length (minus 1) as additional bytes Prep(size, size * (x.Length - 1)); Put(x); } #endif /// <summary> /// Add a `double` to the buffer (aligns the data and grows if necessary). /// </summary> /// <param name="x">The `double` to add to the buffer.</param> public void AddDouble(double x) { Prep(sizeof(double), 0); PutDouble(x); } /// <summary> /// Adds an offset, relative to where it will be written. /// </summary> /// <param name="off">The offset to add to the buffer.</param> public void AddOffset(int off) { Prep(sizeof(int), 0); // Ensure alignment is already done. if (off > Offset) throw new ArgumentException(); off = Offset - off + sizeof(int); PutInt(off); } /// @cond FLATBUFFERS_INTERNAL public void StartVector(int elemSize, int count, int alignment) { NotNested(); _vectorNumElems = count; Prep(sizeof(int), elemSize * count); Prep(alignment, elemSize * count); // Just in case alignment > int. } /// @endcond /// <summary> /// Writes data necessary to finish a vector construction. /// </summary> public VectorOffset EndVector() { PutInt(_vectorNumElems); return new VectorOffset(Offset); } /// <summary> /// Creates a vector of tables. 
/// </summary> /// <param name="offsets">Offsets of the tables.</param> public VectorOffset CreateVectorOfTables<T>(Offset<T>[] offsets) where T : struct { NotNested(); StartVector(sizeof(int), offsets.Length, sizeof(int)); for (int i = offsets.Length - 1; i >= 0; i--) AddOffset(offsets[i].Value); return EndVector(); } /// @cond FLATBUFFERS_INTENRAL public void Nested(int obj) { // Structs are always stored inline, so need to be created right // where they are used. You'll get this assert if you created it // elsewhere. if (obj != Offset) throw new Exception( "FlatBuffers: struct must be serialized inline."); } public void NotNested() { // You should not be creating any other objects or strings/vectors // while an object is being constructed if (_vtableSize >= 0) throw new Exception( "FlatBuffers: object serialization must not be nested."); } public void StartTable(int numfields) { if (numfields < 0) throw new ArgumentOutOfRangeException("Flatbuffers: invalid numfields"); NotNested(); if (_vtable.Length < numfields) _vtable = new int[numfields]; _vtableSize = numfields; _objectStart = Offset; } // Set the current vtable at `voffset` to the current location in the // buffer. public void Slot(int voffset) { if (voffset >= _vtableSize) throw new IndexOutOfRangeException("Flatbuffers: invalid voffset"); _vtable[voffset] = Offset; } /// <summary> /// Adds a Boolean to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddBool(int o, bool x, bool d) { if (ForceDefaults || x != d) { AddBool(x); Slot(o); } } /// <summary> /// Adds a SByte to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddSbyte(int o, sbyte x, sbyte d) { if (ForceDefaults || x != d) { AddSbyte(x); Slot(o); } } /// <summary> /// Adds a Byte to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddByte(int o, byte x, byte d) { if (ForceDefaults || x != d) { AddByte(x); Slot(o); } } /// <summary> /// Adds a Int16 to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. 
If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddShort(int o, short x, int d) { if (ForceDefaults || x != d) { AddShort(x); Slot(o); } } /// <summary> /// Adds a UInt16 to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddUshort(int o, ushort x, ushort d) { if (ForceDefaults || x != d) { AddUshort(x); Slot(o); } } /// <summary> /// Adds an Int32 to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddInt(int o, int x, int d) { if (ForceDefaults || x != d) { AddInt(x); Slot(o); } } /// <summary> /// Adds a UInt32 to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddUint(int o, uint x, uint d) { if (ForceDefaults || x != d) { AddUint(x); Slot(o); } } /// <summary> /// Adds an Int64 to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddLong(int o, long x, long d) { if (ForceDefaults || x != d) { AddLong(x); Slot(o); } } /// <summary> /// Adds a UInt64 to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddUlong(int o, ulong x, ulong d) { if (ForceDefaults || x != d) { AddUlong(x); Slot(o); } } /// <summary> /// Adds a Single to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. 
If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddFloat(int o, float x, double d) { if (ForceDefaults || x != d) { AddFloat(x); Slot(o); } } /// <summary> /// Adds a Double to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// and <see cref="ForceDefaults"/> is false, the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddDouble(int o, double x, double d) { if (ForceDefaults || x != d) { AddDouble(x); Slot(o); } } /// <summary> /// Adds a buffer offset to the Table at index `o` in its vtable using the value `x` and default `d` /// </summary> /// <param name="o">The index into the vtable</param> /// <param name="x">The value to put into the buffer. If the value is equal to the default /// the value will be skipped.</param> /// <param name="d">The default value to compare the value against</param> public void AddOffset(int o, int x, int d) { if (x != d) { AddOffset(x); Slot(o); } } /// @endcond /// <summary> /// Encode the string `s` in the buffer using UTF-8. /// </summary> /// <param name="s">The string to encode.</param> /// <returns> /// The offset in the buffer where the encoded string starts. /// </returns> public StringOffset CreateString(string s) { if (s == null) { return new StringOffset(0); } NotNested(); AddByte(0); var utf8StringLen = Encoding.UTF8.GetByteCount(s); StartVector(1, utf8StringLen, 1); _bb.PutStringUTF8(_space -= utf8StringLen, s); return new StringOffset(EndVector().Value); } #if ENABLE_SPAN_T && (UNSAFE_BYTEBUFFER || NETSTANDARD2_1) /// <summary> /// Creates a string in the buffer from a Span containing /// a UTF8 string. /// </summary> /// <param name="chars">the UTF8 string to add to the buffer</param> /// <returns> /// The offset in the buffer where the encoded string starts. /// </returns> public StringOffset CreateUTF8String(Span<byte> chars) { NotNested(); AddByte(0); var utf8StringLen = chars.Length; StartVector(1, utf8StringLen, 1); _space = _bb.Put(_space, chars); return new StringOffset(EndVector().Value); } #endif /// <summary> /// Store a string in the buffer, which can contain any binary data. /// If a string with this exact contents has already been serialized before, /// instead simply returns the offset of the existing string. /// </summary> /// <param name="s">The string to encode.</param> /// <returns> /// The offset in the buffer where the encoded string starts. /// </returns> public StringOffset CreateSharedString(string s) { if (s == null) { return new StringOffset(0); } if (_sharedStringMap == null) { _sharedStringMap = new Dictionary<string, StringOffset>(); } if (_sharedStringMap.ContainsKey(s)) { return _sharedStringMap[s]; } var stringOffset = CreateString(s); _sharedStringMap.Add(s, stringOffset); return stringOffset; } /// @cond FLATBUFFERS_INTERNAL // Structs are stored inline, so nothing additional is being added. // `d` is always 0. 
public void AddStruct(int voffset, int x, int d) { if (x != d) { Nested(x); Slot(voffset); } } public int EndTable() { if (_vtableSize < 0) throw new InvalidOperationException( "Flatbuffers: calling EndTable without a StartTable"); AddInt((int)0); var vtableloc = Offset; // Write out the current vtable. int i = _vtableSize - 1; // Trim trailing zeroes. for (; i >= 0 && _vtable[i] == 0; i--) {} int trimmedSize = i + 1; for (; i >= 0 ; i--) { // Offset relative to the start of the table. short off = (short)(_vtable[i] != 0 ? vtableloc - _vtable[i] : 0); AddShort(off); // clear out written entry _vtable[i] = 0; } const int standardFields = 2; // The fields below: AddShort((short)(vtableloc - _objectStart)); AddShort((short)((trimmedSize + standardFields) * sizeof(short))); // Search for an existing vtable that matches the current one. int existingVtable = 0; for (i = 0; i < _numVtables; i++) { int vt1 = _bb.Length - _vtables[i]; int vt2 = _space; short len = _bb.GetShort(vt1); if (len == _bb.GetShort(vt2)) { for (int j = sizeof(short); j < len; j += sizeof(short)) { if (_bb.GetShort(vt1 + j) != _bb.GetShort(vt2 + j)) { goto endLoop; } } existingVtable = _vtables[i]; break; } endLoop: { } } if (existingVtable != 0) { // Found a match: // Remove the current vtable. _space = _bb.Length - vtableloc; // Point table to existing vtable. _bb.PutInt(_space, existingVtable - vtableloc); } else { // No match: // Add the location of the current vtable to the list of // vtables. if (_numVtables == _vtables.Length) { // Arrays.CopyOf(vtables num_vtables * 2); var newvtables = new int[ _numVtables * 2]; Array.Copy(_vtables, newvtables, _vtables.Length); _vtables = newvtables; }; _vtables[_numVtables++] = Offset; // Point table to current vtable. _bb.PutInt(_bb.Length - vtableloc, Offset - vtableloc); } _vtableSize = -1; return vtableloc; } // This checks a required field has been set in a given table that has // just been constructed. public void Required(int table, int field) { int table_start = _bb.Length - table; int vtable_start = table_start - _bb.GetInt(table_start); bool ok = _bb.GetShort(vtable_start + field) != 0; // If this fails, the caller will show what field needs to be set. if (!ok) throw new InvalidOperationException("FlatBuffers: field " + field + " must be set"); } /// @endcond /// <summary> /// Finalize a buffer, pointing to the given `root_table`. /// </summary> /// <param name="rootTable"> /// An offset to be added to the buffer. /// </param> /// <param name="sizePrefix"> /// Whether to prefix the size to the buffer. /// </param> protected void Finish(int rootTable, bool sizePrefix) { Prep(_minAlign, sizeof(int) + (sizePrefix ? sizeof(int) : 0)); AddOffset(rootTable); if (sizePrefix) { AddInt(_bb.Length - _space); } _bb.Position = _space; } /// <summary> /// Finalize a buffer, pointing to the given `root_table`. /// </summary> /// <param name="rootTable"> /// An offset to be added to the buffer. /// </param> public void Finish(int rootTable) { Finish(rootTable, false); } /// <summary> /// Finalize a buffer, pointing to the given `root_table`, with the size prefixed. /// </summary> /// <param name="rootTable"> /// An offset to be added to the buffer. /// </param> public void FinishSizePrefixed(int rootTable) { Finish(rootTable, true); } /// <summary> /// Get the ByteBuffer representing the FlatBuffer. /// </summary> /// <remarks> /// This is typically only called after you call `Finish()`. /// The actual data starts at the ByteBuffer's current position, /// not necessarily at `0`. 
/// </remarks> /// <returns> /// Returns the ByteBuffer for this FlatBuffer. /// </returns> public ByteBuffer DataBuffer { get { return _bb; } } /// <summary> /// A utility function to copy and return the ByteBuffer data as a /// `byte[]`. /// </summary> /// <returns> /// A full copy of the FlatBuffer data. /// </returns> public byte[] SizedByteArray() { return _bb.ToSizedArray(); } /// <summary> /// Finalize a buffer, pointing to the given `rootTable`. /// </summary> /// <param name="rootTable"> /// An offset to be added to the buffer. /// </param> /// <param name="fileIdentifier"> /// A FlatBuffer file identifier to be added to the buffer before /// `root_table`. /// </param> /// <param name="sizePrefix"> /// Whether to prefix the size to the buffer. /// </param> protected void Finish(int rootTable, string fileIdentifier, bool sizePrefix) { Prep(_minAlign, sizeof(int) + (sizePrefix ? sizeof(int) : 0) + FlatBufferConstants.FileIdentifierLength); if (fileIdentifier.Length != FlatBufferConstants.FileIdentifierLength) throw new ArgumentException( "FlatBuffers: file identifier must be length " + FlatBufferConstants.FileIdentifierLength, "fileIdentifier"); for (int i = FlatBufferConstants.FileIdentifierLength - 1; i >= 0; i--) { AddByte((byte)fileIdentifier[i]); } Finish(rootTable, sizePrefix); } /// <summary> /// Finalize a buffer, pointing to the given `rootTable`. /// </summary> /// <param name="rootTable"> /// An offset to be added to the buffer. /// </param> /// <param name="fileIdentifier"> /// A FlatBuffer file identifier to be added to the buffer before /// `root_table`. /// </param> public void Finish(int rootTable, string fileIdentifier) { Finish(rootTable, fileIdentifier, false); } /// <summary> /// Finalize a buffer, pointing to the given `rootTable`, with the size prefixed. /// </summary> /// <param name="rootTable"> /// An offset to be added to the buffer. /// </param> /// <param name="fileIdentifier"> /// A FlatBuffer file identifier to be added to the buffer before /// `root_table`. /// </param> public void FinishSizePrefixed(int rootTable, string fileIdentifier) { Finish(rootTable, fileIdentifier, true); } } } /// @}
1
19,532
Should we just null the Map and let the GC handle the memory? Clearing just removes items, but not capacity, so this would leave some memory on the table.
google-flatbuffers
java
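The review comment in the record above weighs two ways of releasing the builder's shared-string cache: `Clear()` drops the entries but keeps the grown capacity, while nulling the field hands the whole map back to the garbage collector. As a hedged illustration of that trade-off — not the project's actual builder; the class, field, and method names below are hypothetical — here is a minimal Java sketch:

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative only: a hypothetical builder with a shared-string cache,
// showing the difference the reviewer is pointing at.
public class SharedStringCacheDemo {
    private Map<String, Integer> sharedStringMap = new HashMap<>();

    // clear() removes the entries but keeps the map's internal bucket array,
    // so capacity grown during use is still retained by this object.
    public void resetByClearing() {
        if (sharedStringMap != null) {
            sharedStringMap.clear();
        }
    }

    // Nulling the field releases the entire map (entries and backing array)
    // to the garbage collector; the next use pays the cost of re-allocating.
    public void resetByNulling() {
        sharedStringMap = null;
    }

    // Lazily re-creates the cache after it has been nulled.
    public int intern(String s) {
        if (sharedStringMap == null) {
            sharedStringMap = new HashMap<>();
        }
        Integer existing = sharedStringMap.get(s);
        if (existing != null) {
            return existing;
        }
        int id = sharedStringMap.size();
        sharedStringMap.put(s, id);
        return id;
    }

    public static void main(String[] args) {
        SharedStringCacheDemo demo = new SharedStringCacheDemo();
        demo.intern("a");
        demo.intern("b");
        demo.resetByNulling();                 // memory handed back to the GC
        System.out.println(demo.intern("a"));  // cache rebuilt on demand -> prints 0
    }
}
```

The design point is the one the reviewer raises: clearing keeps the object warm for reuse at the cost of retained memory, nulling trades a re-allocation on the next use for a smaller steady-state footprint.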
@@ -107,7 +107,8 @@ func (i *Inbound) start() error { return errRouterNotSet } - handler := newHandler(i) + handler := newHandler(i, i.t.options.logger) + // handler := &handler{i: i, logger: i.t.options.logger} server := grpc.NewServer( grpc.CustomCodec(customCodec{}),
1
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package grpc import ( "net" "sync" "go.uber.org/yarpc/api/transport" "go.uber.org/yarpc/pkg/lifecycle" "go.uber.org/yarpc/yarpcerrors" "go.uber.org/zap" "google.golang.org/grpc" ) var ( errRouterNotSet = yarpcerrors.Newf(yarpcerrors.CodeInternal, "router not set") _ transport.Inbound = (*Inbound)(nil) ) // Inbound is a grpc transport.Inbound. type Inbound struct { once *lifecycle.Once lock sync.RWMutex t *Transport listener net.Listener options *inboundOptions router transport.Router server *grpc.Server } // newInbound returns a new Inbound for the given listener. func newInbound(t *Transport, listener net.Listener, options ...InboundOption) *Inbound { return &Inbound{ once: lifecycle.NewOnce(), t: t, listener: listener, options: newInboundOptions(options), } } // Start implements transport.Lifecycle#Start. func (i *Inbound) Start() error { return i.once.Start(i.start) } // Stop implements transport.Lifecycle#Stop. func (i *Inbound) Stop() error { return i.once.Stop(i.stop) } // IsRunning implements transport.Lifecycle#IsRunning. func (i *Inbound) IsRunning() bool { return i.once.IsRunning() } // SetRouter implements transport.Inbound#SetRouter. func (i *Inbound) SetRouter(router transport.Router) { i.lock.Lock() defer i.lock.Unlock() i.router = router } // Addr returns the address on which the server is listening. // // Returns nil if Start has not been called yet func (i *Inbound) Addr() net.Addr { i.lock.RLock() defer i.lock.RUnlock() // i.server is set in start, so checking against nil checks // if Start has been called // we check if i.listener is nil just for safety if i.server == nil || i.listener == nil { return nil } return i.listener.Addr() } // Transports implements transport.Inbound#Transports. 
func (i *Inbound) Transports() []transport.Transport { return []transport.Transport{i.t} } func (i *Inbound) start() error { i.lock.Lock() defer i.lock.Unlock() if i.router == nil { return errRouterNotSet } handler := newHandler(i) server := grpc.NewServer( grpc.CustomCodec(customCodec{}), grpc.UnknownServiceHandler(handler.handle), grpc.MaxRecvMsgSize(i.t.options.serverMaxRecvMsgSize), grpc.MaxSendMsgSize(i.t.options.serverMaxSendMsgSize), ) go func() { i.t.options.logger.Info("started GRPC inbound", zap.Stringer("address", i.listener.Addr())) if len(i.router.Procedures()) == 0 { i.t.options.logger.Warn("no procedures specified for GRPC inbound") } // TODO there should be some mechanism to block here // there is a race because the listener gets set in the grpc // Server implementation and we should be able to block // until Serve initialization is done // // It would be even better if we could do this outside the // lock in i // // TODO Server always returns a non-nil error but should // we do something with some or all errors? _ = server.Serve(i.listener) }() i.server = server return nil } func (i *Inbound) stop() error { i.lock.Lock() defer i.lock.Unlock() if i.server != nil { i.server.GracefulStop() } i.server = nil return nil } type noopGrpcStruct struct{}
1
16,542
did you mean to delete this line?
yarpc-yarpc-go
go
@@ -420,6 +420,10 @@ public class DatasetField implements Serializable { } else if (template != null) { return template.getDataverse(); } else { + + System.out.print("getDataverseException: " + this.datasetFieldType.getDisplayName()); + System.out.print("getDataverseException Compound: " + this.getCompoundDisplayValue()); + System.out.print("getDataverseException Regular: " + this.getDisplayValue()); throw new IllegalStateException("DatasetField is in an illegal state: no dataset version, compound value, or template is set as its parent."); } }
1
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package edu.harvard.iq.dataverse; /** * * @author skraffmiller */ import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import javax.persistence.CascadeType; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Index; import javax.persistence.JoinColumn; import javax.persistence.JoinTable; import javax.persistence.ManyToMany; import javax.persistence.ManyToOne; import javax.persistence.OneToMany; import javax.persistence.OrderBy; import javax.persistence.Table; import javax.persistence.Transient; import org.apache.commons.lang.StringUtils; @Entity @ValidateDatasetFieldType @Table(indexes = {@Index(columnList="datasetfieldtype_id"),@Index(columnList="datasetversion_id"), @Index(columnList="parentdatasetfieldcompoundvalue_id"),@Index(columnList="template_id")}) public class DatasetField implements Serializable { private static final long serialVersionUID = 1L; public static final String NA_VALUE = "N/A"; /** * Orders dataset fields by their display order. */ public static final Comparator<DatasetField> DisplayOrder = new Comparator<DatasetField>() { @Override public int compare(DatasetField o1, DatasetField o2) { return Integer.compare( o1.getDatasetFieldType().getDisplayOrder(), o2.getDatasetFieldType().getDisplayOrder() ); }}; public static DatasetField createNewEmptyDatasetField(DatasetFieldType dsfType, Object dsv) { DatasetField dsfv = createNewEmptyDatasetField(dsfType); //TODO - a better way to handle this? if (dsv.getClass().getName().equals("edu.harvard.iq.dataverse.DatasetVersion")){ dsfv.setDatasetVersion((DatasetVersion)dsv); } else { dsfv.setTemplate((Template)dsv); } return dsfv; } // originally this was an overloaded method, but we renamed it to get around an issue with Bean Validation // (that looked t overloaded methods, when it meant to look at overriden methods public static DatasetField createNewEmptyChildDatasetField(DatasetFieldType dsfType, DatasetFieldCompoundValue compoundValue) { DatasetField dsfv = createNewEmptyDatasetField(dsfType); dsfv.setParentDatasetFieldCompoundValue(compoundValue); return dsfv; } private static DatasetField createNewEmptyDatasetField(DatasetFieldType dsfType) { DatasetField dsfv = new DatasetField(); dsfv.setDatasetFieldType(dsfType); if (dsfType.isPrimitive()) { if (!dsfType.isControlledVocabulary()) { dsfv.getDatasetFieldValues().add(new DatasetFieldValue(dsfv)); } } else { // compound field dsfv.getDatasetFieldCompoundValues().add(DatasetFieldCompoundValue.createNewEmptyDatasetFieldCompoundValue(dsfv)); } return dsfv; } /** * Groups a list of fields by the block they belong to. * * @param fields well, duh. * @return a map, mapping each block to the fields that belong to it. 
*/ public static Map<MetadataBlock, List<DatasetField>> groupByBlock(List<DatasetField> fields) { Map<MetadataBlock, List<DatasetField>> retVal = new HashMap<>(); for (DatasetField f : fields) { MetadataBlock metadataBlock = f.getDatasetFieldType().getMetadataBlock(); List<DatasetField> lst = retVal.get(metadataBlock); if (lst == null) { retVal.put(metadataBlock, new LinkedList<>(Collections.singleton(f))); } else { lst.add(f); } } return retVal; } @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private Long id; public Long getId() { return id; } public void setId(Long id) { this.id = id; } @ManyToOne @JoinColumn(nullable = false) private DatasetFieldType datasetFieldType; public DatasetFieldType getDatasetFieldType() { return datasetFieldType; } public void setDatasetFieldType(DatasetFieldType datasetField) { this.datasetFieldType = datasetField; } @ManyToOne private DatasetVersion datasetVersion; public DatasetVersion getDatasetVersion() { return datasetVersion; } public void setDatasetVersion(DatasetVersion datasetVersion) { this.datasetVersion = datasetVersion; } @ManyToOne private Template template; public Template getTemplate() { return template; } public void setTemplate(Template template) { this.template = template; } @ManyToOne(cascade = CascadeType.MERGE) private DatasetFieldCompoundValue parentDatasetFieldCompoundValue; public DatasetFieldCompoundValue getParentDatasetFieldCompoundValue() { return parentDatasetFieldCompoundValue; } public void setParentDatasetFieldCompoundValue(DatasetFieldCompoundValue parentDatasetFieldCompoundValue) { this.parentDatasetFieldCompoundValue = parentDatasetFieldCompoundValue; } @OneToMany(mappedBy = "parentDatasetField", orphanRemoval = true, cascade = {CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST}) @OrderBy("displayOrder ASC") private List<DatasetFieldCompoundValue> datasetFieldCompoundValues = new ArrayList<>(); public List<DatasetFieldCompoundValue> getDatasetFieldCompoundValues() { return datasetFieldCompoundValues; } public void setDatasetFieldCompoundValues(List<DatasetFieldCompoundValue> datasetFieldCompoundValues) { this.datasetFieldCompoundValues = datasetFieldCompoundValues; } @OneToMany(mappedBy = "datasetField", orphanRemoval = true, cascade = {CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST}) @OrderBy("displayOrder ASC") private List<DatasetFieldValue> datasetFieldValues = new ArrayList<>(); public List<DatasetFieldValue> getDatasetFieldValues() { return this.datasetFieldValues; } public void setDatasetFieldValues(List<DatasetFieldValue> datasetFieldValues) { this.datasetFieldValues = datasetFieldValues; } @ManyToMany(cascade = {CascadeType.MERGE}) @JoinTable(indexes = {@Index(columnList="datasetfield_id"),@Index(columnList="controlledvocabularyvalues_id")}) private List<ControlledVocabularyValue> controlledVocabularyValues = new ArrayList<>(); public List<ControlledVocabularyValue> getControlledVocabularyValues() { return controlledVocabularyValues; } public void setControlledVocabularyValues(List<ControlledVocabularyValue> controlledVocabularyValues) { this.controlledVocabularyValues = controlledVocabularyValues; } // HELPER METHODS public DatasetFieldValue getSingleValue() { if (!datasetFieldValues.isEmpty()) { return datasetFieldValues.get(0); } else { return new DatasetFieldValue(this); } } public void setSingleValue(String value) { if (datasetFieldValues.isEmpty()) { datasetFieldValues.add(new DatasetFieldValue(this)); } datasetFieldValues.get(0).setValue(value); } public 
ControlledVocabularyValue getSingleControlledVocabularyValue() { if (!controlledVocabularyValues.isEmpty()) { return controlledVocabularyValues.get(0); } else { return null; } } public void setSingleControlledVocabularyValue(ControlledVocabularyValue cvv) { if (!controlledVocabularyValues.isEmpty()) { controlledVocabularyValues.set(0, cvv); } else { controlledVocabularyValues.add(cvv); } } public String getValue() { if (!datasetFieldValues.isEmpty()) { return datasetFieldValues.get(0).getValue(); } else if (controlledVocabularyValues != null && !controlledVocabularyValues.isEmpty()) { if (controlledVocabularyValues.get(0) != null){ return controlledVocabularyValues.get(0).getStrValue(); } } return null; } public String getDisplayValue() { String returnString = ""; for (String value : getValues()) { if(value == null) { value=""; } returnString += (returnString.isEmpty() ? "" : "; ") + value.trim(); } return returnString; } public String getCompoundDisplayValue() { String returnString = ""; for (DatasetFieldCompoundValue dscv : datasetFieldCompoundValues) { for (DatasetField dsf : dscv.getChildDatasetFields()) { for (String value : dsf.getValues()) { if (!(value == null)) { returnString += (returnString.isEmpty() ? "" : "; ") + value.trim(); } } } } return returnString; } /** * despite the name, this returns a list of display values; not a list of values */ public List<String> getValues() { List<String> returnList = new ArrayList<>(); if (!datasetFieldValues.isEmpty()) { for (DatasetFieldValue dsfv : datasetFieldValues) { returnList.add(dsfv.getDisplayValue()); } } else { for (ControlledVocabularyValue cvv : controlledVocabularyValues) { if (cvv != null && cvv.getStrValue() != null) { returnList.add(cvv.getStrValue()); } } } return returnList; } /** * list of values (as opposed to display values). 
* used for passing to solr for indexing */ public List<String> getValues_nondisplay() { List returnList = new ArrayList(); if (!datasetFieldValues.isEmpty()) { for (DatasetFieldValue dsfv : datasetFieldValues) { String value = dsfv.getValue(); if (value != null) { returnList.add(value); } } } else { for (ControlledVocabularyValue cvv : controlledVocabularyValues) { if (cvv != null && cvv.getStrValue() != null) { returnList.add(cvv.getStrValue()); } } } return returnList; } /** * appears to be only used for sending info to solr; changed to return values * instead of display values */ public List<String> getValuesWithoutNaValues() { List<String> returnList = getValues_nondisplay(); returnList.removeAll(Arrays.asList(NA_VALUE)); return returnList; } public boolean isEmpty() { return isEmpty(false); } public boolean isEmptyForDisplay() { return isEmpty(true); } private boolean isEmpty(boolean forDisplay) { if (datasetFieldType.isPrimitive()) { // primitive for (String value : getValues()) { if (!StringUtils.isBlank(value) && !(forDisplay && DatasetField.NA_VALUE.equals(value))) { return false; } } } else { // compound for (DatasetFieldCompoundValue cv : datasetFieldCompoundValues) { for (DatasetField subField : cv.getChildDatasetFields()) { if (!subField.isEmpty(forDisplay)) { return false; } } } } return true; } @Transient private String validationMessage; public String getValidationMessage() { return validationMessage; } public void setValidationMessage(String validationMessage) { this.validationMessage = validationMessage; } @Transient private Boolean required; public boolean isRequired() { if (required == null) { required = false; if (this.datasetFieldType.isPrimitive() && this.datasetFieldType.isRequired()) { required = true; } if (this.datasetFieldType.isHasRequiredChildren()) { required = true; } Dataverse dv = getDataverse(); while (!dv.isMetadataBlockRoot()) { if (dv.getOwner() == null) { break; // we are at the root; which by defintion is metadata blcok root, regarldess of the value } dv = dv.getOwner(); } List<DataverseFieldTypeInputLevel> dftilListFirst = dv.getDataverseFieldTypeInputLevels(); if (!getDatasetFieldType().isHasChildren()) { for (DataverseFieldTypeInputLevel dsftil : dftilListFirst) { if (dsftil.getDatasetFieldType().equals(this.datasetFieldType)) { required = dsftil.isRequired(); } } } if (getDatasetFieldType().isHasChildren() && (!dftilListFirst.isEmpty())) { for (DatasetFieldType child : getDatasetFieldType().getChildDatasetFieldTypes()) { for (DataverseFieldTypeInputLevel dftilTest : dftilListFirst) { if (child.equals(dftilTest.getDatasetFieldType())) { if (dftilTest.isRequired()) { required = true; } } } } } } // logger.fine("at return " + this.datasetFieldType.getDisplayName() + " " + required); return required; } public Dataverse getDataverse() { if (datasetVersion != null) { return datasetVersion.getDataset().getOwner(); } else if (parentDatasetFieldCompoundValue != null) { return parentDatasetFieldCompoundValue.getParentDatasetField().getDataverse(); } else if (template != null) { return template.getDataverse(); } else { throw new IllegalStateException("DatasetField is in an illegal state: no dataset version, compound value, or template is set as its parent."); } } @Transient private boolean include; public void setInclude(boolean include){ this.include = include; } public boolean isInclude(){ return this.include; } @Override public int hashCode() { int hash = 0; hash += (id != null ? 
id.hashCode() : 0); return hash; } @Override public boolean equals(Object object) { // TODO: Warning - this method won't work in the case the id fields are not set if (!(object instanceof DatasetField)) { return false; } DatasetField other = (DatasetField) object; return !((this.id == null && other.id != null) || (this.id != null && !this.id.equals(other.id))); } @Override public String toString() { return "edu.harvard.iq.dataverse.DatasetField[ id=" + id + " ]"; } public DatasetField copy(Object version) { return copy(version, null); } // originally this was an overloaded method, but we renamed it to get around an issue with Bean Validation // (that looked t overloaded methods, when it meant to look at overriden methods public DatasetField copyChild(DatasetFieldCompoundValue parent) { return copy(null, parent); } private DatasetField copy(Object version, DatasetFieldCompoundValue parent) { DatasetField dsf = new DatasetField(); dsf.setDatasetFieldType(datasetFieldType); if (version != null) { if (version.getClass().getName().equals("edu.harvard.iq.dataverse.DatasetVersion")) { dsf.setDatasetVersion((DatasetVersion) version); } else { dsf.setTemplate((Template) version); } } dsf.setParentDatasetFieldCompoundValue(parent); dsf.setControlledVocabularyValues(controlledVocabularyValues); for (DatasetFieldValue dsfv : datasetFieldValues) { dsf.getDatasetFieldValues().add(dsfv.copy(dsf)); } for (DatasetFieldCompoundValue compoundValue : datasetFieldCompoundValues) { dsf.getDatasetFieldCompoundValues().add(compoundValue.copy(dsf)); } return dsf; } public boolean removeBlankDatasetFieldValues() { if (this.getDatasetFieldType().isPrimitive()) { if (!this.getDatasetFieldType().isControlledVocabulary()) { Iterator<DatasetFieldValue> dsfvIt = this.getDatasetFieldValues().iterator(); while (dsfvIt.hasNext()) { DatasetFieldValue dsfv = dsfvIt.next(); if (StringUtils.isBlank(dsfv.getValue())) { dsfvIt.remove(); } } if (this.getDatasetFieldValues().isEmpty()) { return true; } } else { // controlled vocab if (this.getControlledVocabularyValues().isEmpty()) { return true; } } } else if (this.getDatasetFieldType().isCompound()) { Iterator<DatasetFieldCompoundValue> cvIt = this.getDatasetFieldCompoundValues().iterator(); while (cvIt.hasNext()) { DatasetFieldCompoundValue cv = cvIt.next(); Iterator<DatasetField> dsfIt = cv.getChildDatasetFields().iterator(); while (dsfIt.hasNext()) { if (dsfIt.next().removeBlankDatasetFieldValues()) { dsfIt.remove(); } } if (cv.getChildDatasetFields().isEmpty()) { cvIt.remove(); } } if (this.getDatasetFieldCompoundValues().isEmpty()) { return true; } } return false; } public void setValueDisplayOrder() { if (this.getDatasetFieldType().isPrimitive() && !this.getDatasetFieldType().isControlledVocabulary()) { for (int i = 0; i < datasetFieldValues.size(); i++) { datasetFieldValues.get(i).setDisplayOrder(i); } } else if (this.getDatasetFieldType().isCompound()) { for (int i = 0; i < datasetFieldCompoundValues.size(); i++) { DatasetFieldCompoundValue compoundValue = datasetFieldCompoundValues.get(i); compoundValue.setDisplayOrder(i); for (DatasetField dsf : compoundValue.getChildDatasetFields()) { dsf.setValueDisplayOrder(); } } } } public void addDatasetFieldValue(int index) { datasetFieldValues.add(index, new DatasetFieldValue(this)); } public void removeDatasetFieldValue(int index) { datasetFieldValues.remove(index); } public void addDatasetFieldCompoundValue(int index) { datasetFieldCompoundValues.add(index, 
DatasetFieldCompoundValue.createNewEmptyDatasetFieldCompoundValue(this)); } public void removeDatasetFieldCompoundValue(int index) { datasetFieldCompoundValues.remove(index); } /** * If this is a FieldType.TEXT or FieldType.TEXTBOX, then run it through the markup checker * * @return */ public boolean needsTextCleaning(){ if (this.getDatasetFieldType() == null || this.getDatasetFieldType().getFieldType() == null){ return false; } if (this.datasetFieldType.getFieldType().equals(DatasetFieldType.FieldType.TEXT)){ return true; } else if (this.datasetFieldType.getFieldType().equals(DatasetFieldType.FieldType.TEXTBOX)){ return true; } return false; } // end: needsTextCleaning }
1
37,876
Should we use logger instead?
IQSS-dataverse
java
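The review in the record above asks whether the temporary `System.out.print` diagnostics added by the patch should go through a logger instead. A minimal sketch of that alternative, assuming `java.util.logging` — the class name, logger setup, and message text are illustrative, not the project's actual change:

```java
import java.util.logging.Level;
import java.util.logging.Logger;

// Illustrative only: routing the diagnostic output through a Logger instead of
// System.out.print, so it carries a level and can be filtered or silenced.
public class GetDataverseDiagnostics {

    private static final Logger logger =
            Logger.getLogger(GetDataverseDiagnostics.class.getName());

    public static void reportIllegalState(String displayName,
                                          String compoundValue,
                                          String regularValue) {
        // Parameterized log calls avoid string concatenation when the level is disabled.
        logger.log(Level.FINE, "getDataverse failed for field type: {0}", displayName);
        logger.log(Level.FINE, "compound display value: {0}", compoundValue);
        logger.log(Level.FINE, "regular display value: {0}", regularValue);
        throw new IllegalStateException(
                "DatasetField is in an illegal state: no dataset version, compound value, "
                        + "or template is set as its parent.");
    }

    public static void main(String[] args) {
        try {
            reportIllegalState("Title", "", "");
        } catch (IllegalStateException expected) {
            System.out.println("threw as expected: " + expected.getMessage());
        }
    }
}
```

Compared with printing to stdout, the logged variant can be dialed down (or off) in production without another code change, which is presumably what the reviewer is getting at.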
@@ -5,7 +5,7 @@ describe AnalyticsHelper do it "is true when ENV['ANALYTICS'] is present" do ENV['ANALYTICS'] = 'anything' - expect(analytics?).to be_true + expect(analytics?).to be true ENV['ANALYTICS'] = nil end
1
require 'spec_helper' describe AnalyticsHelper do describe '#analytics?' do it "is true when ENV['ANALYTICS'] is present" do ENV['ANALYTICS'] = 'anything' expect(analytics?).to be_true ENV['ANALYTICS'] = nil end it "is false when ENV['ANALYTICS'] is not present" do ENV['ANALYTICS'] = nil expect(analytics?).to be_false end end describe '#analytics_hash' do it 'returns a hash of data to be sent to analytics' do user = build_stubbed(:user, stripe_customer_id: 'something') expect(analytics_hash(user)).to eq( created: user.created_at, email: user.email, has_active_subscription: user.has_active_subscription?, has_logged_in_to_forum: user.has_logged_in_to_forum?, mentor_name: user.mentor_name, name: user.name, plan: user.plan_name, subscribed_at: user.subscribed_at, username: user.github_username, stripe_customer_url: StripeCustomer.new(user).url ) end end end
1
10,551
I think it would be preferred to do `expect(helper).to be_analytics`
thoughtbot-upcase
rb
@@ -10089,7 +10089,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})] if key is None: raise KeyError("none key") - if isinstance(key, (str, tuple, list)): + if isinstance(key, (str, tuple, list, pd.Index)): return self.loc[:, key] elif isinstance(key, slice): if any(type(n) == int or None for n in [key.start, key.stop]):
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ from collections import OrderedDict, defaultdict from distutils.version import LooseVersion import re import warnings import inspect import json import types from functools import partial, reduce import sys from itertools import zip_longest from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar, Iterable, Dict, Callable import numpy as np import pandas as pd from pandas.api.types import is_list_like, is_dict_like, is_scalar if LooseVersion(pd.__version__) >= LooseVersion("0.24"): from pandas.core.dtypes.common import infer_dtype_from_object else: from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.inference import is_sequence import pyspark from pyspark import StorageLevel from pyspark import sql as spark from pyspark.sql import functions as F, Column from pyspark.sql.functions import pandas_udf, PandasUDFType from pyspark.sql.types import ( BooleanType, DoubleType, FloatType, NumericType, StructType, StructField, ArrayType, ) from pyspark.sql.window import Window from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.accessors import KoalasFrameMethods from databricks.koalas.config import option_context, get_option from databricks.koalas.spark import functions as SF from databricks.koalas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods from databricks.koalas.utils import ( validate_arguments_and_invoke_function, align_diff_frames, validate_bool_kwarg, column_labels_level, name_like_string, same_anchor, scol_for, validate_axis, verify_temp_column_name, default_session, ) from databricks.koalas.generic import Frame from databricks.koalas.internal import ( InternalFrame, HIDDEN_COLUMNS, NATURAL_ORDER_COLUMN_NAME, SPARK_INDEX_NAME_FORMAT, SPARK_DEFAULT_INDEX_NAME, SPARK_DEFAULT_SERIES_NAME, ) from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.ml import corr from databricks.koalas.typedef import infer_return_type, as_spark_type, DataFrameType, SeriesType from databricks.koalas.plot import KoalasFramePlotMethods # These regular expression patterns are complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ and _repr_html_ in DataFrame. # Two patterns basically seek the footer string from Pandas' REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$") REPR_HTML_PATTERN = re.compile( r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$" ) _flex_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``. With reverse version, `{reverse}`. 
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`. Parameters ---------- other : scalar Any single data Returns ------- DataFrame Result of the arithmetic operation. Examples -------- >>> df = ks.DataFrame({{'angles': [0, 3, 4], ... 'degrees': [360, 180, 360]}}, ... index=['circle', 'triangle', 'rectangle'], ... columns=['angles', 'degrees']) >>> df angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Add a scalar with operator version which return the same results. Also reverse version. >>> df + 1 angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(df) angles degrees circle 0 720 triangle 6 360 rectangle 8 720 >>> df + df + df angles degrees circle 0 1080 triangle 9 540 rectangle 12 1080 >>> df.radd(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 Divide and true divide by constant with reverse version. >>> df / 10 angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.div(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rdiv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 >>> df.truediv(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rtruediv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 Subtract by constant with reverse version. >>> df - 1 angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.sub(1) angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.rsub(1) angles degrees circle 1 -359 triangle -2 -179 rectangle -3 -359 Multiply by constant with reverse version. >>> df * 1 angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.mul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.rmul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Floor Divide by constant with reverse version. >>> df // 10 angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.floordiv(10) angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.rfloordiv(10) # doctest: +SKIP angles degrees circle inf 0.0 triangle 3.0 0.0 rectangle 2.0 0.0 Mod by constant with reverse version. >>> df % 2 angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.mod(2) angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.rmod(2) angles degrees circle NaN 2 triangle 2.0 2 rectangle 2.0 2 Power by constant with reverse version. 
>>> df ** 2 angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.pow(2) angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.rpow(2) angles degrees circle 1.0 2.348543e+108 triangle 8.0 1.532496e+54 rectangle 16.0 2.348543e+108 """ T = TypeVar("T") def _create_tuple_for_frame_type(params): from databricks.koalas.typedef import NameTypeHolder if isinstance(params, zip): params = [slice(name, tpe) for name, tpe in params] if isinstance(params, slice): params = (params,) if ( hasattr(params, "__len__") and isinstance(params, Iterable) and all(isinstance(param, slice) for param in params) ): for param in params: if isinstance(param.start, str) and param.step is not None: raise TypeError( "Type hints should be specified as " "DataFrame['name': type]; however, got %s" % param ) name_classes = [] for param in params: new_class = type("NameType", (NameTypeHolder,), {}) new_class.name = param.start # When the given argument is a numpy's dtype instance. new_class.tpe = param.stop.type if isinstance(param.stop, np.dtype) else param.stop name_classes.append(new_class) return Tuple[tuple(name_classes)] if not isinstance(params, Iterable): params = [params] params = [param.type if isinstance(param, np.dtype) else param for param in params] return Tuple[tuple(params)] if (3, 5) <= sys.version_info < (3, 7): from typing import GenericMeta # type: ignore # This is a workaround to support variadic generic in DataFrame in Python 3.5+. # See https://github.com/python/typing/issues/193 # We wrap the input params by a tuple to mimic variadic generic. old_getitem = GenericMeta.__getitem__ # type: ignore def new_getitem(self, params): if hasattr(self, "is_dataframe"): return old_getitem(self, _create_tuple_for_frame_type(params)) else: return old_getitem(self, params) GenericMeta.__getitem__ = new_getitem # type: ignore class DataFrame(Frame, Generic[T]): """ Koalas DataFrame that corresponds to pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _internal: an internal immutable Frame to manage metadata. :type _internal: InternalFrame Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \ or Koalas Series Dict can contain Series, arrays, constants, or list-like objects If data is a dict, argument order is maintained for Python 3.6 and later. Note that if `data` is a pandas DataFrame, a Spark DataFrame, and a Koalas Series, other arguments should not be used. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = ks.DataFrame(data=d, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Constructing DataFrame from pandas DataFrame >>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2'])) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. 
>>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = ks.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df2 # doctest: +SKIP a b c d e 0 3 1 4 9 8 1 4 8 4 8 4 2 7 6 5 6 7 3 8 7 9 1 0 4 2 5 4 3 9 """ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if isinstance(data, InternalFrame): assert index is None assert columns is None assert dtype is None assert not copy internal = data elif isinstance(data, spark.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy internal = InternalFrame(spark_frame=data, index_map=None) elif isinstance(data, ks.Series): assert index is None assert columns is None assert dtype is None assert not copy data = data.to_frame() internal = data._internal else: if isinstance(data, pd.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy pdf = data else: pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) internal = InternalFrame.from_pandas(pdf) self._internal_frame = internal @property def _ksers(self): """ Return a dict of column label -> Series which anchors `self`. """ from databricks.koalas.series import Series if not hasattr(self, "_kseries"): self._kseries = { label: Series(data=self, index=label) for label in self._internal.column_labels } else: kseries = self._kseries assert len(self._internal.column_labels) == len(kseries), ( len(self._internal.column_labels), len(kseries), ) if any(self is not kser._kdf for kser in kseries.values()): # Refresh the dict to contain only Series anchoring `self`. self._kseries = { label: kseries[label] if self is kseries[label]._kdf else Series(data=self, index=label) for label in self._internal.column_labels } return self._kseries @property def _internal(self) -> InternalFrame: return self._internal_frame def _update_internal_frame(self, internal: InternalFrame, requires_same_anchor: bool = True): """ Update InternalFrame with the given one. If the column_label is changed or the new InternalFrame is not the same `anchor`, disconnect the link to the Series and create a new one. If `requires_same_anchor` is `False`, checking whether or not the same anchor is ignored and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy, updating the underlying Spark DataFrame which need to combine a different Spark DataFrame. :param internal: the new InternalFrame :param requires_same_anchor: whether checking the same anchor """ from databricks.koalas.series import Series kseries = {} for old_label, new_label in zip_longest( self._internal.column_labels, internal.column_labels ): if old_label is not None: kser = self._ksers[old_label] renamed = old_label != new_label not_same_anchor = requires_same_anchor and not same_anchor(internal, kser) if renamed or not_same_anchor: kdf = DataFrame(self._internal.select_column(old_label)) # type: DataFrame kser._update_anchor(kdf) kser = None else: kser = None if new_label is not None: if kser is None: kser = Series(data=self, index=new_label) kseries[new_label] = kser self._internal_frame = internal self._kseries = kseries if hasattr(self, "_repr_pandas_cache"): del self._repr_pandas_cache @property def ndim(self): """ Return an int representing the number of array dimensions. return 2 for DataFrame. 
Examples -------- >>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', None], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 NaN 7 8 >>> df.ndim 2 """ return 2 @property def axes(self): """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=True): """ Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. Parameters ---------- sfun : either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. axis: used only for sanity check because series only support index axis. name : original pandas API name. axis : axis to apply. 0 or 1, or 'index' or 'columns. numeric_only : bool, default True Include only float, int, boolean columns. False is not supported. This parameter is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter currently. """ from inspect import signature from databricks.koalas import Series from databricks.koalas.series import first_series if name not in ("count", "min", "max") and not numeric_only: raise ValueError("Disabling 'numeric_only' parameter is not supported.") axis = validate_axis(axis) if axis == 0: exprs = [] new_column_labels = [] num_args = len(signature(sfun).parameters) for label in self._internal.column_labels: col_sdf = self._internal.spark_column_for(label) col_type = self._internal.spark_type_for(label) is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType)) min_or_max = sfun.__name__ in ("min", "max") keep_column = not numeric_only or is_numeric_or_boolean or min_or_max if keep_column: if isinstance(col_type, BooleanType) and not min_or_max: # Stat functions cannot be used with boolean values by default # Thus, cast to integer (true to 1 and false to 0) # Exclude the min and max methods though since those work with booleans col_sdf = col_sdf.cast("integer") if num_args == 1: # Only pass in the column if sfun accepts only one arg col_sdf = sfun(col_sdf) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args col_sdf = sfun(col_sdf, col_type) exprs.append(col_sdf.alias(name_like_string(label))) new_column_labels.append(label) sdf = self._internal.spark_frame.select(*exprs) # The data is expected to be small so it's fine to transpose/use default index. with ks.option_context( "compute.default_index_type", "distributed", "compute.max_rows", None ): kdf = DataFrame(sdf) internal = InternalFrame( kdf._internal.spark_frame, index_map=kdf._internal.index_map, column_labels=new_column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) elif axis == 1: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. 
limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only) if len(pdf) <= limit: return Series(pser) @pandas_udf(returnType=as_spark_type(pser.dtype.type)) def calculate_columns_axis(*cols): return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only) df = self._internal.spark_frame.select( calculate_columns_axis(*self._internal.data_spark_columns).alias( SPARK_DEFAULT_SERIES_NAME ) ) return DataFrame(df)[SPARK_DEFAULT_SERIES_NAME] else: raise ValueError("No axis named %s for object type %s." % (axis, type(axis))) def _kser_for(self, label): """ Create Series with a proper column label. The given label must be verified to exist in `InternalFrame.column_labels`. For example, in some method, self is like: >>> self = ks.range(3) `self._kser_for(label)` can be used with `InternalFrame.column_labels`: >>> self._kser_for(self._internal.column_labels[0]) 0 0 1 1 2 2 Name: id, dtype: int64 `self._kser_for(label)` must not be used directly with user inputs. In that case, `self[label]` should be used instead, which checks the label exists or not: >>> self['id'] 0 0 1 1 2 2 Name: id, dtype: int64 """ return self._ksers[label] def _apply_series_op(self, op, should_resolve: bool = False): applied = [] for label in self._internal.column_labels: applied.append(op(self._kser_for(label))) internal = self._internal.with_new_columns(applied) if should_resolve: internal = internal.resolved_copy return DataFrame(internal) # Arithmetic Operators def _map_series_op(self, op, other): from databricks.koalas.base import IndexOpsMixin if not isinstance(other, DataFrame) and ( isinstance(other, IndexOpsMixin) or is_sequence(other) ): raise ValueError( "%s with a sequence is currently not supported; " "however, got %s." 
% (op, type(other)) ) if isinstance(other, DataFrame): if self._internal.column_labels_level != other._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") if not same_anchor(self, other): # Different DataFrames def apply_op(kdf, this_column_labels, that_column_labels): for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( getattr(kdf._kser_for(this_label), op)(kdf._kser_for(that_label)), this_label, ) return align_diff_frames(apply_op, self, other, fillna=True, how="full") else: applied = [] column_labels = [] for label in self._internal.column_labels: if label in other._internal.column_labels: applied.append(getattr(self._kser_for(label), op)(other._kser_for(label))) else: applied.append( F.lit(None) .cast(self._internal.spark_type_for(label)) .alias(name_like_string(label)) ) column_labels.append(label) for label in other._internal.column_labels: if label not in column_labels: applied.append( F.lit(None) .cast(other._internal.spark_type_for(label)) .alias(name_like_string(label)) ) column_labels.append(label) internal = self._internal.with_new_columns(applied, column_labels) return DataFrame(internal) else: return self._apply_series_op(lambda kser: getattr(kser, op)(other)) def __add__(self, other): return self._map_series_op("add", other) def __radd__(self, other): return self._map_series_op("radd", other) def __div__(self, other): return self._map_series_op("div", other) def __rdiv__(self, other): return self._map_series_op("rdiv", other) def __truediv__(self, other): return self._map_series_op("truediv", other) def __rtruediv__(self, other): return self._map_series_op("rtruediv", other) def __mul__(self, other): return self._map_series_op("mul", other) def __rmul__(self, other): return self._map_series_op("rmul", other) def __sub__(self, other): return self._map_series_op("sub", other) def __rsub__(self, other): return self._map_series_op("rsub", other) def __pow__(self, other): return self._map_series_op("pow", other) def __rpow__(self, other): return self._map_series_op("rpow", other) def __mod__(self, other): return self._map_series_op("mod", other) def __rmod__(self, other): return self._map_series_op("rmod", other) def __floordiv__(self, other): return self._map_series_op("floordiv", other) def __rfloordiv__(self, other): return self._map_series_op("rfloordiv", other) def __abs__(self): return self._apply_series_op(lambda kser: abs(kser)) def add(self, other): return self + other # create accessor for plot plot = CachedAccessor("plot", KoalasFramePlotMethods) # create accessor for Spark related methods. spark = CachedAccessor("spark", SparkFrameMethods) # create accessor for Koalas specific methods. 
koalas = CachedAccessor("koalas", KoalasFrameMethods) def hist(self, bins=10, **kwds): return self.plot.hist(bins, **kwds) hist.__doc__ = KoalasFramePlotMethods.hist.__doc__ def kde(self, bw_method=None, ind=None, **kwds): return self.plot.kde(bw_method, ind, **kwds) kde.__doc__ = KoalasFramePlotMethods.kde.__doc__ add.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="dataframe + other", reverse="radd" ) def radd(self, other): return other + self radd.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="other + dataframe", reverse="add" ) def div(self, other): return self / other div.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rdiv" ) divide = div def rdiv(self, other): return other / self rdiv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="div" ) def truediv(self, other): return self / other truediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rtruediv" ) def rtruediv(self, other): return other / self rtruediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="truediv" ) def mul(self, other): return self * other mul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="dataframe * other", reverse="rmul" ) multiply = mul def rmul(self, other): return other * self rmul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="other * dataframe", reverse="mul" ) def sub(self, other): return self - other sub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="dataframe - other", reverse="rsub" ) subtract = sub def rsub(self, other): return other - self rsub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="other - dataframe", reverse="sub" ) def mod(self, other): return self % other mod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="dataframe % other", reverse="rmod" ) def rmod(self, other): return other % self rmod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="other % dataframe", reverse="mod" ) def pow(self, other): return self ** other pow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power of series", op_name="**", equiv="dataframe ** other", reverse="rpow" ) def rpow(self, other): return other ** self rpow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power", op_name="**", equiv="other ** dataframe", reverse="pow" ) def floordiv(self, other): return self // other floordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="dataframe // other", reverse="rfloordiv" ) def rfloordiv(self, other): return other // self rfloordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="other // dataframe", reverse="floordiv" ) # Comparison Operators def __eq__(self, other): return self._map_series_op("eq", other) def __ne__(self, other): return self._map_series_op("ne", other) def __lt__(self, other): return self._map_series_op("lt", other) def __le__(self, other): return self._map_series_op("le", other) def __ge__(self, other): return self._map_series_op("ge", other) def __gt__(self, other): return self._map_series_op("gt", other) def eq(self, other): """ Compare if the current value is equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.eq(1) a b a True True b False False c False True d False False """ return self == other equals = eq def gt(self, other): """ Compare if the current value is greater than the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.gt(2) a b a False False b False False c True False d True False """ return self > other def ge(self, other): """ Compare if the current value is greater than or equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ge(1) a b a True True b True False c True True d True False """ return self >= other def lt(self, other): """ Compare if the current value is less than the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.lt(1) a b a False False b False False c False False d False False """ return self < other def le(self, other): """ Compare if the current value is less than or equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.le(2) a b a True True b True False c False True d False False """ return self <= other def ne(self, other): """ Compare if the current value is not equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ne(1) a b a False False b True True c True False d True True """ return self != other def applymap(self, func): """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> np.int32: ... return x ** 2 Koalas uses return type hint and does not try to infer the type. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. Examples -------- >>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> def str_len(x) -> int: ... return len(str(x)) >>> df.applymap(str_len) 0 1 0 3 4 1 5 5 >>> def power(x) -> float: ... return x ** 2 >>> df.applymap(power) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 You can omit the type hint and let Koalas infer its type. >>> df.applymap(lambda x: x ** 2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # TODO: We can implement shortcut theoretically since it creates new DataFrame # anyway and we don't have to worry about operations on different DataFrames. return self._apply_series_op(lambda kser: kser.apply(func)) # TODO: not all arguments are implemented comparing to pandas' for now. def aggregate(self, func: Union[List[str], Dict[str, List[str]]]): """Aggregate using one or more operations over the specified axis. Parameters ---------- func : dict or a list a dict mapping from column name (string) to aggregate functions (list of strings). If a list is given, the aggregation is performed against all columns. Returns ------- DataFrame Notes ----- `agg` is an alias for `aggregate`. Use the alias. 
See Also -------- DataFrame.apply : Invoke function on DataFrame. DataFrame.transform : Only perform transforming type operations. DataFrame.groupby : Perform operations over groups. Series.aggregate : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) >>> df A B C 0 1.0 2.0 3.0 1 4.0 5.0 6.0 2 7.0 8.0 9.0 3 NaN NaN NaN Aggregate these functions over the rows. >>> df.agg(['sum', 'min'])[['A', 'B', 'C']] A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']] A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN """ from databricks.koalas.groupby import GroupBy if isinstance(func, list): if all((isinstance(f, str) for f in func)): func = dict([(column, func) for column in self.columns]) else: raise ValueError( "If the given function is a list, it " "should only contains function names as strings." ) if not isinstance(func, dict) or not all( isinstance(key, str) and ( isinstance(value, str) or isinstance(value, list) and all(isinstance(v, str) for v in value) ) for key, value in func.items() ): raise ValueError( "aggs must be a dict mapping from column name (string) to aggregate " "functions (list of strings)." ) kdf = DataFrame(GroupBy._spark_groupby(self, func)) # type: DataFrame # The codes below basically converts: # # A B # sum min min max # 0 12.0 1.0 2.0 8.0 # # to: # A B # max NaN 8.0 # min 1.0 2.0 # sum 12.0 NaN # # Aggregated output is usually pretty much small. So it is fine to directly use pandas API. pdf = kdf.to_pandas().stack() pdf.index = pdf.index.droplevel() pdf.columns.names = [None] pdf.index.names = [None] return DataFrame(pdf[list(func.keys())]) agg = aggregate def corr(self, method="pearson"): """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : pandas.DataFrame See Also -------- Series.corr Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between Koalas and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. Koalas will return an error. * Koalas doesn't support the following argument(s). * `min_periods` argument is not supported """ return ks.from_pandas(corr(self, method)) def iteritems(self) -> Iterable: """ Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. Examples -------- >>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... 
label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000 """ return ( (label if len(label) > 1 else label[0], self._kser_for(label)) for label in self._internal.column_labels ) def iterrows(self): """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : pandas.Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = ks.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns internal_index_columns = self._internal.index_spark_column_names internal_data_columns = self._internal.data_spark_column_names def extract_kv_from_spark_row(row): k = ( row[internal_index_columns[0]] if len(internal_index_columns) == 1 else tuple(row[c] for c in internal_index_columns) ) v = [row[c] for c in internal_data_columns] return k, v for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator() ): s = pd.Series(v, index=columns, name=k) yield k, s def items(self) -> Iterable: """This is an alias of ``iteritems``.""" return self.iteritems() def to_clipboard(self, excel=True, sep=None, **kwargs): """ Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- excel : bool, default True - True, use the provided separator, writing in a csv format for allowing easy pasting into excel. - False, write a string representation of the object to the clipboard. sep : str, default ``'\\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none See Also -------- read_clipboard : Read text from clipboard. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 This function also works for Series: >>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... 
# Wrote the following to the system clipboard: ... # 0, 1 ... # 1, 2 ... # 2, 3 ... # 3, 4 ... # 4, 5 ... # 5, 6 ... # 6, 7 """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args ) def to_html( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal=".", bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False, ): """ Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with pandas 0.24+). Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string. 
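        Examples
        --------
        A minimal illustrative sketch; the exact HTML markup depends on the
        pandas version used under the hood, so the rendered output is skipped
        here rather than checked.

        >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]},
        ...                   columns=['col1', 'col2'])
        >>> print(df.to_html())  # doctest: +SKIP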
""" # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args ) def to_string( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal=".", line_width=None, ): """ Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args ) def to_dict(self, orient="dict", into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. 
note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \ ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), \ defaultdict(<class 'list'>, {'col..., 'col...})] """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args ) def to_latex( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal=".", multicolumn=None, multicolumn_format=None, multirow=None, ): r""" Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. 
note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, consider alternative formats. Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default ‘NaN’ Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns’ elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By default, ‘l’ will be used for all columns except columns of numbers, which default to ‘r’. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to ‘ascii’ on Python 2 and ‘utf-8’ on Python 3. decimal : str, default ‘.’ Character recognized as decimal separator, e.g. ‘,’ in Europe. multicolumn : bool, default True Use multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default ‘l’ The alignment for multicolumns, similar to column_format The default will be read from the config module. multirow : bool, default False Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. Returns ------- str or None If buf is None, returns the resulting LateX format as a string. Otherwise returns None. See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}, ... 
columns=['name', 'mask', 'weapon']) >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE '\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon \\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n' """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args ) # TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic # when creating arrays) def transpose(self): """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from databricks.koalas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ks.DataFrame({'a': range(1001)}).transpose() Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Returns ------- DataFrame The transposed DataFrame. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the coerced dtype. For instance, if int and float have to be placed in same column, it becomes float. If type coercion is not possible, it fails. Also, note that the values in index should be unique because they become unique column names. In addition, if Spark 2.3 is used, the types should always be exactly same. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2']) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP >>> df1_transposed # doctest: +SKIP 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes # doctest: +SKIP 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'score': [9.5, 8], ... 'kids': [0, 0], ... 'age': [12, 22]} >>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age']) >>> df2 score kids age 0 9.5 0 12 1 8.0 0 22 >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP >>> df2_transposed # doctest: +SKIP 0 1 age 12.0 22.0 kids 0.0 0.0 score 9.5 8.0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the coerced dtype: >>> df2.dtypes score float64 kids int64 age int64 dtype: object >>> df2_transposed.dtypes # doctest: +SKIP 0 float64 1 float64 dtype: object """ max_compute_count = get_option("compute.max_rows") if max_compute_count is not None: pdf = self.head(max_compute_count + 1)._to_internal_pandas() if len(pdf) > max_compute_count: raise ValueError( "Current DataFrame has more then the given limit {0} rows. " "Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' " "to retrieve to retrieve more than {0} rows. 
Note that, before changing the " "'compute.max_rows', this operation is considerably expensive.".format( max_compute_count ) ) return DataFrame(pdf.transpose()) # Explode the data to be pairs. # # For instance, if the current input DataFrame is as below: # # +------+------+------+------+------+ # |index1|index2|(a,x1)|(a,x2)|(b,x3)| # +------+------+------+------+------+ # | y1| z1| 1| 0| 0| # | y2| z2| 0| 50| 0| # | y3| z3| 3| 2| 1| # +------+------+------+------+------+ # # Output of `exploded_df` becomes as below: # # +-----------------+-----------------+-----------------+-----+ # | index|__index_level_0__|__index_level_1__|value| # +-----------------+-----------------+-----------------+-----+ # |{"a":["y1","z1"]}| a| x1| 1| # |{"a":["y1","z1"]}| a| x2| 0| # |{"a":["y1","z1"]}| b| x3| 0| # |{"a":["y2","z2"]}| a| x1| 0| # |{"a":["y2","z2"]}| a| x2| 50| # |{"a":["y2","z2"]}| b| x3| 0| # |{"a":["y3","z3"]}| a| x1| 3| # |{"a":["y3","z3"]}| a| x2| 2| # |{"a":["y3","z3"]}| b| x3| 1| # +-----------------+-----------------+-----------------+-----+ pairs = F.explode( F.array( *[ F.struct( [ F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label) ] + [self._internal.spark_column_for(label).alias("value")] ) for label in self._internal.column_labels ] ) ) exploded_df = self._internal.spark_frame.withColumn("pairs", pairs).select( [ F.to_json( F.struct( F.array( [scol.cast("string") for scol in self._internal.index_spark_columns] ).alias("a") ) ).alias("index"), F.col("pairs.*"), ] ) # After that, executes pivot with key and its index column. # Note that index column should contain unique values since column names # should be unique. internal_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] pivoted_df = exploded_df.groupBy(internal_index_columns).pivot("index") transposed_df = pivoted_df.agg(F.first(F.col("value"))) new_data_columns = list( filter(lambda x: x not in internal_index_columns, transposed_df.columns) ) internal = self._internal.copy( spark_frame=transposed_df, index_map=OrderedDict((col, None) for col in internal_index_columns), column_labels=[tuple(json.loads(col)["a"]) for col in new_data_columns], data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns], column_label_names=None, ) return DataFrame(internal) T = property(transpose) def apply_batch(self, func, args=(), **kwds): warnings.warn( "DataFrame.apply_batch is deprecated as of DataFrame.koalas.apply_batch. " "Please use the API instead.", FutureWarning, ) return self.koalas.apply_batch(func, args=args, **kwds) apply_batch.__doc__ = KoalasFrameMethods.apply_batch.__doc__ # TODO: Remove this API when Koalas 2.0.0. def map_in_pandas(self, func): warnings.warn( "DataFrame.map_in_pandas is deprecated as of DataFrame.koalas.apply_batch. " "Please use the API instead.", FutureWarning, ) return self.koalas.apply_batch(func) map_in_pandas.__doc__ = KoalasFrameMethods.apply_batch.__doc__ def apply(self, func, axis=0, args=(), **kwds): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: when `axis` is 0 or 'index', the `func` is unable to access to the whole input series. 
Koalas internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of whole series but of the batch internally ... # used. ... def length(s) -> int: ... return len(s) ... >>> df = ks.DataFrame({'A': range(1000)}) >>> df.apply(length, axis=0) # doctest: +SKIP 0 83 1 83 2 83 ... 10 83 11 83 Name: 0, dtype: int32 .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify the return type as `Series` or scalar value in ``func``, for instance, as below: >>> def square(s) -> ks.Series[np.int32]: ... return s ** 2 Koalas uses return type hint and does not try to infer the type. In case when axis is 1, it requires to specify `DataFrame` or scalar value with type hints as below: >>> def plus_one(x) -> ks.DataFrame[float, float]: ... return x + 1 If the return type is specified as `DataFrame`, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. To specify the column names, you can assign them in a pandas friendly style as below: >>> def plus_one(x) -> ks.DataFrame["a": float, "b": float]: ... return x + 1 >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}) >>> def plus_one(x) -> ks.DataFrame[zip(pdf.dtypes, pdf.columns)]: ... return x + 1 However, this way switches the index type to default index type in the output because the type hint cannot express the index type at this moment. Use `reset_index()` to keep index as a workaround. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap : For elementwise operations. DataFrame.aggregate : Only perform aggregating type operations. DataFrame.transform : Only perform transforming type operations. Series.apply : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> def sqrt(x) -> ks.Series[float]: ... return np.sqrt(x) ... >>> df.apply(sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 You can omit the type hint and let Koalas infer its type. >>> df.apply(np.sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 When `axis` is 1 or 'columns', it applies the function for each row. >>> def summation(x) -> np.int64: ... return np.sum(x) ... >>> df.apply(summation, axis=1) 0 13 1 13 2 13 Name: 0, dtype: int64 Likewise, you can omit the type hint and let Koalas infer its type. >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 Name: 0, dtype: int64 >>> df.apply(max, axis=1) 0 9 1 9 2 9 Name: 0, dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] Name: 0, dtype: object In order to specify the types when `axis` is '1', it should use DataFrame[...] annotation. 
In this case, the column names are automatically generated. >>> def identify(x) -> ks.DataFrame['A': np.int64, 'B': np.int64]: ... return x ... >>> df.apply(identify, axis=1) A B 0 4 9 1 4 9 2 4 9 You can also specify extra arguments. >>> def plus_two(a, b, c) -> ks.DataFrame[np.int64, np.int64]: ... return a + b + c ... >>> df.apply(plus_two, axis=1, args=(1,), c=3) c0 c1 0 8 13 1 8 13 2 8 13 """ from databricks.koalas.groupby import GroupBy from databricks.koalas.series import first_series if not isinstance(func, types.FunctionType): assert callable(func), "the first argument should be a callable function." f = func func = lambda *args, **kwargs: f(*args, **kwargs) axis = validate_axis(axis) should_return_series = False spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None should_use_map_in_pandas = LooseVersion(pyspark.__version__) >= "3.0" def apply_func(pdf): pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) if isinstance(pdf_or_pser, pd.Series): return pdf_or_pser.to_frame() else: return pdf_or_pser self_applied = DataFrame(self._internal.resolved_copy) if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self_applied.head(limit + 1)._to_internal_pandas() applied = pdf.apply(func, axis=axis, args=args, **kwds) kser_or_kdf = ks.from_pandas(applied) if len(pdf) <= limit: return kser_or_kdf kdf = kser_or_kdf if isinstance(kser_or_kdf, ks.Series): should_return_series = True kdf = kser_or_kdf.to_frame() return_schema = kdf._internal.to_internal_spark_frame.schema if should_use_map_in_pandas: output_func = GroupBy._make_pandas_df_builder_func( self_applied, apply_func, return_schema, retain_index=True ) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) else: sdf = GroupBy._spark_group_map_apply( self_applied, apply_func, (F.spark_partition_id(),), return_schema, retain_index=True, ) # If schema is inferred, we can restore indexes too. internal = kdf._internal.with_new_sdf(sdf) else: return_type = infer_return_type(func) return_schema = return_type.tpe require_index_axis = isinstance(return_type, SeriesType) require_column_axis = isinstance(return_type, DataFrameType) if require_index_axis: if axis != 0: raise TypeError( "The given function should specify a scalar or a series as its type " "hints when axis is 0 or 'index'; however, the return type " "was %s" % return_sig ) fields_types = zip( self_applied.columns, [return_schema] * len(self_applied.columns) ) return_schema = StructType([StructField(c, t) for c, t in fields_types]) elif require_column_axis: if axis != 1: raise TypeError( "The given function should specify a scalar or a frame as its type " "hints when axis is 1 or 'column'; however, the return type " "was %s" % return_sig ) else: # any axis is fine. 
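                # The return type hint is a plain scalar (neither SeriesType nor
                # DataFrameType), so applying ``func`` collapses each row or column
                # into a single value; wrap that scalar type in a one-column schema
                # and hand the result back as a Series below.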
should_return_series = True return_schema = StructType([StructField(SPARK_DEFAULT_SERIES_NAME, return_schema)]) if should_use_map_in_pandas: output_func = GroupBy._make_pandas_df_builder_func( self_applied, apply_func, return_schema, retain_index=False ) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) else: sdf = GroupBy._spark_group_map_apply( self_applied, apply_func, (F.spark_partition_id(),), return_schema, retain_index=False, ) # Otherwise, it loses index. internal = InternalFrame(spark_frame=sdf, index_map=None) result = DataFrame(internal) if should_return_series: return first_series(result) else: return result def transform(self, func, axis=0, *args, **kwargs): """ Call ``func`` on self producing a Series with transformed values and that has the same length as its input. See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> ks.Series[np.int32]: ... return x ** 2 Koalas uses return type hint and does not try to infer the type. .. note:: the series within ``func`` is actually multiple pandas series as the segments of the whole Koalas series; therefore, the length of each series is not guaranteed. As an example, an aggregation against each series does work as a global aggregation but an aggregation of each segment. See below: >>> def func(x) -> ks.Series[np.int32]: ... return x + sum(x) Parameters ---------- func : function Function to use for transforming the data. It must work when pandas Series is passed. axis : int, default 0 or 'index' Can only be set to 0 at the moment. *args Positional arguments to pass to func. **kwargs Keyword arguments to pass to func. Returns ------- DataFrame A DataFrame that must have the same length as self. Raises ------ Exception : If the returned DataFrame has a different length than self. See Also -------- DataFrame.aggregate : Only perform aggregating type operations. DataFrame.apply : Invoke function on DataFrame. Series.transform : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B']) >>> df A B 0 0 1 1 1 2 2 2 3 >>> def square(x) -> ks.Series[np.int32]: ... return x ** 2 >>> df.transform(square) A B 0 0 1 1 1 4 2 4 9 You can omit the type hint and let Koalas infer its type. >>> df.transform(lambda x: x ** 2) A B 0 0 1 1 1 4 2 4 9 For multi-index columns: >>> df.columns = [('X', 'A'), ('X', 'B')] >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 4 2 4 9 >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 2 2 2 3 You can also specify extra arguments. >>> def calculation(x, y, z) -> ks.Series[int]: ... return x ** y + z >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE X A B 0 20 21 1 21 1044 2 1044 59069 """ if not isinstance(func, types.FunctionType): assert callable(func), "the first argument should be a callable function." 
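            # Wrap non-function callables (e.g. built-ins or functools.partial
            # objects) in a plain lambda so that inspect.getfullargspec below sees
            # a regular Python function; the wrapper carries no return annotation,
            # so such callables always go through the schema-inference path.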
f = func func = lambda *args, **kwargs: f(*args, **kwargs) axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() transformed = pdf.transform(func, axis, *args, **kwargs) kdf = DataFrame(transformed) if len(pdf) <= limit: return kdf applied = [] for input_label, output_label in zip( self._internal.column_labels, kdf._internal.column_labels ): pudf = pandas_udf( lambda c: func(c, *args, **kwargs), returnType=kdf._internal.spark_type_for(output_label), functionType=PandasUDFType.SCALAR, ) kser = self._kser_for(input_label) applied.append(kser._with_new_scol(scol=pudf(kser.spark.column))) internal = self._internal.with_new_columns(applied) return DataFrame(internal) else: return self._apply_series_op( lambda kser: kser.koalas.transform_batch(func, *args, **kwargs) ) def transform_batch(self, func, *args, **kwargs): warnings.warn( "DataFrame.transform_batch is deprecated as of DataFrame.koalas.transform_batch. " "Please use the API instead.", FutureWarning, ) return self.koalas.transform_batch(func, *args, **kwargs) transform_batch.__doc__ = KoalasFrameMethods.transform_batch.__doc__ def pop(self, item): """ Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : str Label of column to be popped. Returns ------- Series Examples -------- >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN Also support for MultiIndex >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df a b name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('a') name class 0 falcon bird 1 parrot bird 2 lion mammal 3 monkey mammal >>> df b max_speed 0 389.0 1 24.0 2 80.5 3 NaN """ result = self[item] self._update_internal_frame(self.drop(item)._internal) return result # TODO: add axis parameter can work when '1' or 'columns' def xs(self, key, axis=0, level=None): """ Return cross-section from the DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : 0 or 'index', default 0 Axis to retrieve cross-section on. currently only support 0 or 'index' level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. 
Returns ------- DataFrame Cross-section from the original DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = ks.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings locomotion walks 4 0 Get values at specified index and level >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class locomotion mammal walks 4 0 """ from databricks.koalas.series import first_series if not isinstance(key, (str, tuple)): raise ValueError("'key' should be string or tuple that contains strings") if not all(isinstance(index, str) for index in key): raise ValueError( "'key' should have index names as only strings " "or a tuple that contain index names as only strings" ) axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if isinstance(key, str): key = (key,) if len(key) > len(self._internal.index_spark_columns): raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(key), len(self._internal.index_spark_columns) ) ) if level is None: level = 0 scols = ( self._internal.spark_columns[:level] + self._internal.spark_columns[level + len(key) :] ) rows = [self._internal.spark_columns[lvl] == index for lvl, index in enumerate(key, level)] sdf = self._internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols) if len(key) == len(self._internal.index_spark_columns): result = first_series( DataFrame(InternalFrame(spark_frame=sdf, index_map=None)).T ).rename(key) else: new_index_map = OrderedDict( list(self._internal.index_map.items())[:level] + list(self._internal.index_map.items())[level + len(key) :] ) internal = self._internal.copy(spark_frame=sdf, index_map=new_index_map) result = DataFrame(internal) return result def where(self, cond, other=np.nan): """ Replace values where the condition is False. Parameters ---------- cond : boolean DataFrame Where cond is True, keep the original value. Where False, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is False are replaced with corresponding value from other. 
Returns ------- DataFrame Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.where(df1 > 0).sort_index() A B 0 NaN 100.0 1 1.0 200.0 2 2.0 300.0 3 3.0 400.0 4 4.0 500.0 >>> df1.where(df1 > 1, 10).sort_index() A B 0 10 100 1 10 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df1 + 100).sort_index() A B 0 100 100 1 101 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df2).sort_index() A B 0 0 100 1 -1 200 2 2 300 3 3 400 4 4 500 When the column name of cond is different from self, it treats all values are False >>> cond = ks.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0 >>> cond C D 0 True False 1 False True 2 False False 3 True False 4 False True >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN When the type of cond is Series, it just check boolean regardless of column name >>> cond = ks.Series([1, 2]) > 1 >>> cond 0 False 1 True Name: 0, dtype: bool >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 1.0 200.0 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> reset_option("compute.ops_on_diff_frames") """ from databricks.koalas.series import Series tmp_cond_col_name = "__tmp_cond_col_{}__".format tmp_other_col_name = "__tmp_other_col_{}__".format kdf = self.copy() tmp_cond_col_names = [ tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(cond, DataFrame): cond = cond[ [ ( cond._internal.spark_column_for(label) if label in cond._internal.column_labels else F.lit(False) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_cond_col_names) ] ] kdf[tmp_cond_col_names] = cond elif isinstance(cond, Series): cond = cond.to_frame() cond = cond[ [cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names] ] kdf[tmp_cond_col_names] = cond else: raise ValueError("type of cond must be a DataFrame or Series") tmp_other_col_names = [ tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(other, DataFrame): other = other[ [ ( other._internal.spark_column_for(label) if label in other._internal.column_labels else F.lit(np.nan) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_other_col_names) ] ] kdf[tmp_other_col_names] = other elif isinstance(other, Series): other = other.to_frame() other = other[ [other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names] ] kdf[tmp_other_col_names] = other else: for label in self._internal.column_labels: kdf[tmp_other_col_name(name_like_string(label))] = other # above logic make spark dataframe looks like below: # +-----------------+---+---+------------------+-------------------+------------------+--... # |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__... # +-----------------+---+---+------------------+-------------------+------------------+--... # | 0| 0|100| true| 0| false| ... # | 1| 1|200| false| -1| false| ... # | 3| 3|400| true| -3| false| ... # | 2| 2|300| false| -2| true| ... # | 4| 4|500| false| -4| false| ... # +-----------------+---+---+------------------+-------------------+------------------+--... 
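        # For each original column, keep the original value where the matching
        # temporary condition column is true, otherwise fall back to the matching
        # temporary "other" column, and restore the original Spark column name
        # via alias().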
data_spark_columns = [] for label in self._internal.column_labels: data_spark_columns.append( F.when( kdf[tmp_cond_col_name(name_like_string(label))].spark.column, kdf._internal.spark_column_for(label), ) .otherwise(kdf[tmp_other_col_name(name_like_string(label))].spark.column) .alias(kdf._internal.spark_column_name_for(label)) ) return DataFrame( kdf._internal.with_new_columns( data_spark_columns, column_labels=self._internal.column_labels ) ) def mask(self, cond, other=np.nan): """ Replace values where the condition is True. Parameters ---------- cond : boolean DataFrame Where cond is False, keep the original value. Where True, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is True are replaced with corresponding value from other. Returns ------- DataFrame Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.mask(df1 > 0).sort_index() A B 0 0.0 NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> df1.mask(df1 > 1, 10).sort_index() A B 0 0 10 1 1 10 2 10 10 3 10 10 4 10 10 >>> df1.mask(df1 > 1, df1 + 100).sort_index() A B 0 0 200 1 1 300 2 102 400 3 103 500 4 104 600 >>> df1.mask(df1 > 1, df2).sort_index() A B 0 0 -100 1 1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> reset_option("compute.ops_on_diff_frames") """ from databricks.koalas.series import Series if not isinstance(cond, (DataFrame, Series)): raise ValueError("type of cond must be a DataFrame or Series") cond_inversed = cond._apply_series_op(lambda kser: ~kser) return self.where(cond_inversed, other) @property def index(self): """The index (row labels) Column of the DataFrame. Currently not supported when the DataFrame has no index. See Also -------- Index """ from databricks.koalas.indexes import Index, MultiIndex if len(self._internal.index_map) == 1: return Index(self) else: return MultiIndex(self) @property def empty(self): """ Returns true if the current DataFrame is empty. Otherwise, returns false. Examples -------- >>> ks.range(10).empty False >>> ks.range(0).empty True >>> ks.DataFrame({}, index=list('abc')).empty True """ return ( len(self._internal.column_labels) == 0 or self._internal.resolved_copy.spark_frame.rdd.isEmpty() ) @property def style(self): """ Property returning a Styler object containing methods for building a styled HTML representation fo the DataFrame. .. note:: currently it collects top 1000 rows and return its pandas `pandas.io.formats.style.Styler` instance. Examples -------- >>> ks.range(1001).style # doctest: +ELLIPSIS <pandas.io.formats.style.Styler object at ...> """ max_results = get_option("compute.max_rows") pdf = self.head(max_results + 1).to_pandas() if len(pdf) > max_results: warnings.warn("'style' property will only use top %s rows." % max_results, UserWarning) return pdf.head(max_results).style def set_index(self, keys, drop=True, append=False, inplace=False): """Set the DataFrame index (row labels) using one or more existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. 
Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. Examples -------- >>> df = ks.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}, ... columns=['month', 'year', 'sale']) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 """ inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(keys, (str, tuple)): keys = [keys] else: keys = list(keys) columns = set(self.columns) for key in keys: if key not in columns: raise KeyError(key) keys = [key if isinstance(key, tuple) else (key,) for key in keys] if drop: column_labels = [label for label in self._internal.column_labels if label not in keys] else: column_labels = self._internal.column_labels if append: index_map = OrderedDict( list(self._internal.index_map.items()) + [(self._internal.spark_column_name_for(label), label) for label in keys] ) else: index_map = OrderedDict( (self._internal.spark_column_name_for(label), label) for label in keys ) internal = self._internal.resolved_copy internal = internal.copy( index_map=index_map, column_labels=column_labels, data_spark_columns=[internal.spark_column_for(label) for label in column_labels], ) if inplace: self._update_internal_frame(internal) else: return DataFrame(internal) def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=""): """Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. 
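        Notes
        -----
        When every index level is reset (the default), Koalas attaches a new
        default index to the result, which can be expensive on large datasets
        depending on the configured default index type. If only some levels are
        needed back as columns, prefer selecting them via ``level``.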
Examples -------- >>> df = ks.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column. Unlike pandas, Koalas does not automatically add a sequential index. The following 0, 1, 2, 3 are only there when we display the DataFrame. >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = ks.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df # doctest: +NORMALIZE_WHITESPACE speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. We can place it in another level: >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, ... 
col_fill='genus') # doctest: +NORMALIZE_WHITESPACE genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, "inplace") multi_index = len(self._internal.index_map) > 1 def rename(index): if multi_index: return ("level_{}".format(index),) else: if ("index",) not in self._internal.column_labels: return ("index",) else: return ("level_{}".format(index),) if level is None: new_index_map = [ (column, name if name is not None else rename(i)) for i, (column, name) in enumerate(self._internal.index_map.items()) ] index_map = [] else: if isinstance(level, (int, str)): level = [level] level = list(level) if all(isinstance(l, int) for l in level): for lev in level: if lev >= len(self._internal.index_map): raise IndexError( "Too many levels: Index has only {} level, not {}".format( len(self._internal.index_map), lev + 1 ) ) idx = level elif all(isinstance(lev, str) for lev in level): idx = [] for l in level: try: i = self._internal.index_names.index((l,)) idx.append(i) except ValueError: if multi_index: raise KeyError("Level unknown not found") else: raise KeyError( "Level unknown must be same as name ({})".format( name_like_string(self._internal.index_names[0]) ) ) else: raise ValueError("Level should be all int or all string.") idx.sort() new_index_map = [] index_map_items = list(self._internal.index_map.items()) new_index_map_items = index_map_items.copy() for i in idx: info = index_map_items[i] index_column, index_name = info new_index_map.append( (index_column, index_name if index_name is not None else rename(i)) ) new_index_map_items.remove(info) index_map = OrderedDict(new_index_map_items) if drop: new_index_map = [] for _, name in new_index_map: if name in self._internal.column_labels: raise ValueError("cannot insert {}, already exists".format(name_like_string(name))) sdf = self._internal.spark_frame new_data_scols = [ scol_for(sdf, column).alias(name_like_string(name)) for column, name in new_index_map ] if len(index_map) > 0: index_scols = [scol_for(sdf, column) for column in index_map] sdf = sdf.select( index_scols + new_data_scols + self._internal.data_spark_columns + list(HIDDEN_COLUMNS) ) else: sdf = sdf.select( new_data_scols + self._internal.data_spark_columns + list(HIDDEN_COLUMNS) ) sdf = InternalFrame.attach_default_index(sdf) index_map = OrderedDict({SPARK_DEFAULT_INDEX_NAME: None}) if self._internal.column_labels_level > 1: column_depth = len(self._internal.column_labels[0]) if col_level >= column_depth: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( column_depth, col_level + 1 ) ) if any(col_level + len(name) > column_depth for _, name in new_index_map): raise ValueError("Item must have length equal to number of levels.") column_labels = [ tuple( ([col_fill] * col_level) + list(name) + ([col_fill] * (column_depth - (len(name) + col_level))) ) for _, name in new_index_map ] + self._internal.column_labels else: column_labels = [name for _, name in new_index_map] + self._internal.column_labels internal = self._internal.copy( spark_frame=sdf, index_map=index_map, column_labels=column_labels, data_spark_columns=( [scol_for(sdf, name_like_string(name)) for _, name in new_index_map] + [scol_for(sdf, col) for col in self._internal.data_spark_column_names] ), ) if inplace: self._update_internal_frame(internal) else: return DataFrame(internal) def isnull(self): """ Detects missing values for items in the current Dataframe. 
Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. See Also -------- Dataframe.notnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False """ return self._apply_series_op(lambda kser: kser.isnull()) isna = isnull def notnull(self): """ Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- Dataframe.isnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True """ return self._apply_series_op(lambda kser: kser.notnull()) notna = notnull # TODO: add frep and axis parameter def shift(self, periods=1, fill_value=None): """ Shift DataFrame by desired number of periods. .. note:: the current implementation of shift uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of self. For numeric data, np.nan is used. Returns ------- Copy of input DataFrame, shifted. Examples -------- >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45], ... 'Col2': [13, 23, 18, 33, 48], ... 'Col3': [17, 27, 22, 37, 52]}, ... columns=['Col1', 'Col2', 'Col3']) >>> df.shift(periods=3) Col1 Col2 Col3 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 10.0 13.0 17.0 4 20.0 23.0 27.0 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 0 0 0 0 1 0 0 0 2 0 0 0 3 10 13 17 4 20 23 27 """ return self._apply_series_op(lambda kser: kser.shift(periods, fill_value)) # TODO: axis should support 1 or 'columns' either at this moment def diff(self, periods: int = 1, axis: Union[int, str] = 0): """ First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). .. note:: the current implementation of diff uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : int, default 0 or 'index' Can only be set to 0 at the moment. Returns ------- diffed : DataFrame Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 
'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') return self._apply_series_op(lambda kser: kser.diff(periods)) # TODO: axis should support 1 or 'columns' either at this moment def nunique( self, axis: Union[int, str] = 0, dropna: bool = True, approx: bool = False, rsd: float = 0.05, ) -> "ks.Series": """ Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- axis : int, default 0 or 'index' Can only be set to 0 at the moment. dropna : bool, default True Don’t include NaN in the count. approx: bool, default False If False, will use the exact algorithm and return the exact number of unique. If True, it uses the HyperLogLog approximate algorithm, which is significantly faster for large amount of data. Note: This parameter is specific to Koalas and is not found in pandas. rsd: float, default 0.05 Maximum estimation error allowed in the HyperLogLog algorithm. Note: Just like ``approx`` this parameter is specific to Koalas. Returns ------- The number of unique values per column as a Koalas Series. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]}) >>> df.nunique() A 3 B 1 Name: 0, dtype: int64 >>> df.nunique(dropna=False) A 3 B 2 Name: 0, dtype: int64 On big data, we recommend using the approximate algorithm to speed up this function. The result will be very close to the exact unique count. >>> df.nunique(approx=True) A 3 B 1 Name: 0, dtype: int64 """ from databricks.koalas.series import first_series axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._internal.spark_frame.select( [ self._kser_for(label)._nunique(dropna, approx, rsd) for label in self._internal.column_labels ] ) # The data is expected to be small so it's fine to transpose/use default index. with ks.option_context( "compute.default_index_type", "distributed", "compute.max_rows", None ): kdf = DataFrame(sdf) # type: ks.DataFrame internal = InternalFrame( spark_frame=kdf._internal.spark_frame, index_map=kdf._internal.index_map, column_labels=self._internal.column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) def round(self, decimals=0): """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. .. note:: If `decimals` is a Series, it is expected to be small, as all the data is loaded into the driver's memory. 
Returns ------- DataFrame See Also -------- Series.round Examples -------- >>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076], ... 'B':[0.992815, 0.645646, 0.149370], ... 'C':[0.173891, 0.577595, 0.491027]}, ... columns=['A', 'B', 'C'], ... index=['first', 'second', 'third']) >>> df A B C first 0.028208 0.992815 0.173891 second 0.038683 0.645646 0.577595 third 0.877076 0.149370 0.491027 >>> df.round(2) A B C first 0.03 0.99 0.17 second 0.04 0.65 0.58 third 0.88 0.15 0.49 >>> df.round({'A': 1, 'C': 2}) A B C first 0.0 0.992815 0.17 second 0.0 0.645646 0.58 third 0.9 0.149370 0.49 >>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C']) >>> df.round(decimals) A B C first 0.0 1.0 0.17 second 0.0 1.0 0.58 third 0.9 0.0 0.49 """ if isinstance(decimals, ks.Series): decimals = { k if isinstance(k, tuple) else (k,): v for k, v in decimals._to_internal_pandas().items() } elif isinstance(decimals, dict): decimals = {k if isinstance(k, tuple) else (k,): v for k, v in decimals.items()} elif isinstance(decimals, int): decimals = {k: decimals for k in self._internal.column_labels} else: raise ValueError("decimals must be an integer, a dict-like or a Series") def op(kser): label = kser._internal.column_labels[0] if label in decimals: return F.round(kser.spark.column, decimals[label]).alias( kser._internal.data_spark_column_names[0] ) else: return kser return self._apply_series_op(op) def _mark_duplicates(self, subset=None, keep="first"): if subset is None: subset = self._internal.column_labels else: if isinstance(subset, str): subset = [(subset,)] elif isinstance(subset, tuple): subset = [subset] else: subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset] diff = set(subset).difference(set(self._internal.column_labels)) if len(diff) > 0: raise KeyError(", ".join([str(d) if len(d) > 1 else d[0] for d in diff])) group_cols = [self._internal.spark_column_name_for(label) for label in subset] sdf = self._internal.resolved_copy.spark_frame column = verify_temp_column_name(sdf, "__duplicated__") if keep == "first" or keep == "last": if keep == "first": ord_func = spark.functions.asc else: ord_func = spark.functions.desc window = ( Window.partitionBy(group_cols) .orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME)) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) sdf = sdf.withColumn(column, F.row_number().over(window) > 1) elif not keep: window = Window.partitionBy(group_cols).rowsBetween( Window.unboundedPreceding, Window.unboundedFollowing ) sdf = sdf.withColumn(column, F.count("*").over(window) > 1) else: raise ValueError("'keep' only supports 'first', 'last' and False") return sdf, column def duplicated(self, subset=None, keep="first"): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : Series Examples -------- >>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]}, ... columns = ['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 1 1 1 2 1 1 1 3 3 4 5 >>> df.duplicated().sort_index() 0 False 1 True 2 True 3 False Name: 0, dtype: bool Mark duplicates as ``True`` except for the last occurrence. 
>>> df.duplicated(keep='last').sort_index() 0 True 1 True 2 False 3 False Name: 0, dtype: bool Mark all duplicates as ``True``. >>> df.duplicated(keep=False).sort_index() 0 True 1 True 2 True 3 False Name: 0, dtype: bool """ from databricks.koalas.series import first_series sdf, column = self._mark_duplicates(subset, keep) column_label = (SPARK_DEFAULT_SERIES_NAME,) sdf = sdf.select( self._internal.index_spark_columns + [scol_for(sdf, column).alias(name_like_string(column_label))] ) return first_series( DataFrame( InternalFrame( spark_frame=sdf, index_map=self._internal.index_map, column_labels=[column_label], data_spark_columns=[scol_for(sdf, name_like_string(column_label))], ) ) ) def to_koalas(self, index_col: Optional[Union[str, List[str]]] = None): """ Converts the existing DataFrame into a Koalas DataFrame. This method is monkey-patched into Spark's DataFrame and can be used to convert a Spark DataFrame into a Koalas DataFrame. If running on an existing Koalas DataFrame, the method returns itself. If a Koalas DataFrame is converted to a Spark DataFrame and then back to Koalas, it will lose the index information and the original index will be turned into a normal column. Parameters ---------- index_col: str or list of str, optional, default: None Index column of table in Spark. See Also -------- DataFrame.to_spark Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 >>> spark_df = df.to_spark() >>> spark_df DataFrame[col1: bigint, col2: bigint] >>> kdf = spark_df.to_koalas() >>> kdf col1 col2 0 1 3 1 2 4 We can specify the index columns. >>> kdf = spark_df.to_koalas(index_col='col1') >>> kdf # doctest: +NORMALIZE_WHITESPACE col2 col1 1 3 2 4 Calling to_koalas on a Koalas DataFrame simply returns itself. >>> df.to_koalas() col1 col2 0 1 3 1 2 4 """ if isinstance(self, DataFrame): return self else: assert isinstance(self, spark.DataFrame), type(self) from databricks.koalas.namespace import _get_index_map index_map = _get_index_map(self, index_col) internal = InternalFrame(spark_frame=self, index_map=index_map) return DataFrame(internal) def cache(self): warnings.warn( "DataFrame.cache is deprecated as of DataFrame.spark.cache. " "Please use the API instead.", FutureWarning, ) return self.spark.cache() cache.__doc__ = SparkFrameMethods.cache.__doc__ def persist(self, storage_level=StorageLevel.MEMORY_AND_DISK): warnings.warn( "DataFrame.persist is deprecated as of DataFrame.spark.persist. " "Please use the API instead.", FutureWarning, ) return self.spark.persist(storage_level) persist.__doc__ = SparkFrameMethods.persist.__doc__ def hint(self, name: str, *parameters) -> "DataFrame": warnings.warn( "DataFrame.hint is deprecated as of DataFrame.spark.hint. " "Please use the API instead.", FutureWarning, ) return self.spark.hint(name, *parameters) hint.__doc__ = SparkFrameMethods.hint.__doc__ def to_table( self, name: str, format: Optional[str] = None, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options ): return self.spark.to_table(name, format, mode, partition_cols, index_col, **options) to_table.__doc__ = SparkFrameMethods.to_table.__doc__ def to_delta( self, path: str, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options ): """ Write the DataFrame out as a Delta Lake table. Parameters ---------- path : str, required Path to write to. 
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when the destination exists already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options : dict All other options passed directly into Delta Lake. See Also -------- read_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 Create a new Delta Lake table, partitioned by one column: >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') Partitioned by two columns: >>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country']) Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta: >>> df.to_delta('%s/to_delta/bar' % path, ... mode='overwrite', replaceWhere='date >= "2012-01-01"') """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore self.to_spark_io( path=path, mode=mode, format="delta", partition_cols=partition_cols, index_col=index_col, **options ) def to_parquet( self, path: str, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, compression: Optional[str] = None, index_col: Optional[Union[str, List[str]]] = None, **options ): """ Write the DataFrame out as a Parquet file or directory. Parameters ---------- path : str, required Path to write to. mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when the destination exists already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'} Compression codec to use when saving to file. If None is set, it uses the value specified in `spark.sql.parquet.compression.codec`. index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_parquet DataFrame.to_delta DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... 
code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date') >>> df.to_parquet( ... '%s/to_parquet/foo.parquet' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country']) """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore builder = self.to_spark(index_col=index_col).write.mode(mode) if partition_cols is not None: builder.partitionBy(partition_cols) builder._set_opts(compression=compression) builder.options(**options).format("parquet").save(path) def to_spark_io( self, path: Optional[str] = None, format: Optional[str] = None, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options ): return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options) to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__ def to_spark(self, index_col: Optional[Union[str, List[str]]] = None): return self.spark.frame(index_col) to_spark.__doc__ = SparkFrameMethods.__doc__ def to_pandas(self): """ Return a pandas DataFrame. .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 """ return self._internal.to_pandas_frame.copy() # Alias to maintain backward compatibility with Spark toPandas = to_pandas def assign(self, **kwargs): """ Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though Koalas doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Examples -------- >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... temp_k=df['temp_c'] + 273.15) >>> assigned[['temp_c', 'temp_f', 'temp_k']] temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in Koalas. In Koalas, all items are computed first, and then assigned. 
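        The implementation also accepts a raw PySpark ``Column`` as an assigned value
        (see the type check in ``_assign``). A minimal, illustrative sketch:
        ``temp_zero`` is a hypothetical column name and the rendered output is a
        sketch only, hence skipped by doctests.

        >>> from pyspark.sql import functions as F
        >>> df.assign(temp_zero=F.lit(0.0))  # doctest: +SKIP
                  temp_c  temp_zero
        Portland    17.0        0.0
        Berkeley    25.0        0.0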
""" return self._assign(kwargs) def _assign(self, kwargs): assert isinstance(kwargs, dict) from databricks.koalas.series import Series for k, v in kwargs.items(): if not (isinstance(v, (Series, spark.Column)) or callable(v) or is_scalar(v)): raise TypeError( "Column assignment doesn't support type " "{0}".format(type(v).__name__) ) if callable(v): kwargs[k] = v(self) pairs = { (k if isinstance(k, tuple) else (k,)): ( v.spark.column if isinstance(v, Series) else v if isinstance(v, spark.Column) else F.lit(v) ) for k, v in kwargs.items() } scols = [] for label in self._internal.column_labels: for i in range(len(label)): if label[: len(label) - i] in pairs: name = self._internal.spark_column_name_for(label) scol = pairs[label[: len(label) - i]].alias(name) break else: scol = self._internal.spark_column_for(label) scols.append(scol) column_labels = self._internal.column_labels.copy() for label, scol in pairs.items(): if label not in set(i[: len(label)] for i in self._internal.column_labels): scols.append(scol.alias(name_like_string(label))) column_labels.append(label) level = self._internal.column_labels_level column_labels = [ tuple(list(label) + ([""] * (level - len(label)))) for label in column_labels ] internal = self._internal.with_new_columns(scols, column_labels=column_labels) return DataFrame(internal) @staticmethod def from_records( data: Union[np.array, List[tuple], dict, pd.DataFrame], index: Union[str, list, np.array] = None, exclude: list = None, columns: list = None, coerce_float: bool = False, nrows: int = None, ) -> "DataFrame": """ Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- df : DataFrame Examples -------- Use dict as input >>> ks.DataFrame.from_records({'A': [1, 2, 3]}) A 0 1 1 2 2 3 Use list of tuples as input >>> ks.DataFrame.from_records([(1, 2), (3, 4)]) 0 1 0 1 2 1 3 4 Use NumPy array as input >>> ks.DataFrame.from_records(np.eye(3)) 0 1 2 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 """ return DataFrame( pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows) ) def to_records(self, index=True, column_dtypes=None, index_dtypes=None): """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. 
If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) # doctest: +SKIP rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Specification of dtype for columns is new in pandas 0.24.0. Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')]) Specification of dtype for index is new in pandas 0.24.0. Data types can also be specified for the index: >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')]) """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args ) def copy(self, deep=None) -> "DataFrame": """ Make a copy of this object's indices and data. Parameters ---------- deep : None this parameter is not supported but just dummy parameter to match pandas. Returns ------- copy : DataFrame Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df_copy = df.copy() >>> df_copy x y z w 0 1 3 5 7 1 2 4 6 8 """ return DataFrame(self._internal) def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False): """ Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... 
columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. >>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, "inplace") if thresh is None: if how is None: raise TypeError("must specify how or thresh") elif how not in ("any", "all"): raise ValueError("invalid how option: {h}".format(h=how)) if subset is not None: if isinstance(subset, str): labels = [(subset,)] elif isinstance(subset, tuple): labels = [subset] else: labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset] else: labels = None if axis == 0: if labels is not None: invalids = [label for label in labels if label not in self._internal.column_labels] if len(invalids) > 0: raise KeyError(invalids) else: labels = self._internal.column_labels cnt = reduce( lambda x, y: x + y, [ F.when(self._kser_for(label).notna().spark.column, 1).otherwise(0) for label in labels ], F.lit(0), ) if thresh is not None: pred = cnt >= F.lit(int(thresh)) elif how == "any": pred = cnt == F.lit(len(labels)) elif how == "all": pred = cnt > F.lit(0) internal = self._internal.with_filter(pred) if inplace: self._update_internal_frame(internal) else: return DataFrame(internal) else: assert axis == 1 internal = self._internal.resolved_copy if labels is not None: if any(len(lbl) != len(internal.index_map) for lbl in labels): raise ValueError( "The length of each subset must be the same as the index size." ) cond = reduce( lambda x, y: x | y, [ reduce( lambda x, y: x & y, [ scol == F.lit(l) for l, scol in zip(lbl, internal.index_spark_columns) ], ) for lbl in labels ], ) internal = internal.with_filter(cond) null_counts = [] for label in internal.column_labels: scol = internal.spark_column_for(label) if isinstance(internal.spark_type_for(label), (FloatType, DoubleType)): cond = scol.isNull() | F.isnan(scol) else: cond = scol.isNull() null_counts.append( F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label)) ) counts = internal.spark_frame.select(null_counts + [F.count("*")]).head() if thresh is not None: column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) >= int(thresh) ] elif how == "any": column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) == counts[-1] ] elif how == "all": column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0 ] kdf = self[column_labels] if inplace: self._update_internal_frame(kdf._internal) else: return kdf # TODO: add 'limit' when value parameter exists def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None): """Fill NA/NaN values. .. 
note:: the current implementation of 'method' parameter in fillna uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ks.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 We can also propagate non-null values forward or backward. >>> df.fillna(method='ffill') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. 
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if value is not None: if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value)) if limit is not None: raise ValueError("limit parameter for value is not support now") if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v)) value = {k if isinstance(k, tuple) else (k,): v for k, v in value.items()} def op(kser): label = kser._internal.column_labels[0] for k, v in value.items(): if k == label[: len(k)]: return kser._fillna( value=value[k], method=method, axis=axis, limit=limit ) else: return kser else: op = lambda kser: kser._fillna(value=value, method=method, axis=axis, limit=limit) elif method is not None: op = lambda kser: kser._fillna(value=value, method=method, axis=axis, limit=limit) else: raise ValueError("Must specify a fillna 'value' or 'method' parameter.") kdf = self._apply_series_op(op, should_resolve=(method is not None)) inplace = validate_bool_kwarg(inplace, "inplace") if inplace: self._update_internal_frame(kdf._internal, requires_same_anchor=False) else: return kdf def replace( self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method="pad", ): """ Returns a new DataFrame replacing a value with another value. Parameters ---------- to_replace : int, float, string, list or dict Value to be replaced. value : int, float, string, or list Value to use to replace holes. The replacement value must be an int, float, or string. If value is a list, value should be of the same length with to_replace. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- DataFrame Object after replacement. Examples -------- >>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'], ... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']}, ... 
columns=['name', 'weapon'])
        >>> df
                      name   weapon
        0          Ironman  Mark-45
        1  Captain America   Shield
        2             Thor  Mjolnir
        3             Hulk    Smash

        Scalar `to_replace` and `value`

        >>> df.replace('Ironman', 'War-Machine')
                      name   weapon
        0      War-Machine  Mark-45
        1  Captain America   Shield
        2             Thor  Mjolnir
        3             Hulk    Smash

        List-like `to_replace` and `value`

        >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
        >>> df
              name   weapon
        0   Rescue  Mark-45
        1  Hawkeye   Shield
        2     Thor  Mjolnir
        3     Hulk    Smash

        Dicts can be used to specify different replacement values for different
        existing values. To use a dict in this way, the value parameter should be None.

        >>> df.replace({'Mjolnir': 'Stormbuster'})
              name       weapon
        0   Rescue      Mark-45
        1  Hawkeye       Shield
        2     Thor  Stormbuster
        3     Hulk        Smash

        A dict can specify that different values should be replaced in different
        columns. The value parameter should not be None in this case.

        >>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster')
              name       weapon
        0   Rescue      Mark-45
        1  Hawkeye       Shield
        2     Thor  Stormbuster
        3     Hulk        Smash

        Nested dictionaries. The value parameter should be None to use a nested
        dict in this way.

        >>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}})
              name       weapon
        0   Rescue      Mark-45
        1  Hawkeye       Shield
        2     Thor  Stormbuster
        3     Hulk        Smash
        """
        if method != "pad":
            raise NotImplementedError("replace currently works only for method='pad'")
        if limit is not None:
            raise NotImplementedError("replace currently works only when limit=None")
        if regex is not False:
            raise NotImplementedError("replace currently doesn't support regex")
        inplace = validate_bool_kwarg(inplace, "inplace")

        if value is not None and not isinstance(value, (int, float, str, list, dict)):
            raise TypeError("Unsupported type {}".format(type(value)))
        if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)):
            raise TypeError("Unsupported type {}".format(type(to_replace)))

        if isinstance(value, list) and isinstance(to_replace, list):
            if len(value) != len(to_replace):
                raise ValueError("Length of to_replace and value must be the same")

        if isinstance(to_replace, dict) and (
            value is not None or all(isinstance(i, dict) for i in to_replace.values())
        ):

            def op(kser):
                if kser.name in to_replace:
                    return kser.replace(to_replace=to_replace[kser.name], value=value, regex=regex)
                else:
                    return kser

        else:
            op = lambda kser: kser.replace(to_replace=to_replace, value=value, regex=regex)

        kdf = self._apply_series_op(op)
        if inplace:
            self._update_internal_frame(kdf._internal)
        else:
            return kdf

    def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "DataFrame":
        """
        Trim values at input threshold(s).

        Assigns values outside boundary to boundary values.

        Parameters
        ----------
        lower : float or int, default None
            Minimum threshold value. All values below this threshold will be set to it.
        upper : float or int, default None
            Maximum threshold value. All values above this threshold will be set to it.

        Returns
        -------
        DataFrame
            DataFrame with the values outside the clip boundaries replaced.

        Examples
        --------
        >>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
           A
        0  1
        1  2
        2  3

        Notes
        -----
        One difference between this implementation and pandas is that running
        pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with
        "TypeError: '<=' not supported between instances of 'str' and 'int'"
        while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1) will output the original
        DataFrame, simply ignoring the incompatible types.
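        A one-sided clip leaves the other bound untouched. The following is a minimal
        sketch (the rendered output is illustrative only, hence skipped by doctests):

        >>> ks.DataFrame({'A': [0, 2, 4]}).clip(lower=1)  # doctest: +SKIP
           A
        0  1
        1  2
        2  4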
""" if is_list_like(lower) or is_list_like(upper): raise ValueError( "List-like value are not supported for 'lower' and 'upper' at the " + "moment" ) if lower is None and upper is None: return self return self._apply_series_op(lambda kser: kser.clip(lower=lower, upper=upper)) def head(self, n: int = 5) -> "DataFrame": """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ if n < 0: n = len(self) + n if n <= 0: return DataFrame(self._internal.with_filter(F.lit(False))) else: sdf = self._internal.resolved_copy.spark_frame if get_option("compute.ordered_head"): sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME) return DataFrame(self._internal.with_new_sdf(sdf.limit(n))) def pivot_table(self, values=None, index=None, columns=None, aggfunc="mean", fill_value=None): """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- values : column to aggregate. They should be either a list less than three or a string. index : column (string) or list of columns If an array is passed, it must be the same length as the data. The list should contain string. columns : column Columns used in the pivot operation. Only one column is supported and it should be a string. aggfunc : function (string), dict, default mean If dict is passed, the resulting pivot table will have columns concatenated by "_" where the first part is the value of columns and the second part is the column name in values If dict is passed, the key is column to aggregate and value is function or list of functions. fill_value : scalar, default None Value to replace missing values with. Returns ------- table : DataFrame Examples -------- >>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}, ... columns=['A', 'B', 'C', 'D', 'E']) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum') >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4.0 5 two 7.0 6 foo one 4.0 1 two NaN 6 We can also fill missing values using the `fill_value` parameter. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... 
columns='C', aggfunc='sum', fill_value=0) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 We can also calculate multiple types of aggregations for any given value column. >>> table = df.pivot_table(values=['D'], index =['C'], ... columns="A", aggfunc={'D': 'mean'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D A bar foo C large 5.5 2.000000 small 5.5 2.333333 The next example aggregates on multiple values. >>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'], ... aggfunc={'D': 'mean', 'E': 'sum'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D E A bar foo bar foo C large 5.5 2.000000 15 9 small 5.5 2.333333 17 13 """ if not isinstance(columns, (str, tuple)): raise ValueError("columns should be string or tuple.") if not isinstance(values, (str, tuple)) and not isinstance(values, list): raise ValueError("values should be string or list of one column.") if not isinstance(aggfunc, str) and ( not isinstance(aggfunc, dict) or not all( isinstance(key, (str, tuple)) and isinstance(value, str) for key, value in aggfunc.items() ) ): raise ValueError( "aggfunc must be a dict mapping from column name (string or tuple) " "to aggregate functions (string)." ) if isinstance(aggfunc, dict) and index is None: raise NotImplementedError( "pivot_table doesn't support aggfunc" " as dict and without index." ) if isinstance(values, list) and index is None: raise NotImplementedError("values can't be a list without index.") if columns not in self.columns: raise ValueError("Wrong columns {}.".format(columns)) if isinstance(columns, str): columns = (columns,) if isinstance(values, list): values = [col if isinstance(col, tuple) else (col,) for col in values] if not all( isinstance(self._internal.spark_type_for(col), NumericType) for col in values ): raise TypeError("values should be a numeric type.") else: values = values if isinstance(values, tuple) else (values,) if not isinstance(self._internal.spark_type_for(values), NumericType): raise TypeError("values should be a numeric type.") if isinstance(aggfunc, str): if isinstance(values, list): agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(value), aggfunc ) ) for value in values ] else: agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(values), aggfunc ) ) ] elif isinstance(aggfunc, dict): aggfunc = { key if isinstance(key, tuple) else (key,): value for key, value in aggfunc.items() } agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format(self._internal.spark_column_name_for(key), value) ) for key, value in aggfunc.items() ] agg_columns = [key for key, _ in aggfunc.items()] if set(agg_columns) != set(values): raise ValueError("Columns in aggfunc must be the same as values.") sdf = self._internal.resolved_copy.spark_frame if index is None: sdf = ( sdf.groupBy() .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) elif isinstance(index, list): index = [label if isinstance(label, tuple) else (label,) for label in index] sdf = ( sdf.groupBy([self._internal.spark_column_name_for(label) for label in index]) .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) else: raise ValueError("index should be a None or a list of columns.") if fill_value is not None and isinstance(fill_value, (int, float)): sdf = sdf.fillna(fill_value) if index is not None: if isinstance(values, list): index_columns = [self._internal.spark_column_name_for(label) for 
label in index] data_columns = [column for column in sdf.columns if column not in index_columns] if len(values) > 1: # If we have two values, Spark will return column's name # in this format: column_values, where column contains # their values in the DataFrame and values is # the column list passed to the pivot_table(). # E.g. if column is b and values is ['b','e'], # then ['2_b', '2_e', '3_b', '3_e']. # We sort the columns of Spark DataFrame by values. data_columns.sort(key=lambda x: x.split("_", 1)[1]) sdf = sdf.select(index_columns + data_columns) column_name_to_index = dict( zip(self._internal.data_spark_column_names, self._internal.column_labels) ) column_labels = [ tuple(list(column_name_to_index[name.split("_")[1]]) + [name.split("_")[0]]) for name in data_columns ] index_map = OrderedDict(zip(index_columns, index)) column_label_names = ([None] * column_labels_level(values)) + [ str(columns) if len(columns) > 1 else columns[0] ] internal = InternalFrame( spark_frame=sdf, index_map=index_map, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) kdf = DataFrame(internal) else: column_labels = [tuple(list(values[0]) + [column]) for column in data_columns] index_map = OrderedDict(zip(index_columns, index)) column_label_names = ([None] * len(values[0])) + [ str(columns) if len(columns) > 1 else columns[0] ] internal = InternalFrame( spark_frame=sdf, index_map=index_map, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) kdf = DataFrame(internal) return kdf else: index_columns = [self._internal.spark_column_name_for(label) for label in index] index_map = OrderedDict(zip(index_columns, index)) column_label_names = [str(columns) if len(columns) > 1 else columns[0]] internal = InternalFrame( spark_frame=sdf, index_map=index_map, column_label_names=column_label_names ) return DataFrame(internal) else: if isinstance(values, list): index_values = values[-1] else: index_values = values index_map = OrderedDict() for i, index_value in enumerate(index_values): colname = SPARK_INDEX_NAME_FORMAT(i) sdf = sdf.withColumn(colname, F.lit(index_value)) index_map[colname] = None column_label_names = [str(columns) if len(columns) > 1 else columns[0]] internal = InternalFrame( spark_frame=sdf, index_map=index_map, column_label_names=column_label_names ) return DataFrame(internal) def pivot(self, index=None, columns=None, values=None): """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation. Parameters ---------- index : string, optional Column to use to make new frame's index. If None, uses existing index. columns : string Column to use to make new frame's columns. values : string, object or a list of the previous Column(s) to use for populating new frame's values. Returns ------- DataFrame Returns reshaped DataFrame. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. Examples -------- >>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}, ... 
columns=['foo', 'bar', 'baz', 'zoo']) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE bar A B C 0 1.0 NaN NaN 1 NaN 2.0 NaN 2 NaN NaN 3.0 3 4.0 NaN NaN 4 NaN 5.0 NaN 5 NaN NaN 6.0 Notice that, unlike pandas raises an ValueError when duplicated values are found, Koalas' pivot still works with its first value it meets during operation because pivot is an expensive operation and it is preferred to permissively execute over failing fast when processing large data. >>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz']) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1.0 NaN NaN two NaN 3.0 4.0 It also support multi-index and multi-index column. >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')]) >>> df = df.set_index(('a', 'bar'), append=True) >>> df # doctest: +NORMALIZE_WHITESPACE a b foo baz (a, bar) 0 A one 1 1 A one 2 2 B two 3 3 C two 4 >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index() ... # doctest: +NORMALIZE_WHITESPACE ('a', 'foo') one two (a, bar) 0 A 1.0 NaN 1 A 2.0 NaN 2 B NaN 3.0 3 C NaN 4.0 """ if columns is None: raise ValueError("columns should be set.") if values is None: raise ValueError("values should be set.") should_use_existing_index = index is not None if should_use_existing_index: df = self index = [index] else: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. 
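            # ("distributed" builds the default index from Spark's monotonically
            # increasing id, so this throwaway index needs no global ordering pass,
            # unlike the default "sequence" index type.)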
with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index = df._internal.column_labels[: len(self._internal.index_spark_column_names)] df = df.pivot_table(index=index, columns=columns, values=values, aggfunc="first") if should_use_existing_index: return df else: index_columns = df._internal.index_spark_column_names internal = df._internal.copy( index_map=OrderedDict( (index_column, name) for index_column, name in zip(index_columns, self._internal.index_names) ) ) return DataFrame(internal) @property def columns(self): """The column labels of the DataFrame.""" if self._internal.column_labels_level > 1: columns = pd.MultiIndex.from_tuples(self._internal.column_labels) else: columns = pd.Index([label[0] for label in self._internal.column_labels]) if self._internal.column_label_names is not None: columns.names = self._internal.column_label_names return columns @columns.setter def columns(self, columns): if isinstance(columns, pd.MultiIndex): column_labels = columns.tolist() old_names = self._internal.column_labels if len(old_names) != len(column_labels): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(column_labels)) ) column_label_names = columns.names else: old_names = self._internal.column_labels if len(old_names) != len(columns): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(columns)) ) column_labels = [col if isinstance(col, tuple) else (col,) for col in columns] if isinstance(columns, pd.Index): column_label_names = columns.names else: column_label_names = None data_columns = [name_like_string(label) for label in column_labels] data_spark_columns = [ self._internal.spark_column_for(label).alias(name) for label, name in zip(self._internal.column_labels, data_columns) ] self._update_internal_frame( self._internal.with_new_columns( data_spark_columns, column_labels=column_labels, column_label_names=column_label_names, ) ) @property def dtypes(self): """Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ks.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object """ return pd.Series( [self._kser_for(label).dtype for label in self._internal.column_labels], index=pd.Index( [label if len(label) > 1 else label[0] for label in self._internal.column_labels] ), ) def spark_schema(self, index_col: Optional[Union[str, List[str]]] = None): warnings.warn( "DataFrame.spark_schema is deprecated as of DataFrame.spark.schema. " "Please use the API instead.", FutureWarning, ) return self.spark.schema(index_col) spark_schema.__doc__ = SparkFrameMethods.schema.__doc__ def print_schema(self, index_col: Optional[Union[str, List[str]]] = None): warnings.warn( "DataFrame.print_schema is deprecated as of DataFrame.spark.print_schema. 
" "Please use the API instead.", FutureWarning, ) return self.spark.print_schema(index_col) print_schema.__doc__ = SparkFrameMethods.print_schema.__doc__ def select_dtypes(self, include=None, exclude=None): """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. It also takes Spark SQL DDL type strings, for instance, 'string' and 'date'. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes() Traceback (most recent call last): ... ValueError: at least one of include or exclude must be nonempty * If ``include`` and ``exclude`` have overlapping elements >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes(include='a', exclude='a') Traceback (most recent call last): ... TypeError: string dtypes are not allowed, use 'object' instead Notes ----- * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` Examples -------- >>> df = ks.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3, ... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd']) >>> df a b c d 0 1 True 1.0 a 1 2 False 2.0 b 2 1 True 1.0 a 3 2 False 2.0 b 4 1 True 1.0 a 5 2 False 2.0 b >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64'], exclude=['int']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c d 0 True 1.0 a 1 False 2.0 b 2 True 1.0 a 3 False 2.0 b 4 True 1.0 a 5 False 2.0 b Spark SQL DDL type strings can be used as well. >>> df.select_dtypes(exclude=['string']) a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 """ from pyspark.sql.types import _parse_datatype_string if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () if not any((include, exclude)): raise ValueError("at least one of include or exclude must be " "nonempty") # can't both include AND exclude! 
if set(include).intersection(set(exclude)): raise ValueError( "include and exclude overlap on {inc_ex}".format( inc_ex=set(include).intersection(set(exclude)) ) ) # Handle Spark types include_spark_type = [] for inc in include: try: include_spark_type.append(_parse_datatype_string(inc)) except: pass exclude_spark_type = [] for exc in exclude: try: exclude_spark_type.append(_parse_datatype_string(exc)) except: pass # Handle pandas types include_numpy_type = [] for inc in include: try: include_numpy_type.append(infer_dtype_from_object(inc)) except: pass exclude_numpy_type = [] for exc in exclude: try: exclude_numpy_type.append(infer_dtype_from_object(exc)) except: pass column_labels = [] for label in self._internal.column_labels: if len(include) > 0: should_include = ( infer_dtype_from_object(self._kser_for(label).dtype.name) in include_numpy_type or self._internal.spark_type_for(label) in include_spark_type ) else: should_include = not ( infer_dtype_from_object(self._kser_for(label).dtype.name) in exclude_numpy_type or self._internal.spark_type_for(label) in exclude_spark_type ) if should_include: column_labels.append(label) data_spark_columns = [self._internal.spark_column_for(label) for label in column_labels] return DataFrame( self._internal.with_new_columns(data_spark_columns, column_labels=column_labels) ) def count(self, axis=None): """ Count non-NA cells for each column. The values `None`, `NaN` are considered NA. Parameters ---------- axis : {0 or ‘index’, 1 or ‘columns’}, default 0 If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are generated for each row. Returns ------- pandas.Series See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = ks.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}, ... columns=["Person", "Age", "Single"]) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 Name: 0, dtype: int64 >>> df.count(axis=1) 0 3 1 2 2 3 3 3 4 3 Name: 0, dtype: int64 """ return self._reduce_for_stat_function( Frame._count_expr, name="count", axis=axis, numeric_only=False ) def droplevel(self, level, axis=0) -> "DataFrame": """ Return DataFrame with requested index / column level(s) removed. Parameters ---------- level: int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis: {0 or ‘index’, 1 or ‘columns’}, default 0 Returns ------- DataFrame with requested index / column level(s) removed. Examples -------- >>> df = ks.DataFrame( ... [[3, 4], [7, 8], [11, 12]], ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]), ... ) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... 
], names=['level_1', 'level_2']) >>> df # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ axis = validate_axis(axis) kdf = self.copy() if axis == 0: if not isinstance(level, (tuple, list)): if not isinstance(level, (str, int)): raise KeyError("Level {} not found".format(level)) level = [level] spark_frame = self._internal.spark_frame index_map = self._internal.index_map.copy() index_names = self.index.names nlevels = self.index.nlevels int_levels = list() for n in level: if isinstance(n, (str, tuple)): if n not in index_names: raise KeyError("Level {} not found".format(n)) n = index_names.index(n) elif isinstance(n, int): if n < 0: n = n + nlevels if n < 0: raise IndexError( "Too many levels: Index has only {} levels, " "{} is not a valid level number".format(nlevels, (n - nlevels)) ) if n >= nlevels: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( nlevels, (n + 1) ) ) int_levels.append(n) if len(int_levels) >= nlevels: raise ValueError( "Cannot remove {} levels from an index with {} levels: " "at least one level must be left.".format(len(int_levels), nlevels) ) for int_level in int_levels: index_spark_column = self._internal.index_spark_column_names[int_level] spark_frame = spark_frame.drop(index_spark_column) index_map.pop(index_spark_column) internal = self._internal.copy(spark_frame=spark_frame, index_map=index_map) kdf = DataFrame(internal) elif axis == 1: names = self.columns.names nlevels = self.columns.nlevels if not isinstance(level, (tuple, list)): level = [level] for n in level: if isinstance(n, int) and (n > nlevels - 1): raise IndexError( "Too many levels: Column has only {} levels, not {}".format(nlevels, n + 1) ) if isinstance(n, (str, tuple)) and (n not in names): raise KeyError("Level {} not found".format(n)) if len(level) >= nlevels: raise ValueError( "Cannot remove {} levels from an index with {} " "levels: at least one level must be " "left.".format(len(level), nlevels) ) kdf.columns = kdf.columns.droplevel(level) return kdf def drop( self, labels=None, axis=1, columns: Union[str, Tuple[str, ...], List[str], List[Tuple[str, ...]]] = None, ): """ Drop specified labels from columns. Remove columns by specifying label names and axis=1 or columns. When specifying both labels and columns, only labels will be dropped. Removing rows is yet to be implemented. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {1 or 'columns'}, default 1 .. dropna currently only works for axis=1 'columns' axis=0 is yet to be implemented. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('x', axis=1) y z w 0 3 5 7 1 4 6 8 >>> df.drop(['y', 'z'], axis=1) x w 0 1 7 1 2 8 >>> df.drop(columns=['y', 'z']) x w 0 1 7 1 2 8 Also support for MultiIndex >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... 
columns=['x', 'y', 'z', 'w']) >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df # doctest: +NORMALIZE_WHITESPACE a b x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('a') # doctest: +NORMALIZE_WHITESPACE b z w 0 5 7 1 6 8 Notes ----- Currently only axis = 1 is supported in this function, axis = 0 is yet to be implemented. """ if labels is not None: axis = validate_axis(axis) if axis == 1: return self.drop(columns=labels) raise NotImplementedError("Drop currently only works for axis=1") elif columns is not None: if isinstance(columns, str): columns = [(columns,)] # type: ignore elif isinstance(columns, tuple): columns = [columns] else: columns = [ # type: ignore col if isinstance(col, tuple) else (col,) for col in columns # type: ignore ] drop_column_labels = set( label for label in self._internal.column_labels for col in columns if label[: len(col)] == col ) if len(drop_column_labels) == 0: raise KeyError(columns) cols, labels = zip( *( (column, label) for column, label in zip( self._internal.data_spark_column_names, self._internal.column_labels ) if label not in drop_column_labels ) ) data_spark_columns = [self._internal.spark_column_for(label) for label in labels] internal = self._internal.with_new_columns( data_spark_columns, column_labels=list(labels) ) return DataFrame(internal) else: raise ValueError("Need to specify at least one of 'labels' or 'columns'") def _sort( self, by: List[Column], ascending: Union[bool, List[bool]], inplace: bool, na_position: str ): if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError( "Length of ascending ({}) != length of by ({})".format(len(ascending), len(by)) ) if na_position not in ("first", "last"): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847. mapper = { (True, "first"): lambda x: Column(getattr(x._jc, "asc_nulls_first")()), (True, "last"): lambda x: Column(getattr(x._jc, "asc_nulls_last")()), (False, "first"): lambda x: Column(getattr(x._jc, "desc_nulls_first")()), (False, "last"): lambda x: Column(getattr(x._jc, "desc_nulls_last")()), } by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)] sdf = self._internal.resolved_copy.spark_frame.sort(*(by + [NATURAL_ORDER_COLUMN_NAME])) kdf = DataFrame(self._internal.with_new_sdf(sdf)) # type: ks.DataFrame if inplace: self._update_internal_frame(kdf._internal) return None else: return kdf def sort_values( self, by: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]], ascending: Union[bool, List[bool]] = True, inplace: bool = False, na_position: str = "last", ) -> Optional["DataFrame"]: """ Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... 
columns=['col1', 'col2', 'col3']) >>> df col1 col2 col3 0 A 2 0 1 B 9 9 2 None 8 4 3 D 7 2 4 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 B 9 9 4 C 4 3 3 D 7 2 2 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 3 D 7 2 4 C 4 3 1 B 9 9 0 A 2 0 2 None 8 4 Sort by multiple columns >>> df = ks.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4 """ inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(by, (str, tuple)): by = [by] # type: ignore else: by = [b if isinstance(b, tuple) else (b,) for b in by] # type: ignore new_by = [] for colname in by: ser = self[colname] if not isinstance(ser, ks.Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." % name_like_string(colname) ) new_by.append(ser.spark.column) return self._sort(by=new_by, ascending=ascending, inplace=inplace, na_position=na_position) def sort_index( self, axis: int = 0, level: Optional[Union[int, List[int]]] = None, ascending: bool = True, inplace: bool = False, kind: str = None, na_position: str = "last", ) -> Optional["DataFrame"]: """ Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending vs. descending inplace : bool, default False if True, perform operation in-place kind : str, default None Koalas does not allow specifying the sorting algorithm at the moment, default None na_position : {‘first’, ‘last’}, default ‘last’ first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for MultiIndex. Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan]) >>> df.sort_index() A a 1.0 b 2.0 NaN NaN >>> df.sort_index(ascending=False) A b 2.0 a 1.0 NaN NaN >>> df.sort_index(na_position='first') A NaN NaN a 1.0 b 2.0 >>> df.sort_index(inplace=True) >>> df A a 1.0 b 2.0 NaN NaN >>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]}, ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], ... columns=['A', 'B']) >>> df.sort_index() A B a 0 3 0 1 2 1 b 0 1 2 1 0 3 >>> df.sort_index(level=1) # doctest: +SKIP A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 >>> df.sort_index(level=[1, 0]) A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = validate_axis(axis) if axis != 0: raise NotImplementedError("No other axis than 0 are supported at the moment") if kind is not None: raise NotImplementedError( "Specifying the sorting algorithm is not supported at the moment." ) if level is None or (is_list_like(level) and len(level) == 0): # type: ignore by = self._internal.index_spark_columns elif is_list_like(level): by = [self._internal.index_spark_columns[l] for l in level] # type: ignore else: by = [self._internal.index_spark_columns[level]] return self._sort(by=by, ascending=ascending, inplace=inplace, na_position=na_position) # TODO: add keep = First def nlargest(self, n: int, columns: "Any") -> "DataFrame": """ Return the first `n` rows ordered by `columns` in descending order. 
Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant in pandas. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(n=3, columns='X') X Y 5 7.0 11 4 6.0 10 3 5.0 9 >>> df.nlargest(n=3, columns=['Y', 'X']) X Y 6 NaN 12 5 7.0 11 4 6.0 10 """ kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame] assert kdf is not None return kdf.head(n=n) # TODO: add keep = First def nsmallest(self, n: int, columns: "Any") -> "DataFrame": """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 """ kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame] assert kdf is not None return kdf.head(n=n) def isin(self, values): """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. 
Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True """ if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns)) ) data_spark_columns = [] if isinstance(values, dict): for i, col in enumerate(self.columns): if col in values: data_spark_columns.append( self._internal.spark_column_for(self._internal.column_labels[i]) .isin(values[col]) .alias(self._internal.data_spark_column_names[i]) ) else: data_spark_columns.append( F.lit(False).alias(self._internal.data_spark_column_names[i]) ) elif is_list_like(values): data_spark_columns += [ self._internal.spark_column_for(label) .isin(list(values)) .alias(self._internal.spark_column_name_for(label)) for label in self._internal.column_labels ] else: raise TypeError("Values should be iterable, Series, DataFrame or dict.") return DataFrame(self._internal.with_new_columns(data_spark_columns)) @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self), len(self.columns) def merge( self, right: "DataFrame", how: str = "inner", on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None, left_on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None, right_on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ("_x", "_y"), ) -> "DataFrame": """ Merge DataFrame objects with a database-style join. The index of the resulting DataFrame will be one of the following: - 0...n if no index is used for merging - Index of the left DataFrame if merged only on the index of the right DataFrame - Index of the right DataFrame if merged only on the index of the left DataFrame - All involved indices if merged using the indices of both DataFrames e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will be an index (x, a, b) Parameters ---------- right: Object to merge with. how: Type of merge to be performed. {'left', 'right', 'outer', 'inner'}, default 'inner' left: use only keys from left frame, similar to a SQL left outer join; preserve key order. right: use only keys from right frame, similar to a SQL right outer join; preserve key order. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. 
on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on: Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on: Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- DataFrame.join : Join columns of another DataFrame. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}, ... columns=['lkey', 'value']) >>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}, ... columns=['rkey', 'value']) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey') >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS lkey value_x rkey value_y ...bar 2 bar 6 ...baz 3 baz 7 ...foo 1 foo 5 ...foo 1 foo 8 ...foo 5 foo 5 ...foo 5 foo 8 >>> left_kdf = ks.DataFrame({'A': [1, 2]}) >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_kdf.merge(right_kdf, left_index=True, right_index=True).sort_index() A B 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left').sort_index() A B 0 1 None 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right').sort_index() A B 1 2.0 x 2 NaN y >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer').sort_index() A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN. """ def to_list( os: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] ) -> List[Tuple[str, ...]]: if os is None: return [] elif isinstance(os, tuple): return [os] elif isinstance(os, str): return [(os,)] else: return [o if isinstance(o, tuple) else (o,) for o in os] # type: ignore if isinstance(right, ks.Series): right = right.to_frame() if on: if left_on or right_on: raise ValueError( 'Can only pass argument "on" OR "left_on" and "right_on", ' "not a combination of both." ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(on))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(on))) else: # TODO: need special handling for multi-index. 
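            # The block below resolves the Spark column names used as join keys on
            # each side: when left_index/right_index is set the frame's index
            # columns are used, otherwise the left_on/right_on labels are mapped to
            # their underlying Spark column names. If neither side names any keys,
            # the columns common to both frames are used instead (for the df1/df2
            # pair in the docstring above that would be the shared 'value' column),
            # which roughly mirrors pandas' default merge behaviour.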
if left_index: left_key_names = self._internal.index_spark_column_names else: left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on))) if right_index: right_key_names = right._internal.index_spark_column_names else: right_key_names = list( map(right._internal.spark_column_name_for, to_list(right_on)) ) if left_key_names and not right_key_names: raise ValueError("Must pass right_on or right_index=True") if right_key_names and not left_key_names: raise ValueError("Must pass left_on or left_index=True") if not left_key_names and not right_key_names: common = list(self.columns.intersection(right.columns)) if len(common) == 0: raise ValueError( "No common columns to perform merge on. Merge options: " "left_on=None, right_on=None, left_index=False, right_index=False" ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(common))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(common))) if len(left_key_names) != len(right_key_names): # type: ignore raise ValueError("len(left_keys) must equal len(right_keys)") if how == "full": warnings.warn( "Warning: While Koalas will accept 'full', you should use 'outer' " + "instead to be compatible with the pandas merge API", UserWarning, ) if how == "outer": # 'outer' in pandas equals 'full' in Spark how = "full" if how not in ("inner", "left", "right", "full"): raise ValueError( "The 'how' parameter has to be amongst the following values: ", "['inner', 'left', 'right', 'outer']", ) left_table = self._internal.resolved_copy.spark_frame.alias("left_table") right_table = right._internal.resolved_copy.spark_frame.alias("right_table") left_key_columns = [ # type: ignore scol_for(left_table, label) for label in left_key_names ] right_key_columns = [ # type: ignore scol_for(right_table, label) for label in right_key_names ] join_condition = reduce( lambda x, y: x & y, [lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)], ) joined_table = left_table.join(right_table, join_condition, how=how) # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = set(self._internal.column_labels) & set(right._internal.column_labels) exprs = [] data_columns = [] column_labels = [] left_scol_for = lambda label: scol_for( left_table, self._internal.spark_column_name_for(label) ) right_scol_for = lambda label: scol_for( right_table, right._internal.spark_column_name_for(label) ) for label in self._internal.column_labels: col = self._internal.spark_column_name_for(label) scol = left_scol_for(label) if label in duplicate_columns: spark_column_name = self._internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and spark_column_name in right_key_names ): # type: ignore right_scol = right_scol_for(label) if how == "right": scol = right_scol elif how == "full": scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col) else: pass else: col = col + left_suffix scol = scol.alias(col) label = tuple([label[0] + left_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) for label in right._internal.column_labels: col = right._internal.spark_column_name_for(label) scol = right_scol_for(label) if label in duplicate_columns: spark_column_name = self._internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and spark_column_name in right_key_names ): # type: ignore continue else: 
col = col + right_suffix scol = scol.alias(col) label = tuple([label[0] + right_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) left_index_scols = self._internal.index_spark_columns right_index_scols = right._internal.index_spark_columns # Retain indices if they are used for joining if left_index: if right_index: if how in ("inner", "left"): exprs.extend(left_index_scols) index_map = self._internal.index_map elif how == "right": exprs.extend(right_index_scols) index_map = right._internal.index_map else: index_map = OrderedDict() for (col, name), left_scol, right_scol in zip( self._internal.index_map.items(), left_index_scols, right_index_scols ): scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol) exprs.append(scol.alias(col)) index_map[col] = name else: exprs.extend(right_index_scols) index_map = right._internal.index_map elif right_index: exprs.extend(left_index_scols) index_map = self._internal.index_map else: index_map = OrderedDict() selected_columns = joined_table.select(*exprs) internal = InternalFrame( spark_frame=selected_columns, index_map=index_map if index_map else None, column_labels=column_labels, data_spark_columns=[scol_for(selected_columns, col) for col in data_columns], ) return DataFrame(internal) def join( self, right: "DataFrame", on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None, how: str = "left", lsuffix: str = "", rsuffix: str = "", ) -> "DataFrame": """ Join columns of another DataFrame. Join columns with `right` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- right: DataFrame, Series on: str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `right`, otherwise joins index-on-index. If multiple values given, the `right` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how: {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use `left` frame’s index (or column if on is specified). * right: use `right`’s index. * outer: form union of `left` frame’s index (or column if on is specified) with right’s index, and sort it. lexicographically. * inner: form intersection of `left` frame’s index (or column if on is specified) with `right`’s index, preserving the order of the `left`’s one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from `right` frame's overlapping columns. Returns ------- DataFrame A dataframe containing columns from both the `left` and `right`. See Also -------- DataFrame.merge: For column(s)-on-columns(s) operations. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Notes ----- Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame objects. Examples -------- >>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], ... 'A': ['A0', 'A1', 'A2', 'A3']}, ... columns=['key', 'A']) >>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}, ... 
columns=['key', 'B']) >>> kdf1 key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 >>> kdf2 key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right') >>> join_kdf.sort_values(by=join_kdf.columns) key_left A key_right B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 None None If we want to join using the key columns, we need to set key to be the index in both df and right. The joined DataFrame will have key as its index. >>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key')) >>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 None Another option to join using the key columns is to use the on parameter. DataFrame.join always uses right’s index but we can use any column in df. This method preserves the original DataFrame’s index in the result. >>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key') >>> join_kdf.index Int64Index([0, 1, 2, 3], dtype='int64') """ if isinstance(right, ks.Series): common = list(self.columns.intersection([right.name])) else: common = list(self.columns.intersection(right.columns)) if len(common) > 0 and not lsuffix and not rsuffix: raise ValueError( "columns overlap but no suffix specified: " "{rename}".format(rename=common) ) if on: self = self.set_index(on) join_kdf = self.merge( right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix) ).reset_index() else: join_kdf = self.merge( right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix) ) return join_kdf def append( self, other: "DataFrame", ignore_index: bool = False, verify_integrity: bool = False, sort: bool = False, ) -> "DataFrame": """ Append rows of other to the end of caller, returning a new object. Columns in other that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default False Currently not supported. Returns ------- appended : DataFrame Examples -------- >>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df.append(df) A B 0 1 2 1 3 4 0 1 2 1 3 4 >>> df.append(df, ignore_index=True) A B 0 1 2 1 3 4 2 1 2 3 3 4 """ if isinstance(other, ks.Series): raise ValueError("DataFrames.append() does not support appending Series to DataFrames") if sort: raise NotImplementedError("The 'sort' parameter is currently not supported") if not ignore_index: index_scols = self._internal.index_spark_columns if len(index_scols) != len(other._internal.index_spark_columns): raise ValueError("Both DataFrames have to have the same number of index levels") if verify_integrity and len(index_scols) > 0: if ( self._internal.spark_frame.select(index_scols) .intersect( other._internal.spark_frame.select(other._internal.index_spark_columns) ) .count() ) > 0: raise ValueError("Indices have overlapping values") # Lazy import to avoid circular dependency issues from databricks.koalas.namespace import concat return concat([self, other], ignore_index=ignore_index) # TODO: add 'filter_func' and 'errors' parameter def update(self, other: "DataFrame", join: str = "left", overwrite: bool = True): """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. 
Parameters ---------- other : DataFrame, or Series join : 'left', default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. Returns ------- None : method directly changes calling object See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. DataFrame.join : Join columns of another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df.sort_index() A B 0 a d 1 b y 2 c e If `other` contains None the corresponding values are not updated in the original dataframe. >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4.0 1 2 500.0 2 3 6.0 """ if join != "left": raise NotImplementedError("Only left join is supported") if isinstance(other, ks.Series): other = other.to_frame() update_columns = list( set(self._internal.column_labels).intersection(set(other._internal.column_labels)) ) update_sdf = self.join( other[update_columns], rsuffix="_new" )._internal.resolved_copy.spark_frame for column_labels in update_columns: column_name = self._internal.spark_column_name_for(column_labels) old_col = scol_for(update_sdf, column_name) new_col = scol_for( update_sdf, other._internal.spark_column_name_for(column_labels) + "_new" ) if overwrite: update_sdf = update_sdf.withColumn( column_name, F.when(new_col.isNull(), old_col).otherwise(new_col) ) else: update_sdf = update_sdf.withColumn( column_name, F.when(old_col.isNull(), new_col).otherwise(old_col) ) sdf = update_sdf.select( [scol_for(update_sdf, col) for col in self._internal.spark_column_names] + list(HIDDEN_COLUMNS) ) internal = self._internal.with_new_sdf(sdf) self._update_internal_frame(internal, requires_same_anchor=False) def sample( self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None, ) -> "DataFrame": """ Return a random sample of items from an axis of object. Please call this function using named argument by specifying the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. 
The result set depends on not only the seed, but also how the data is distributed across machines and to some extent network randomness when shuffle operations are involved. Even in the simplest case, the result set will depend on the system's CPU core count. Parameters ---------- n : int, optional Number of items to return. This is currently NOT supported. Use frac instead. frac : float, optional Fraction of axis items to return. replace : bool, default False Sample with or without replacement. random_state : int, optional Seed for the random number generator (if int). Returns ------- Series or DataFrame A new object of same type as caller containing the sampled items. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish'], ... columns=['num_legs', 'num_wings', 'num_specimen_seen']) >>> df # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 A random 25% sample of the ``DataFrame``. Note that we use `random_state` to ensure the reproducibility of the examples. >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement, so the same items could appear more than once. >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP falcon 2 spider 8 spider 8 Name: num_legs, dtype: int64 Specifying the exact number of items to return is not supported at the moment. >>> df.sample(n=5) # doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Function sample currently does not support specifying ... """ # Note: we don't run any of the doctests because the result can change depending on the # system's core count. if n is not None: raise NotImplementedError( "Function sample currently does not support specifying " "exact number of items to return. Use frac instead." ) if frac is None: raise ValueError("frac must be specified.") sdf = self._internal.resolved_copy.spark_frame.sample( withReplacement=replace, fraction=frac, seed=random_state ) return DataFrame(self._internal.with_new_sdf(sdf)) def astype(self, dtype) -> "DataFrame": """ Cast a Koalas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire Koalas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64') >>> df a b 0 1 1 1 2 2 2 3 3 Convert to float type: >>> df.astype('float') a b 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 Convert to int64 type back: >>> df.astype('int64') a b 0 1 1 1 2 2 2 3 3 Convert column a to float type: >>> df.astype({'a': float}) a b 0 1.0 1 1 2.0 2 2 3.0 3 """ applied = [] if is_dict_like(dtype): for col_name in dtype.keys(): if col_name not in self.columns: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument." 
) for col_name, col in self.items(): if col_name in dtype: applied.append(col.astype(dtype=dtype[col_name])) else: applied.append(col) else: for col_name, col in self.items(): applied.append(col.astype(dtype=dtype)) return DataFrame(self._internal.with_new_columns(applied)) def add_prefix(self, prefix): """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(prefix, str) return self._apply_series_op( lambda kser: kser.rename(tuple([prefix + i for i in kser._internal.column_labels[0]])) ) def add_suffix(self, suffix): """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(suffix, str) return self._apply_series_op( lambda kser: kser.rename(tuple([i + suffix for i in kser._internal.column_labels[0]])) ) # TODO: include, and exclude should be implemented. def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame": """ Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75] A list of percentiles to be computed. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``. Currently only numeric data is supported. Examples -------- Describing a numeric ``Series``. >>> s = ks.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 Name: 0, dtype: float64 Describing a ``DataFrame``. Only numeric fields are returned. >>> df = ks.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0], ... 'object': ['a', 'b', 'c'] ... }, ... 
columns=['numeric1', 'numeric2', 'object']) >>> df.describe() numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 For multi-index columns: >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')] >>> df.describe() # doctest: +NORMALIZE_WHITESPACE num a b count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 >>> df[('num', 'b')].describe() count 3.0 mean 5.0 std 1.0 min 4.0 25% 4.0 50% 5.0 75% 6.0 max 6.0 Name: (num, b), dtype: float64 Describing a ``DataFrame`` and selecting custom percentiles. >>> df = ks.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0] ... }, ... columns=['numeric1', 'numeric2']) >>> df.describe(percentiles = [0.85, 0.15]) numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 15% 1.0 4.0 50% 2.0 5.0 85% 3.0 6.0 max 3.0 6.0 Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric1.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 Name: numeric1, dtype: float64 Describing a column from a ``DataFrame`` by accessing it as an attribute and selecting custom percentiles. >>> df.numeric1.describe(percentiles = [0.85, 0.15]) count 3.0 mean 2.0 std 1.0 min 1.0 15% 1.0 50% 2.0 85% 3.0 max 3.0 Name: numeric1, dtype: float64 """ exprs = [] column_labels = [] for label in self._internal.column_labels: scol = self._internal.spark_column_for(label) spark_type = self._internal.spark_type_for(label) if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType): exprs.append( F.nanvl(scol, F.lit(None)).alias(self._internal.spark_column_name_for(label)) ) column_labels.append(label) elif isinstance(spark_type, NumericType): exprs.append(scol) column_labels.append(label) if len(exprs) == 0: raise ValueError("Cannot describe a DataFrame without columns") if percentiles is not None: if any((p < 0.0) or (p > 1.0) for p in percentiles): raise ValueError("Percentiles should all be in the interval [0, 1]") # appending 50% if not in percentiles already percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles else: percentiles = [0.25, 0.5, 0.75] formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)] stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"] sdf = self._internal.spark_frame.select(*exprs).summary(stats) sdf = sdf.replace("stddev", "std", subset="summary") internal = InternalFrame( spark_frame=sdf, index_map=OrderedDict({"summary": None}), column_labels=column_labels, data_spark_columns=[ scol_for(sdf, self._internal.spark_column_name_for(label)) for label in column_labels ], ) return DataFrame(internal).astype("float64") def drop_duplicates(self, subset=None, keep="first", inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy. Returns ------- DataFrame DataFrame with duplicates removed or None if ``inplace=True``. >>> df = ks.DataFrame( ... 
{'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b']) >>> df a b 0 1 a 1 2 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates().sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates('a').sort_index() a b 0 1 a 1 2 a 4 3 d >>> df.drop_duplicates(['a', 'b']).sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep='last').sort_index() a b 0 1 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep=False).sort_index() a b 0 1 a 3 2 c 4 3 d """ inplace = validate_bool_kwarg(inplace, "inplace") sdf, column = self._mark_duplicates(subset, keep) sdf = sdf.where(~scol_for(sdf, column)).drop(column) internal = self._internal.with_new_sdf(sdf) if inplace: self._update_internal_frame(internal) else: return DataFrame(internal) def reindex( self, labels: Optional[Any] = None, index: Optional[Any] = None, columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None, copy: Optional[bool] = True, fill_value: Optional[Any] = None, ) -> "DataFrame": """ Conform DataFrame to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- labels: array-like, optional New labels / index to conform the axis specified by ‘axis’ to. index, columns: array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data axis: int or str, optional Axis to target. Can be either the axis name (‘index’, ‘columns’) or number (0, 1). copy : bool, default True Return a new object, even if the passed indexes are the same. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. Returns ------- DataFrame with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = ks.DataFrame({ ... 'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, ... index=index, ... columns=['http_status', 'response_time']) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index).sort_index() http_status response_time Chrome 200.0 0.02 Comodo Dragon NaN NaN IE10 404.0 0.08 Iceweasel NaN NaN Safari 404.0 0.07 We can fill in the missing values by passing a value to the keyword ``fill_value``. >>> df.reindex(new_index, fill_value=0, copy=False).sort_index() http_status response_time Chrome 200 0.02 Comodo Dragon 0 0.00 IE10 404 0.08 Iceweasel 0 0.00 Safari 404 0.07 We can also reindex the columns. 
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index() http_status user_agent Chrome 200 NaN Firefox 200 NaN IE10 404 NaN Konqueror 301 NaN Safari 404 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index() http_status user_agent Chrome 200 NaN Firefox 200 NaN IE10 404 NaN Konqueror 301 NaN Safari 404 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, ... index=date_index) >>> df2.sort_index() prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2).sort_index() prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN """ if axis is not None and (index is not None or columns is not None): raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.") if labels is not None: axis = validate_axis(axis) if axis == 0: index = labels elif axis == 1: columns = labels else: raise ValueError("No axis named %s for object type %s." % (axis, type(axis))) if index is not None and not is_list_like(index): raise TypeError( "Index must be called with a collection of some kind, " "%s was passed" % type(index) ) if columns is not None and not is_list_like(columns): raise TypeError( "Columns must be called with a collection of some kind, " "%s was passed" % type(columns) ) df = self if index is not None: df = df._reindex_index(index) if columns is not None: df = df._reindex_columns(columns) # Process missing values. if fill_value is not None: df = df.fillna(fill_value) # Copy if copy: return df.copy() else: return df def _reindex_index(self, index): # When axis is index, we can mimic pandas' by a right outer join. assert ( len(self._internal.index_spark_column_names) <= 1 ), "Index should be single column or not set." 
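        # The requested labels are materialized as a one-column Spark DataFrame
        # (built from a Koalas Series) and the current frame is joined onto it
        # with how="right", so labels missing from the original index come back
        # as rows of nulls, matching pandas' NaN-filling reindex semantics.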
index_column = self._internal.index_spark_column_names[0] kser = ks.Series(list(index)) labels = kser._internal.spark_frame.select(kser.spark.column.alias(index_column)) joined_df = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME).join( labels, on=index_column, how="right" ) internal = self._internal.with_new_sdf(joined_df) return DataFrame(internal) def _reindex_columns(self, columns): level = self._internal.column_labels_level if level > 1: label_columns = list(columns) for col in label_columns: if not isinstance(col, tuple): raise TypeError("Expected tuple, got {}".format(type(col))) else: label_columns = [(col,) for col in columns] for col in label_columns: if len(col) != level: raise ValueError( "shape (1,{}) doesn't match the shape (1,{})".format(len(col), level) ) scols, labels = [], [] for label in label_columns: if label in self._internal.column_labels: scols.append(self._internal.spark_column_for(label)) else: scols.append(F.lit(np.nan).alias(name_like_string(label))) labels.append(label) return DataFrame(self._internal.with_new_columns(scols, column_labels=labels)) def melt(self, id_vars=None, value_vars=None, var_name=None, value_name="value"): """ Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar, default 'variable' Name to use for the 'variable' column. If None it uses `frame.columns.name` or ‘variable’. value_name : scalar, default 'value' Name to use for the 'value' column. Returns ------- DataFrame Unpivoted DataFrame. Examples -------- >>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}, ... columns=['A', 'B', 'C']) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> ks.melt(df) variable value 0 A a 1 B 1 2 C 2 3 A b 4 B 3 5 C 4 6 A c 7 B 5 8 C 6 >>> df.melt(id_vars='A') A variable value 0 a B 1 1 a C 2 2 b B 3 3 b C 4 4 c B 5 5 c C 6 >>> df.melt(value_vars='A') variable value 0 A a 1 A b 2 A c >>> ks.melt(df, id_vars=['A', 'B']) A B variable value 0 a 1 C 2 1 b 3 C 4 2 c 5 C 6 >>> df.melt(id_vars=['A'], value_vars=['C']) A variable value 0 a C 2 1 b C 4 2 c C 6 The names of 'variable' and 'value' columns can be customized: >>> ks.melt(df, id_vars=['A'], value_vars=['B'], ... 
var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 """ column_labels = self._internal.column_labels if id_vars is None: id_vars = [] else: if isinstance(id_vars, str): id_vars = [(id_vars,)] elif isinstance(id_vars, tuple): if self._internal.column_labels_level == 1: id_vars = [idv if isinstance(idv, tuple) else (idv,) for idv in id_vars] else: raise ValueError( "id_vars must be a list of tuples" " when columns are a MultiIndex" ) else: id_vars = [idv if isinstance(idv, tuple) else (idv,) for idv in id_vars] non_existence_col = [idv for idv in id_vars if idv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'id_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if value_vars is None: value_vars = [] else: if isinstance(value_vars, str): value_vars = [(value_vars,)] elif isinstance(value_vars, tuple): if self._internal.column_labels_level == 1: value_vars = [ valv if isinstance(valv, tuple) else (valv,) for valv in value_vars ] else: raise ValueError( "value_vars must be a list of tuples" " when columns are a MultiIndex" ) else: value_vars = [valv if isinstance(valv, tuple) else (valv,) for valv in value_vars] non_existence_col = [valv for valv in value_vars if valv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'value_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if len(value_vars) == 0: value_vars = column_labels column_labels = [label for label in column_labels if label not in id_vars] sdf = self._internal.spark_frame if var_name is None: if self._internal.column_label_names is not None: var_name = self._internal.column_label_names elif self._internal.column_labels_level == 1: var_name = ["variable"] else: var_name = [ "variable_{}".format(i) for i in range(self._internal.column_labels_level) ] elif isinstance(var_name, str): var_name = [var_name] pairs = F.explode( F.array( *[ F.struct( *( [F.lit(c).alias(name) for c, name in zip(label, var_name)] + [self._internal.spark_column_for(label).alias(value_name)] ) ) for label in column_labels if label in value_vars ] ) ) columns = ( [ self._internal.spark_column_for(label).alias(name_like_string(label)) for label in id_vars ] + [F.col("pairs.%s" % name) for name in var_name[: self._internal.column_labels_level]] + [F.col("pairs.%s" % value_name)] ) exploded_df = sdf.withColumn("pairs", pairs).select(columns) return DataFrame(exploded_df) def stack(self): """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. The new index levels are sorted. 
Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = ks.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack().sort_index() cat height 1 weight 0 dog height 3 weight 2 Name: 0, dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = ks.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack().sort_index() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = ks.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. 
Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN """ from databricks.koalas.series import first_series if len(self._internal.column_labels) == 0: return DataFrame(self._internal.with_filter(F.lit(False))) column_labels = defaultdict(dict) index_values = set() should_returns_series = False for label in self._internal.column_labels: new_label = label[:-1] if len(new_label) == 0: new_label = (SPARK_DEFAULT_SERIES_NAME,) should_returns_series = True value = label[-1] scol = self._internal.spark_column_for(label) column_labels[new_label][value] = scol index_values.add(value) column_labels = OrderedDict(sorted(column_labels.items(), key=lambda x: x[0])) if self._internal.column_label_names is None: column_label_names = None index_name = None else: column_label_names = self._internal.column_label_names[:-1] if self._internal.column_label_names[-1] is None: index_name = None else: index_name = (self._internal.column_label_names[-1],) index_column = SPARK_INDEX_NAME_FORMAT(len(self._internal.index_map)) index_map = list(self._internal.index_map.items()) + [(index_column, index_name)] data_columns = [name_like_string(label) for label in column_labels] structs = [ F.struct( [F.lit(value).alias(index_column)] + [ ( column_labels[label][value] if value in column_labels[label] else F.lit(None) ).alias(name) for label, name in zip(column_labels, data_columns) ] ).alias(value) for value in index_values ] pairs = F.explode(F.array(structs)) sdf = self._internal.spark_frame.withColumn("pairs", pairs) sdf = sdf.select( self._internal.index_spark_columns + [sdf["pairs"][index_column].alias(index_column)] + [sdf["pairs"][name].alias(name) for name in data_columns] ) internal = InternalFrame( spark_frame=sdf, index_map=OrderedDict(index_map), column_labels=list(column_labels), data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) kdf = DataFrame(internal) if should_returns_series: return first_series(kdf) else: return kdf def unstack(self): """ Pivot the (necessarily hierarchical) index labels. Returns a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series. .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and it could cause a serious performance degradation since Spark partitions it row based. Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack). Examples -------- >>> df = ks.DataFrame({"A": {"0": "a", "1": "b", "2": "c"}, ... "B": {"0": "1", "1": "3", "2": "5"}, ... "C": {"0": "2", "1": "4", "2": "6"}}, ... columns=["A", "B", "C"]) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() A 0 a 1 b 2 c B 0 1 1 3 2 5 C 0 2 1 4 2 6 Name: 0, dtype: object >>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')]) >>> df.unstack().sort_index() X A 0 a 1 b 2 c B 0 1 1 3 2 5 Y C 0 2 1 4 2 6 Name: 0, dtype: object For MultiIndex case: >>> df = ks.DataFrame({"A": ["a", "b", "c"], ... "B": [1, 3, 5], ... "C": [2, 4, 6]}, ... 
columns=["A", "B", "C"]) >>> df = df.set_index('A', append=True) >>> df # doctest: +NORMALIZE_WHITESPACE B C A 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE B C A a b c a b c 0 1.0 NaN NaN 2.0 NaN NaN 1 NaN 3.0 NaN NaN 4.0 NaN 2 NaN NaN 5.0 NaN NaN 6.0 """ from databricks.koalas.series import first_series if len(self._internal.index_spark_column_names) > 1: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index = df._internal.column_labels[: len(self._internal.index_spark_column_names) - 1] columns = df.columns[len(self._internal.index_spark_column_names) - 1] df = df.pivot_table( index=index, columns=columns, values=self._internal.column_labels, aggfunc="first" ) internal = df._internal.copy( index_map=OrderedDict( (index_column, name) for index_column, name in zip( df._internal.index_spark_column_names, self._internal.index_names[:-1] ) ), column_label_names=( df._internal.column_label_names[:-1] + [ None if self._internal.index_names[-1] is None else df._internal.column_label_names[-1] ] ), ) return DataFrame(internal) # TODO: Codes here are similar with melt. Should we deduplicate? column_labels = self._internal.column_labels ser_name = SPARK_DEFAULT_SERIES_NAME sdf = self._internal.spark_frame new_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] new_index_map = [] if self._internal.column_label_names is not None: new_index_map.extend(zip(new_index_columns, self._internal.column_label_names)) else: new_index_map.extend(zip(new_index_columns, [None] * len(new_index_columns))) pairs = F.explode( F.array( *[ F.struct( *( [F.lit(c).alias(name) for c, name in zip(idx, new_index_columns)] + [self._internal.spark_column_for(idx).alias(ser_name)] ) ) for idx in column_labels ] ) ) columns = [ F.col("pairs.%s" % name) for name in new_index_columns[: self._internal.column_labels_level] ] + [F.col("pairs.%s" % ser_name)] new_index_len = len(new_index_columns) existing_index_columns = [] for i, index_name in enumerate(self._internal.index_names): new_index_map.append((SPARK_INDEX_NAME_FORMAT(i + new_index_len), index_name)) existing_index_columns.append( self._internal.index_spark_columns[i].alias( SPARK_INDEX_NAME_FORMAT(i + new_index_len) ) ) exploded_df = sdf.withColumn("pairs", pairs).select(existing_index_columns + columns) return first_series( DataFrame(InternalFrame(exploded_df, index_map=OrderedDict(new_index_map))) ) # TODO: axis, skipna, and many arguments should be implemented. def all(self, axis: Union[int, str] = 0) -> bool: """ Return whether all elements are True. Returns True unless there is at least one element within a series that is False or equivalent (e.g. zero or empty) Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. Examples -------- Create a dataframe from a dictionary. >>> df = ks.DataFrame({ ... 'col1': [True, True, True], ... 'col2': [True, False, False], ... 'col3': [0, 0, 0], ... 'col4': [1, 2, 3], ... 'col5': [True, True, None], ... 'col6': [True, False, None]}, ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6']) Default behaviour checks if column-wise values all return a boolean. 
>>> df.all() col1 True col2 False col3 False col4 True col5 True col6 False Name: all, dtype: bool Returns ------- Series """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') applied = [] column_labels = self._internal.column_labels for label in column_labels: scol = self._internal.spark_column_for(label) all_col = F.min(F.coalesce(scol.cast("boolean"), F.lit(True))) applied.append(F.when(all_col.isNull(), True).otherwise(all_col)) # TODO: there is a similar logic to transpose in, for instance, # DataFrame.any, Series.quantile. Maybe we should deduplicate it. value_column = "value" cols = [] for label, applied_col in zip(column_labels, applied): cols.append( F.struct( [F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] + [applied_col.alias(value_column)] ) ) sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select( F.explode(F.col("arrays")) ) sdf = sdf.selectExpr("col.*") index_column_name = lambda i: ( None if self._internal.column_label_names is None else (self._internal.column_label_names[i],) ) internal = self._internal.copy( spark_frame=sdf, index_map=OrderedDict( (SPARK_INDEX_NAME_FORMAT(i), index_column_name(i)) for i in range(self._internal.column_labels_level) ), column_labels=None, data_spark_columns=[scol_for(sdf, value_column)], column_label_names=None, ) return DataFrame(internal)[value_column].rename("all") # TODO: axis, skipna, and many arguments should be implemented. def any(self, axis: Union[int, str] = 0) -> bool: """ Return whether any element is True. Returns False unless there is at least one element within a series that is True or equivalent (e.g. non-zero or non-empty). Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. Examples -------- Create a dataframe from a dictionary. >>> df = ks.DataFrame({ ... 'col1': [False, False, False], ... 'col2': [True, False, False], ... 'col3': [0, 0, 1], ... 'col4': [0, 1, 2], ... 'col5': [False, False, None], ... 'col6': [True, False, None]}, ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6']) Default behaviour checks if column-wise values all return a boolean. >>> df.any() col1 False col2 True col3 True col4 True col5 False col6 True Name: any, dtype: bool Returns ------- Series """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') applied = [] column_labels = self._internal.column_labels for label in column_labels: scol = self._internal.spark_column_for(label) all_col = F.max(F.coalesce(scol.cast("boolean"), F.lit(False))) applied.append(F.when(all_col.isNull(), False).otherwise(all_col)) # TODO: there is a similar logic to transpose in, for instance, # DataFrame.all, Series.quantile. Maybe we should deduplicate it. 
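# Pack each per-column boolean aggregate into a struct together with its
# column label, explode the structs into rows, and promote the labels to the
# index, so the final result is a Series named "any" indexed by the original
# column labels.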
value_column = "value" cols = [] for label, applied_col in zip(column_labels, applied): cols.append( F.struct( [F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] + [applied_col.alias(value_column)] ) ) sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select( F.explode(F.col("arrays")) ) sdf = sdf.selectExpr("col.*") index_column_name = lambda i: ( None if self._internal.column_label_names is None else (self._internal.column_label_names[i],) ) internal = self._internal.copy( spark_frame=sdf, index_map=OrderedDict( (SPARK_INDEX_NAME_FORMAT(i), index_column_name(i)) for i in range(self._internal.column_labels_level) ), column_labels=None, data_spark_columns=[scol_for(sdf, value_column)], column_label_names=None, ) return DataFrame(internal)[value_column].rename("any") # TODO: add axis, numeric_only, pct, na_option parameter def rank(self, method="average", ascending=True): """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the average of the ranks of those values. .. note:: the current implementation of rank uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) Returns ------- ranks : same type as caller Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B']) >>> df A B 0 1 4 1 2 3 2 2 2 3 3 1 >>> df.rank().sort_index() A B 0 1.0 4.0 1 2.5 3.0 2 2.5 2.0 3 4.0 1.0 If method is set to 'min', it use lowest rank in group. >>> df.rank(method='min').sort_index() A B 0 1.0 4.0 1 2.0 3.0 2 2.0 2.0 3 4.0 1.0 If method is set to 'max', it use highest rank in group. >>> df.rank(method='max').sort_index() A B 0 1.0 4.0 1 3.0 3.0 2 3.0 2.0 3 4.0 1.0 If method is set to 'dense', it leaves no gaps in group. >>> df.rank(method='dense').sort_index() A B 0 1.0 4.0 1 2.0 3.0 2 2.0 2.0 3 3.0 1.0 """ return self._apply_series_op(lambda kser: kser.rank(method=method, ascending=ascending)) def filter(self, items=None, like=None, regex=None, axis=None): """ Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : string Keep labels from axis for which "like in label == True". regex : string (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... 
columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 For a Series, >>> # select rows by name >>> df.one.filter(items=['rabbit']) rabbit 4 Name: one, dtype: int64 >>> # select rows by regular expression >>> df.one.filter(regex='e$') mouse 1 Name: one, dtype: int64 >>> # select rows containing 'bbi' >>> df.one.filter(like='bbi') rabbit 4 Name: one, dtype: int64 """ if sum(x is not None for x in (items, like, regex)) > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) axis = validate_axis(axis, none_axis=1) index_scols = self._internal.index_spark_columns if items is not None: if is_list_like(items): items = list(items) else: raise ValueError("items should be a list-like object.") if axis == 0: if len(index_scols) == 1: col = None for item in items: if col is None: col = index_scols[0] == F.lit(item) else: col = col | (index_scols[0] == F.lit(item)) elif len(index_scols) > 1: # for multi-index col = None for item in items: if not isinstance(item, (tuple)): raise TypeError("Unsupported type {}".format(type(item))) if not item: raise ValueError("The item should not be empty.") midx_col = None for i, element in enumerate(item): if midx_col is None: midx_col = index_scols[i] == F.lit(element) else: midx_col = midx_col & (index_scols[i] == F.lit(element)) if col is None: col = midx_col else: col = col | midx_col else: raise ValueError("Single or multi index must be specified.") return DataFrame(self._internal.with_filter(col)) elif axis == 1: return self[items] elif like is not None: if axis == 0: col = None for index_scol in index_scols: if col is None: col = index_scol.contains(like) else: col = col | index_scol.contains(like) return DataFrame(self._internal.with_filter(col)) elif axis == 1: column_labels = self._internal.column_labels output_labels = [label for label in column_labels if any(like in i for i in label)] return self[output_labels] elif regex is not None: if axis == 0: col = None for index_scol in index_scols: if col is None: col = index_scol.rlike(regex) else: col = col | index_scol.rlike(regex) return DataFrame(self._internal.with_filter(col)) elif axis == 1: column_labels = self._internal.column_labels matcher = re.compile(regex) output_labels = [ label for label in column_labels if any(matcher.search(i) is not None for i in label) ] return self[output_labels] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def rename( self, mapper=None, index=None, columns=None, axis="index", inplace=False, level=None, errors="ignore", ): """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don’t throw an error. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis’ values. Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index` and `columns`. index : dict-like or function Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper"). columns : dict-like or function Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper"). axis : int or str, default 'index' Axis to target with mapper. 
Can be either the axis name ('index', 'columns') or number (0, 1). inplace : bool, default False Whether to return a new DataFrame. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame with the renamed axis labels. Raises ------ `KeyError` If any of the labels is not found in the selected axis and "errors='raise'". Examples -------- >>> kdf1 = ks.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> kdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE a c 0 1 4 1 2 5 2 3 6 >>> kdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> def str_lower(s) -> str: ... return str.lower(s) >>> kdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE a b 0 1 4 1 2 5 2 3 6 >>> def mul10(x) -> int: ... return x * 10 >>> kdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]) >>> kdf2 = ks.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx) >>> kdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE x y A B C D 0 1 2 3 4 1 5 6 7 8 >>> kdf3 = ks.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab')) >>> kdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE a b x a 1 2 b 3 4 y c 5 6 d 7 8 """ def gen_mapper_fn(mapper): if isinstance(mapper, dict): if len(mapper) == 0: if errors == "raise": raise KeyError("Index include label which is not in the `mapper`.") else: return DataFrame(self._internal) type_set = set(map(lambda x: type(x), mapper.values())) if len(type_set) > 1: raise ValueError("Mapper dict should have the same value type.") spark_return_type = as_spark_type(list(type_set)[0]) def mapper_fn(x): if x in mapper: return mapper[x] else: if errors == "raise": raise KeyError("Index include value which is not in the `mapper`") return x elif callable(mapper): spark_return_type = infer_return_type(mapper).tpe def mapper_fn(x): return mapper(x) else: raise ValueError( "`mapper` or `index` or `columns` should be " "either dict-like or function type." ) return mapper_fn, spark_return_type index_mapper_fn = None index_mapper_ret_stype = None columns_mapper_fn = None inplace = validate_bool_kwarg(inplace, "inplace") if mapper: axis = validate_axis(axis) if axis == 0: index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(mapper) elif axis == 1: columns_mapper_fn, columns_mapper_ret_stype = gen_mapper_fn(mapper) else: raise ValueError( "argument axis should be either the axis name " "(‘index’, ‘columns’) or number (0, 1)" ) else: if index: index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(index) if columns: columns_mapper_fn, _ = gen_mapper_fn(columns) if not index and not columns: raise ValueError("Either `index` or `columns` should be provided.") internal = self._internal if index_mapper_fn: # rename index labels, if `level` is None, rename all index columns, otherwise only # rename the corresponding level index. 
# implement this by transform the underlying spark dataframe, # Example: # suppose the kdf index column in underlying spark dataframe is "index_0", "index_1", # if rename level 0 index labels, will do: # ``kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))`` # if rename all index labels (`level` is None), then will do: # ``` # kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0")) # .withColumn("index_1", mapper_fn_udf(col("index_1")) # ``` index_columns = internal.index_spark_column_names num_indices = len(index_columns) if level: if level < 0 or level >= num_indices: raise ValueError("level should be an integer between [0, num_indices)") def gen_new_index_column(level): index_col_name = index_columns[level] index_mapper_udf = pandas_udf( lambda s: s.map(index_mapper_fn), returnType=index_mapper_ret_stype ) return index_mapper_udf(scol_for(internal.spark_frame, index_col_name)) sdf = internal.resolved_copy.spark_frame if level is None: for i in range(num_indices): sdf = sdf.withColumn(index_columns[i], gen_new_index_column(i)) else: sdf = sdf.withColumn(index_columns[level], gen_new_index_column(level)) internal = internal.with_new_sdf(sdf) if columns_mapper_fn: # rename column name. # Will modify the `_internal._column_labels` and transform underlying spark dataframe # to the same column name with `_internal._column_labels`. if level: if level < 0 or level >= internal.column_labels_level: raise ValueError("level should be an integer between [0, column_labels_level)") def gen_new_column_labels_entry(column_labels_entry): if isinstance(column_labels_entry, tuple): if level is None: # rename all level columns return tuple(map(columns_mapper_fn, column_labels_entry)) else: # only rename specified level column entry_list = list(column_labels_entry) entry_list[level] = columns_mapper_fn(entry_list[level]) return tuple(entry_list) else: return columns_mapper_fn(column_labels_entry) new_column_labels = list(map(gen_new_column_labels_entry, internal.column_labels)) new_data_scols = [ scol.alias(name_like_string(new_label)) for scol, new_label in zip(internal.data_spark_columns, new_column_labels) ] internal = internal.with_new_columns(new_data_scols, column_labels=new_column_labels) if inplace: self._update_internal_frame(internal) else: return DataFrame(internal) def keys(self): """ Return alias for columns. Returns ------- Index Columns of the DataFrame. Examples -------- >>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 >>> df.keys() Index(['max_speed', 'shield'], dtype='object') """ return self.columns def pct_change(self, periods=1): """ Percentage change between the current and a prior element. .. note:: the current implementation of this API uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. Returns ------- DataFrame Examples -------- Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = ks.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... 
index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 You can set periods to shift for forming percent change >>> df.pct_change(2) FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 NaN NaN NaN 1980-03-01 0.067912 0.073814 0.06883 """ window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods) def op(kser): prev_row = F.lag(kser.spark.column, periods).over(window) return ((kser.spark.column - prev_row) / prev_row).alias( kser._internal.data_spark_column_names[0] ) return self._apply_series_op(op) # TODO: axis = 1 def idxmax(self, axis=0): """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with maximum value using `to_pandas()` because we suppose the number of rows with max values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmax Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmax() a 2 b 0 c 2 Name: 0, dtype: int64 For Multi-column Index >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> kdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmax().sort_index() a x 2 b y 0 c z 2 Name: 0, dtype: int64 """ max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns) sdf_max = self._internal.spark_frame.select(*max_cols).head() # `sdf_max` looks like below # +------+------+------+ # |(a, x)|(b, y)|(c, z)| # +------+------+------+ # | 3| 4.0| 400| # +------+------+------+ conds = ( scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max) ) cond = reduce(lambda x, y: x | y, conds) kdf = DataFrame(self._internal.with_filter(cond)) pdf = kdf.to_pandas() return ks.from_pandas(pdf.idxmax()) # TODO: axis = 1 def idxmin(self, axis=0): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with minimum value using `to_pandas()` because we suppose the number of rows with min values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmin Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmin() a 0 b 3 c 1 Name: 0, dtype: int64 For Multi-column Index >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 
'c': [300, 200, 400, 200]}) >>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> kdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmin().sort_index() a x 0 b y 3 c z 1 Name: 0, dtype: int64 """ min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns) sdf_min = self._internal.spark_frame.select(*min_cols).head() conds = ( scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min) ) cond = reduce(lambda x, y: x | y, conds) kdf = DataFrame(self._internal.with_filter(cond)) pdf = kdf.to_pandas() return ks.from_pandas(pdf.idxmin()) def info(self, verbose=None, buf=None, max_cols=None, null_counts=None): """ Print a concise summary of a DataFrame. This method prints information about a DataFrame including the index dtype and column dtypes, non-null values and memory usage. Parameters ---------- verbose : bool, optional Whether to print the full summary. buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. null_counts : bool, optional Whether to show the non-null counts. Returns ------- None This method prints a summary of a DataFrame and returns None. See Also -------- DataFrame.describe: Generate descriptive statistics of DataFrame columns. Examples -------- >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = ks.DataFrame( ... {"int_col": int_values, "text_col": text_values, "float_col": float_values}, ... columns=['int_col', 'text_col', 'float_col']) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) # doctest: +SKIP <class 'databricks.koalas.frame.DataFrame'> Index: 5 entries, 0 to 4 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 int_col 5 non-null int64 1 text_col 5 non-null object 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) Prints a summary of columns count and its dtypes but not per column information: >>> df.info(verbose=False) # doctest: +SKIP <class 'databricks.koalas.frame.DataFrame'> Index: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) Pipe output of DataFrame.info to buffer instead of sys.stdout, get buffer content and writes to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open('%s/info.txt' % path, "w", ... encoding="utf-8") as f: ... _ = f.write(s) >>> with open('%s/info.txt' % path) as f: ... f.readlines() # doctest: +SKIP ["<class 'databricks.koalas.frame.DataFrame'>\\n", 'Index: 5 entries, 0 to 4\\n', 'Data columns (total 3 columns):\\n', ' # Column Non-Null Count Dtype \\n', '--- ------ -------------- ----- \\n', ' 0 int_col 5 non-null int64 \\n', ' 1 text_col 5 non-null object \\n', ' 2 float_col 5 non-null float64\\n', 'dtypes: float64(1), int64(1), object(1)'] """ # To avoid pandas' existing config affects Koalas. # TODO: should we have corresponding Koalas configs? 
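# Reuse pandas' own DataFrame.info() by temporarily exposing the attributes it
# expects (a `_data` attribute and an eagerly evaluated `count`), then restore
# the original state in the `finally` block below.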
with pd.option_context( "display.max_info_columns", sys.maxsize, "display.max_info_rows", sys.maxsize ): try: # hack to use pandas' info as is. self._data = self count_func = self.count self.count = lambda: count_func().to_pandas() return pd.DataFrame.info( self, verbose=verbose, buf=buf, max_cols=max_cols, memory_usage=False, null_counts=null_counts, ) finally: del self._data self.count = count_func # TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas' def quantile(self, q=0.5, axis=0, numeric_only=True, accuracy=10000): """ Return value at the given quantile. .. note:: Unlike pandas', the quantile in Koalas is an approximated quantile based upon approximate percentile computation because computing quantile across a large dataset is extremely expensive. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. axis : int, default 0 or 'index' Can only be set to 0 at the moment. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. Can only be set to True at the moment. accuracy : int, optional Default accuracy of approximation. Larger value means better accuracy. The relative error can be deduced by 1.0 / accuracy. Returns ------- Series or DataFrame If q is an array, a DataFrame will be returned where the index is q, the columns are the columns of self, and the values are the quantiles. If q is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]}) >>> kdf a b 0 1 6 1 2 7 2 3 8 3 4 9 4 5 0 >>> kdf.quantile(.5) a 3 b 7 Name: 0.5, dtype: int64 >>> kdf.quantile([.25, .5, .75]) a b 0.25 2 6 0.5 3 7 0.75 4 8 """ result_as_series = False axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if numeric_only is not True: raise NotImplementedError("quantile currently doesn't supports numeric_only") if isinstance(q, float): result_as_series = True key = str(q) q = (q,) quantiles = q # First calculate the percentiles from all columns and map it to each `quantiles` # by creating each entry as a struct. So, it becomes an array of structs as below: # # +-----------------------------------------+ # | arrays| # +-----------------------------------------+ # |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]| # +-----------------------------------------+ percentile_cols = [] for scol, column_name in zip( self._internal.data_spark_columns, self._internal.data_spark_column_names ): percentile_cols.append( SF.percentile_approx(scol, quantiles, accuracy).alias(column_name) ) sdf = self._internal.spark_frame.select(percentile_cols) # Here, after select percntile cols, a spark_frame looks like below: # +---------+---------+ # | a| b| # +---------+---------+ # |[2, 3, 4]|[6, 7, 8]| # +---------+---------+ cols_dict = OrderedDict() for column in self._internal.data_spark_column_names: cols_dict[column] = list() for i in range(len(quantiles)): cols_dict[column].append(scol_for(sdf, column).getItem(i).alias(column)) internal_index_column = SPARK_DEFAULT_INDEX_NAME cols = [] for i, col in enumerate(zip(*cols_dict.values())): cols.append(F.struct(F.lit("%s" % quantiles[i]).alias(internal_index_column), *col)) sdf = sdf.select(F.array(*cols).alias("arrays")) # And then, explode it and manually set the index. 
# +-----------------+---+---+ # |__index_level_0__| a| b| # +-----------------+---+---+ # | 0.25| 2| 6| # | 0.5| 3| 7| # | 0.75| 4| 8| # +-----------------+---+---+ sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*") internal = self._internal.copy( spark_frame=sdf, data_spark_columns=[ scol_for(sdf, col) for col in self._internal.data_spark_column_names ], index_map=OrderedDict({internal_index_column: None}), column_labels=self._internal.column_labels, column_label_names=None, ) return DataFrame(internal) if not result_as_series else DataFrame(internal).T[key] def query(self, expr, inplace=False): """ Query the columns of a DataFrame with a boolean expression. .. note:: Internal columns that starting with a '__' prefix are able to access, however, they are not supposed to be accessed. .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the pandas specific syntax such as `@` is not supported. If you want the pandas syntax, you can work around with :meth:`DataFrame.koalas.apply_batch`, but you should be aware that `query_func` will be executed at different nodes in a distributed manner. So, for example, to use `@` syntax, make sure the variable is serialized by, for example, putting it within the closure as below. >>> df = ks.DataFrame({'A': range(2000), 'B': range(2000)}) >>> def query_func(pdf): ... num = 1995 ... return pdf.query('A > @num') >>> df.koalas.apply_batch(query_func) A B 1996 1996 1996 1997 1997 1997 1998 1998 1998 1999 1999 1999 Parameters ---------- expr : str The query string to evaluate. You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. Returns ------- DataFrame DataFrame resulting from the provided query expression. Examples -------- >>> df = ks.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ if isinstance(self.columns, pd.MultiIndex): raise ValueError("Doesn't support for MultiIndex columns") if not isinstance(expr, str): raise ValueError("expr must be a string to be evaluated, {} given".format(type(expr))) inplace = validate_bool_kwarg(inplace, "inplace") data_columns = [label[0] for label in self._internal.column_labels] sdf = self._internal.spark_frame.select( self._internal.index_spark_columns + [ scol.alias(col) for scol, col in zip(self._internal.data_spark_columns, data_columns) ] ).filter(expr) internal = self._internal.with_new_sdf(sdf, data_columns=data_columns) if inplace: self._update_internal_frame(internal) else: return DataFrame(internal) def explain(self, extended: Optional[bool] = None, mode: Optional[str] = None): warnings.warn( "DataFrame.explain is deprecated as of DataFrame.spark.explain. " "Please use the API instead.", FutureWarning, ) return self.spark.explain(extended, mode) explain.__doc__ = SparkFrameMethods.explain.__doc__ def take(self, indices, axis=0, **kwargs): """ Return the elements in the given *positional* indices along an axis. 
This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]).sort_index() name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]).sort_index() name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ axis = validate_axis(axis) if not is_list_like(indices) or isinstance(indices, (dict, set)): raise ValueError("`indices` must be a list-like except dict or set") if axis == 0: return self.iloc[indices, :] elif axis == 1: return self.iloc[:, indices] def eval(self, expr, inplace=False): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. Returns ------- The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Examples -------- >>> df = ks.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 Name: 0, dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. 
>>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from databricks.koalas.series import first_series if isinstance(self.columns, pd.MultiIndex): raise ValueError("`eval` is not supported for multi-index columns") inplace = validate_bool_kwarg(inplace, "inplace") should_return_series = False should_return_scalar = False # Since `eval_func` doesn't have a type hint, inferring the schema is always preformed # in the `apply_batch`. Hence, the variables `is_seires` and `is_scalar_` can be updated. def eval_func(pdf): nonlocal should_return_series nonlocal should_return_scalar result_inner = pdf.eval(expr, inplace=inplace) if inplace: result_inner = pdf if isinstance(result_inner, pd.Series): should_return_series = True result_inner = result_inner.to_frame() elif is_scalar(result_inner): should_return_scalar = True result_inner = pd.Series(result_inner).to_frame() return result_inner result = self.koalas.apply_batch(eval_func) if inplace: # Here, the result is always a frame because the error is thrown during schema inference # from pandas. self._update_internal_frame(result._internal, requires_same_anchor=False) elif should_return_series: return first_series(result) elif should_return_scalar: return first_series(result)[0] else: # Returns a frame return result def explode(self, column): """ Transform each element of a list-like to a row, replicating index values. Parameters ---------- column : str or tuple Column to explode. Returns ------- DataFrame Exploded lists to rows of the subset columns; index will be duplicated for these rows. See Also -------- DataFrame.unstack : Pivot a level of the (necessarily hierarchical) index labels. DataFrame.melt : Unpivot a DataFrame from wide format to long format. Examples -------- >>> df = ks.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1}) >>> df A B 0 [1, 2, 3] 1 1 [] 1 2 [3, 4] 1 >>> df.explode('A') A B 0 1.0 1 0 2.0 1 0 3.0 1 1 NaN 1 2 3.0 1 2 4.0 1 """ from databricks.koalas.series import Series if not isinstance(column, (tuple, str)): raise ValueError("column must be a scalar") kdf = DataFrame(self._internal.resolved_copy) kser = kdf[column] if not isinstance(kser, Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." % name_like_string(column) ) if not isinstance(kser.spark.data_type, ArrayType): return self.copy() sdf = kdf._internal.spark_frame.withColumn( kser._internal.data_spark_column_names[0], F.explode_outer(kser.spark.column) ) internal = kdf._internal.with_new_sdf(sdf) return DataFrame(internal) def mad(self, axis=0): """ Return the mean absolute deviation of values. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... 
columns=['a', 'b']) >>> df.mad() a 0.666667 b 0.066667 Name: 0, dtype: float64 >>> df.mad(axis=1) 0 0.45 1 0.90 2 1.35 3 NaN Name: 0, dtype: float64 """ from databricks.koalas.series import Series, first_series axis = validate_axis(axis) if axis == 0: def get_spark_column(kdf, label): scol = kdf._internal.spark_column_for(label) col_type = kdf._internal.spark_type_for(label) if isinstance(col_type, BooleanType): scol = scol.cast("integer") return scol new_columns = [ F.avg(get_spark_column(self, label)).alias(name_like_string(label)) for label in self._internal.column_labels ] mean_data = self._internal.spark_frame.select(new_columns).first() new_columns = [ F.avg( F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)]) ).alias(name_like_string(label)) for label in self._internal.column_labels ] sdf = self._internal.spark_frame.select(new_columns) with ks.option_context( "compute.default_index_type", "distributed", "compute.max_rows", None ): kdf = DataFrame(sdf) internal = InternalFrame( spark_frame=kdf._internal.spark_frame, index_map=kdf._internal.index_map, column_labels=self._internal.column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) elif axis == 1: @pandas_udf(returnType=DoubleType()) def calculate_columns_axis(*cols): return pd.concat(cols, axis=1).mad(axis=1) internal = self._internal.copy( column_labels=[(SPARK_DEFAULT_SERIES_NAME,)], data_spark_columns=[ calculate_columns_axis(*self._internal.data_spark_columns).alias( SPARK_DEFAULT_SERIES_NAME ) ], column_label_names=None, ) return first_series(DataFrame(internal)) def tail(self, n=5): """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `n` rows, equivalent to ``df[n:]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() # doctest: +SKIP animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) # doctest: +SKIP animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) # doctest: +SKIP animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if LooseVersion(pyspark.__version__) < LooseVersion("3.0"): raise RuntimeError("tail can be used in PySpark >= 3.0") if not isinstance(n, int): raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__)) if n < 0: n = len(self) + n if n <= 0: return ks.DataFrame(self._internal.with_filter(F.lit(False))) sdf = self._internal.spark_frame rows = sdf.tail(n) new_sdf = default_session().createDataFrame(rows, sdf.schema) return DataFrame(self._internal.with_new_sdf(new_sdf)) def _to_internal_pandas(self): """ Return a pandas DataFrame directly from _internal to avoid overhead of copy. This method is for internal use only. 
""" return self._internal.to_pandas_frame def _get_or_create_repr_pandas_cache(self, n): if not hasattr(self, "_repr_pandas_cache") or n not in self._repr_pandas_cache: self._repr_pandas_cache = {n: self.head(n + 1)._to_internal_pandas()} return self._repr_pandas_cache[n] def __repr__(self): max_display_count = get_option("display.max_rows") if max_display_count is None: return self._to_internal_pandas().to_string() pdf = self._get_or_create_repr_pandas_cache(max_display_count) pdf_length = len(pdf) pdf = pdf.iloc[:max_display_count] if pdf_length > max_display_count: repr_string = pdf.to_string(show_dimensions=True) match = REPR_PATTERN.search(repr_string) if match is not None: nrows = match.group("rows") ncols = match.group("columns") footer = "\n\n[Showing only the first {nrows} rows x {ncols} columns]".format( nrows=nrows, ncols=ncols ) return REPR_PATTERN.sub(footer, repr_string) return pdf.to_string() def _repr_html_(self): max_display_count = get_option("display.max_rows") # pandas 0.25.1 has a regression about HTML representation so 'bold_rows' # has to be set as False explicitly. See https://github.com/pandas-dev/pandas/issues/28204 bold_rows = not (LooseVersion("0.25.1") == LooseVersion(pd.__version__)) if max_display_count is None: return self._to_internal_pandas().to_html(notebook=True, bold_rows=bold_rows) pdf = self._get_or_create_repr_pandas_cache(max_display_count) pdf_length = len(pdf) pdf = pdf.iloc[:max_display_count] if pdf_length > max_display_count: repr_html = pdf.to_html(show_dimensions=True, notebook=True, bold_rows=bold_rows) match = REPR_HTML_PATTERN.search(repr_html) if match is not None: nrows = match.group("rows") ncols = match.group("columns") by = chr(215) footer = ( "\n<p>Showing only the first {rows} rows " "{by} {cols} columns</p>\n</div>".format(rows=nrows, by=by, cols=ncols) ) return REPR_HTML_PATTERN.sub(footer, repr_html) return pdf.to_html(notebook=True, bold_rows=bold_rows) def __getitem__(self, key): from databricks.koalas.series import Series if key is None: raise KeyError("none key") if isinstance(key, (str, tuple, list)): return self.loc[:, key] elif isinstance(key, slice): if any(type(n) == int or None for n in [key.start, key.stop]): # Seems like pandas Frame always uses int as positional search when slicing # with ints. return self.iloc[key] return self.loc[key] elif isinstance(key, Series): return self.loc[key.astype(bool)] raise NotImplementedError(key) def __setitem__(self, key, value): from databricks.koalas.series import Series if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self): # Different Series or DataFrames level = self._internal.column_labels_level key = DataFrame._index_normalized_label(level, key) value = DataFrame._index_normalized_frame(level, value) def assign_columns(kdf, this_column_labels, that_column_labels): assert len(key) == len(that_column_labels) # Note that here intentionally uses `zip_longest` that combine # that_columns. for k, this_label, that_label in zip_longest( key, this_column_labels, that_column_labels ): yield (kdf._kser_for(that_label), tuple(["that", *k])) if this_label is not None and this_label[1:] != k: yield (kdf._kser_for(this_label), this_label) kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left") elif isinstance(value, list): if len(self) != len(value): raise ValueError("Length of values does not match length of index") # TODO: avoid using default index? 
with option_context( "compute.default_index_type", "distributed-sequence", "compute.ops_on_diff_frames", True, ): kdf = self.reset_index() kdf[key] = ks.DataFrame(value) kdf = kdf.set_index(kdf.columns[: len(self._internal.index_map)]) kdf.index.names = self.index.names elif isinstance(key, list): assert isinstance(value, DataFrame) # Same DataFrames. field_names = value.columns kdf = self._assign({k: value[c] for k, c in zip(key, field_names)}) else: # Same Series. kdf = self._assign({key: value}) self._update_internal_frame(kdf._internal) @staticmethod def _index_normalized_label(level, labels): """ Returns a label that is normalized against the current column index level. For example, the key "abc" can be ("abc", "", "") if the current Frame has a multi-index for its column """ if isinstance(labels, str): labels = [(labels,)] elif isinstance(labels, tuple): labels = [labels] else: labels = [k if isinstance(k, tuple) else (k,) for k in labels] if any(len(label) > level for label in labels): raise KeyError( "Key length ({}) exceeds index depth ({})".format( max(len(label) for label in labels), level ) ) return [tuple(list(label) + ([""] * (level - len(label)))) for label in labels] @staticmethod def _index_normalized_frame(level, kser_or_kdf): """ Returns a frame that is normalized against the current column index level. For example, the name in `pd.Series([...], name="abc")` can be can be ("abc", "", "") if the current DataFrame has a multi-index for its column """ from databricks.koalas.series import Series if isinstance(kser_or_kdf, Series): kdf = kser_or_kdf.to_frame() else: assert isinstance(kser_or_kdf, DataFrame), type(kser_or_kdf) kdf = kser_or_kdf.copy() kdf.columns = pd.MultiIndex.from_tuples( [ tuple([name_like_string(label)] + ([""] * (level - 1))) for label in kdf._internal.column_labels ] ) return kdf def __getattr__(self, key: str) -> Any: if key.startswith("__"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): property_or_func = getattr(_MissingPandasLikeDataFrame, key) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) try: return self.loc[:, key] except KeyError: raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, key) ) def __len__(self): return self._internal.resolved_copy.spark_frame.count() def __dir__(self): fields = [ f for f in self._internal.resolved_copy.spark_frame.schema.fieldNames() if " " not in f ] return super(DataFrame, self).__dir__() + fields def __iter__(self): return iter(self.columns) # NDArray Compat def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any): # TODO: is it possible to deduplicate it with '_map_series_op'? 
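# Binary ufuncs over DataFrames that are not anchored to the same Spark plan
# must be aligned column by column first; every other case is applied directly
# against this frame's columns.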
if all(isinstance(inp, DataFrame) for inp in inputs) and any( not same_anchor(inp, inputs[0]) for inp in inputs ): # binary only assert len(inputs) == 2 this = inputs[0] that = inputs[1] if this._internal.column_labels_level != that._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") # Different DataFrames def apply_op(kdf, this_column_labels, that_column_labels): for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( ufunc(kdf._kser_for(this_label), kdf._kser_for(that_label), **kwargs), this_label, ) return align_diff_frames(apply_op, this, that, fillna=True, how="full") else: # DataFrame and Series applied = [] this = inputs[0] assert all(inp is this for inp in inputs if isinstance(inp, DataFrame)) for label in this._internal.column_labels: arguments = [] for inp in inputs: arguments.append(inp[label] if isinstance(inp, DataFrame) else inp) # both binary and unary. applied.append(ufunc(*arguments, **kwargs)) internal = this._internal.with_new_columns(applied) return DataFrame(internal) if sys.version_info >= (3, 7): def __class_getitem__(cls, params): # This is a workaround to support variadic generic in DataFrame in Python 3.7. # See https://github.com/python/typing/issues/193 # we always wraps the given type hints by a tuple to mimic the variadic generic. return _create_tuple_for_frame_type(params) elif (3, 5) <= sys.version_info < (3, 7): # This is a workaround to support variadic generic in DataFrame in Python 3.5+ # The implementation is in its metaclass so this flag is needed to distinguish # Koalas DataFrame. is_dataframe = None def _reduce_spark_multi(sdf, aggs): """ Performs a reduction on a dataframe, the functions being known sql aggregate functions. """ assert isinstance(sdf, spark.DataFrame) sdf0 = sdf.agg(*aggs) l = sdf0.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row) assert len(l2) == len(aggs), (row, l2) return l2 class CachedDataFrame(DataFrame): """ Cached Koalas DataFrame, which corresponds to pandas DataFrame logically, but internally it caches the corresponding Spark DataFrame. """ def __init__(self, internal, storage_level=None): if storage_level is None: self._cached = internal.spark_frame.cache() elif isinstance(storage_level, StorageLevel): self._cached = internal.spark_frame.persist(storage_level) else: raise TypeError( "Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`" ) super(CachedDataFrame, self).__init__(internal) def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.unpersist() # create accessor for Spark related methods. spark = CachedAccessor("spark", CachedSparkFrameMethods) @property def storage_level(self): warnings.warn( "DataFrame.storage_level is deprecated as of DataFrame.spark.storage_level. " "Please use the API instead.", FutureWarning, ) return self.spark.storage_level storage_level.__doc__ = CachedSparkFrameMethods.storage_level.__doc__ def unpersist(self): warnings.warn( "DataFrame.unpersist is deprecated as of DataFrame.spark.unpersist. " "Please use the API instead.", FutureWarning, ) return self.spark.unpersist() unpersist.__doc__ = CachedSparkFrameMethods.unpersist.__doc__
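The cached frame above is easiest to understand from the caller's side. Below is a minimal usage sketch, assuming databricks.koalas and a running Spark session are available and that the DataFrame.spark.cache() accessor (which returns a CachedDataFrame, per the CachedAccessor/CachedSparkFrameMethods wiring above) behaves as in this codebase:

import databricks.koalas as ks

kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# spark.cache() hands back a CachedDataFrame; using it as a context manager
# relies on __exit__ calling unpersist() so the cached plan is released.
with kdf.spark.cache() as cached:
    print(len(cached))                 # the count runs against the cached plan
    print(cached.spark.storage_level)  # inspect the effective StorageLevel

Outside the with-block the underlying Spark DataFrame is no longer persisted, which is the behavior the __exit__/unpersist pair above is meant to guarantee.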
1
15,936
Actually, I think it's okay to just change to `if isinstance(key, (str)) or is_list_like(key):` and `key = list(key) if is_list_like(key) else key` for simplicity for now.
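For reference, the simplification suggested above could look roughly like the following hypothetical helper (a sketch of the reviewer's wording, not the actual patch; the function name and the error message are made up for illustration):

from pandas.api.types import is_list_like

def _normalize_key(key):
    # Accept a plain string label or any list-like of labels; materialize
    # list-likes (tuples, Index objects, generators) into a concrete list.
    if isinstance(key, str) or is_list_like(key):
        return list(key) if is_list_like(key) else key
    raise TypeError("key must be a string or a list-like of labels")

Note that pandas' is_list_like() returns False for plain strings, so the two branches do not overlap.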
databricks-koalas
py
@@ -183,7 +183,7 @@ func (s *resetorSuite) TestResetWorkflowExecution_NoReplication() { currRunID := uuid.New().String() we := commonpb.WorkflowExecution{ WorkflowId: wid, - RunId: forkRunID, + RunId: "", } request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{ Namespace: "testNamespace",
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package history import ( "context" "fmt" "testing" "time" "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" namespacepb "go.temporal.io/api/namespace/v1" "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" workflowpb "go.temporal.io/api/workflow/v1" "go.temporal.io/api/workflowservice/v1" enumsspb "go.temporal.io/server/api/enums/v1" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/persistenceblobs/v1" replicationspb "go.temporal.io/server/api/replication/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/cache" "go.temporal.io/server/common/clock" "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/failure" "go.temporal.io/server/common/log" "go.temporal.io/server/common/mocks" "go.temporal.io/server/common/payloads" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/primitives/timestamp" ) type ( resetorSuite struct { suite.Suite *require.Assertions controller *gomock.Controller mockShard *shardContextTest mockTxProcessor *MocktransferQueueProcessor mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MocktimerQueueProcessor mockEventsCache *MockeventsCache mockNamespaceCache *cache.MockNamespaceCache mockClusterMetadata *cluster.MockMetadata mockExecutionMgr *mocks.ExecutionManager mockHistoryV2Mgr *mocks.HistoryV2Manager config *Config logger log.Logger shardID int historyEngine *historyEngineImpl resetor workflowResetor } ) func TestWorkflowResetorSuite(t *testing.T) { s := new(resetorSuite) suite.Run(t, s) } func (s *resetorSuite) SetupSuite() { s.config = NewDynamicConfigForTest() } func (s *resetorSuite) TearDownSuite() { } func (s *resetorSuite) SetupTest() { s.Assertions = require.New(s.T()) shardID := 10 s.shardID = shardID s.controller = gomock.NewController(s.T()) s.mockTxProcessor = NewMocktransferQueueProcessor(s.controller) s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.controller) s.mockTimerProcessor = NewMocktimerQueueProcessor(s.controller) 
s.mockTxProcessor.EXPECT().NotifyNewTask(gomock.Any(), gomock.Any()).AnyTimes() s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor.EXPECT().NotifyNewTimers(gomock.Any(), gomock.Any()).AnyTimes() s.mockShard = newTestShardContext( s.controller, &persistence.ShardInfoWithFailover{ ShardInfo: &persistenceblobs.ShardInfo{ ShardId: int32(shardID), RangeId: 1, TransferAckLevel: 0, }}, s.config, ) s.mockExecutionMgr = s.mockShard.resource.ExecutionMgr s.mockHistoryV2Mgr = s.mockShard.resource.HistoryMgr s.mockNamespaceCache = s.mockShard.resource.NamespaceCache s.mockClusterMetadata = s.mockShard.resource.ClusterMetadata s.mockEventsCache = s.mockShard.mockEventsCache s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes() s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(common.EmptyVersion).Return(cluster.TestCurrentClusterName).AnyTimes() s.mockEventsCache.EXPECT().putEvent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() s.logger = s.mockShard.GetLogger() historyCache := newHistoryCache(s.mockShard) h := &historyEngineImpl{ currentClusterName: s.mockShard.GetClusterMetadata().GetCurrentClusterName(), shard: s.mockShard, clusterMetadata: s.mockClusterMetadata, executionManager: s.mockExecutionMgr, historyV2Mgr: s.mockHistoryV2Mgr, historyCache: historyCache, logger: s.logger, metricsClient: s.mockShard.GetMetricsClient(), tokenSerializer: common.NewProtoTaskTokenSerializer(), config: s.config, historyEventNotifier: newHistoryEventNotifier(clock.NewRealTimeSource(), s.mockShard.GetMetricsClient(), func(string, string) int { return shardID }), txProcessor: s.mockTxProcessor, replicatorProcessor: s.mockReplicationProcessor, timerProcessor: s.mockTimerProcessor, } s.mockShard.SetEngine(h) s.resetor = newWorkflowResetor(h) h.resetor = s.resetor s.historyEngine = h } func (s *resetorSuite) TearDownTest() { s.controller.Finish() s.mockShard.Finish(s.T()) } func (s *resetorSuite) TestResetWorkflowExecution_NoReplication() { testNamespaceEntry := cache.NewLocalNamespaceCacheEntryForTest( &persistenceblobs.NamespaceInfo{Id: testNamespaceID}, &persistenceblobs.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", nil, ) s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() request := &historyservice.ResetWorkflowExecutionRequest{} namespaceID := testNamespaceID request.NamespaceId = namespaceID request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{} wid := "wId" wfType := "wfType" taskQueueName := "taskQueue" forkRunID := uuid.New().String() currRunID := uuid.New().String() we := commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, } request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{ Namespace: "testNamespace", WorkflowExecution: &we, Reason: "test reset", WorkflowTaskFinishEventId: 29, RequestId: uuid.New().String(), } forkGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, }, } timerFiredID := "timerID0" timerUnfiredID1 := "timerID1" timerUnfiredID2 := "timerID2" timerAfterReset := "timerID3" actIDCompleted1 := "actID0" actIDCompleted2 := "actID1" actIDStarted1 := "actID2" actIDNotStarted := 
"actID3" actIDStarted2 := "actID4" signalName1 := "sig1" signalName2 := "sig2" forkBranchToken := []byte("forkBranchToken") forkExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: forkRunID, BranchToken: forkBranchToken, NextEventID: 34, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } forkGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: forkExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, }} currGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: currRunID, }, } currExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: currRunID, NextEventID: common.FirstEventID, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } compareCurrExeInfo := copyWorkflowExecutionInfo(currExeInfo) currGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: currExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, }} gcurResponse := &persistence.GetCurrentExecutionResponse{ RunID: currRunID, } readHistoryReq := &persistence.ReadHistoryBranchRequest{ BranchToken: forkBranchToken, MinEventID: common.FirstEventID, MaxEventID: int64(34), PageSize: defaultHistoryPageSize, NextPageToken: nil, ShardID: &s.shardID, } taskQueue := &taskqueuepb.TaskQueue{ Name: taskQueueName, } readHistoryResp := &persistence.ReadHistoryBranchByBatchResponse{ NextPageToken: nil, Size: 1000, LastFirstEventID: int64(31), History: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: 1, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{ WorkflowType: &commonpb.WorkflowType{ Name: wfType, }, TaskQueue: taskQueue, Input: payloads.EncodeString("testInput"), WorkflowExecutionTimeout: timestamp.DurationPtr(100 * time.Second), WorkflowRunTimeout: timestamp.DurationPtr(50 * time.Second), WorkflowTaskTimeout: timestamp.DurationPtr(200 * time.Second), }}, }, { EventId: 2, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 3, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 2, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 4, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: 
&historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 2, StartedEventId: 3, }}, }, { EventId: 5, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_MARKER_RECORDED, Attributes: &historypb.HistoryEvent_MarkerRecordedEventAttributes{MarkerRecordedEventAttributes: &historypb.MarkerRecordedEventAttributes{ MarkerName: "Version", Details: map[string]*commonpb.Payloads{ "change-id": payloads.EncodeString("32283"), "version": payloads.EncodeInt(22), }, WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 6, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted1, ActivityType: &commonpb.ActivityType{ Name: "actType0", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 7, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerFiredID, StartToFireTimeout: timestamp.DurationPtr(2 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 8, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 6, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 9, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 6, StartedEventId: 8, }}, }, { EventId: 10, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 11, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 10, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 12, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 10, StartedEventId: 11, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 13, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_FIRED, Attributes: &historypb.HistoryEvent_TimerFiredEventAttributes{TimerFiredEventAttributes: 
&historypb.TimerFiredEventAttributes{ TimerId: timerFiredID, }}, }, { EventId: 14, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 15, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 14, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 16, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 14, StartedEventId: 15, }}, }, { EventId: 17, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDStarted1, ActivityType: &commonpb.ActivityType{ Name: "actType1", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, RetryPolicy: &commonpb.RetryPolicy{ InitialInterval: timestamp.DurationPtr(1 * time.Second), BackoffCoefficient: 0.2, MaximumAttempts: 10, MaximumInterval: timestamp.DurationPtr(1000 * time.Second), }, }}, }, { EventId: 18, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDNotStarted, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 19, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID1, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 20, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID2, StartToFireTimeout: timestamp.DurationPtr(8 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 21, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: 
&historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted2, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 22, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDStarted2, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 23, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 21, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 24, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 17, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 25, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 22, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 26, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 21, StartedEventId: 23, }}, }, { EventId: 27, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 28, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 27, }}, }, }, }, // ///////////// reset point///////////// { Events: []*historypb.HistoryEvent{ { EventId: 29, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 27, StartedEventId: 28, }}, }, { EventId: 30, Version: 
common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerAfterReset, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 29, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 31, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 18, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 32, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName1, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 33, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName2, }}, }, }, }, }, } eid := int64(0) eventTime := time.Unix(0, 1000).UTC() for _, be := range readHistoryResp.History { for _, e := range be.Events { eid++ if e.GetEventId() != eid { s.Fail(fmt.Sprintf("non-contiguous eventID: %v, %v", eid, e.GetEventId())) } e.EventTime = &eventTime } } newBranchToken := []byte("newBranch") forkResp := &persistence.ForkHistoryBranchResponse{ NewBranchToken: newBranchToken, } appendV2Resp := &persistence.AppendHistoryNodesResponse{ Size: 200, } s.mockExecutionMgr.On("GetWorkflowExecution", forkGwmsRequest).Return(forkGwmsResponse, nil).Once() s.mockExecutionMgr.On("GetCurrentExecution", mock.Anything).Return(gcurResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", currGwmsRequest).Return(currGwmsResponse, nil).Once() s.mockHistoryV2Mgr.On("ReadHistoryBranchByBatch", readHistoryReq).Return(readHistoryResp, nil).Once() s.mockHistoryV2Mgr.On("ForkHistoryBranch", mock.Anything).Return(forkResp, nil).Once() s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(appendV2Resp, nil).Times(2) s.mockExecutionMgr.On("ResetWorkflowExecution", mock.Anything).Return(nil).Once() response, err := s.historyEngine.ResetWorkflowExecution(context.Background(), request) s.Nil(err) s.NotNil(response.RunId) // verify historyEvent: 6 events to append // 1. workflowTaskFailed :29 // 2. activityTaskFailed :30 // 3. activityTaskFailed :31 // 4. signal 1 :32 // 5. signal 2 :33 // 6. 
workflowTaskScheduled :34 calls := s.mockHistoryV2Mgr.Calls s.Equal(4, len(calls)) appendCall := calls[3] s.Equal("AppendHistoryNodes", appendCall.Method) appendReq, ok := appendCall.Arguments[0].(*persistence.AppendHistoryNodesRequest) s.Equal(true, ok) s.Equal(newBranchToken, appendReq.BranchToken) s.Equal(false, appendReq.IsNewBranch) s.Equal(6, len(appendReq.Events)) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_FAILED, enumspb.EventType(appendReq.Events[0].GetEventType())) s.Equal(enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED, enumspb.EventType(appendReq.Events[1].GetEventType())) s.Equal(enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED, enumspb.EventType(appendReq.Events[2].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, enumspb.EventType(appendReq.Events[3].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, enumspb.EventType(appendReq.Events[4].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, enumspb.EventType(appendReq.Events[5].GetEventType())) s.Equal(int64(29), appendReq.Events[0].GetEventId()) s.Equal(int64(30), appendReq.Events[1].GetEventId()) s.Equal(int64(31), appendReq.Events[2].GetEventId()) s.Equal(int64(32), appendReq.Events[3].GetEventId()) s.Equal(int64(33), appendReq.Events[4].GetEventId()) s.Equal(int64(34), appendReq.Events[5].GetEventId()) // verify executionManager request calls = s.mockExecutionMgr.Calls s.Equal(4, len(calls)) resetCall := calls[3] s.Equal("ResetWorkflowExecution", resetCall.Method) resetReq, ok := resetCall.Arguments[0].(*persistence.ResetWorkflowExecutionRequest) s.True(resetReq.CurrentWorkflowMutation.ExecutionInfo.LastEventTaskID > 0) resetReq.CurrentWorkflowMutation.ExecutionInfo.LastEventTaskID = 0 s.Equal(true, ok) s.Equal(true, resetReq.CurrentWorkflowMutation != nil) compareCurrExeInfo.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED compareCurrExeInfo.Status = enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED compareCurrExeInfo.NextEventID = 2 compareCurrExeInfo.CompletionEventBatchID = 1 s.Equal(compareCurrExeInfo, resetReq.CurrentWorkflowMutation.ExecutionInfo) s.Equal(1, len(resetReq.CurrentWorkflowMutation.TransferTasks)) s.Equal(1, len(resetReq.CurrentWorkflowMutation.TimerTasks)) s.Equal(enumsspb.TASK_TYPE_TRANSFER_CLOSE_EXECUTION, resetReq.CurrentWorkflowMutation.TransferTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_DELETE_HISTORY_EVENT, resetReq.CurrentWorkflowMutation.TimerTasks[0].GetType()) s.Equal(int64(200), resetReq.CurrentWorkflowMutation.ExecutionStats.HistorySize) s.Equal("wfType", resetReq.NewWorkflowSnapshot.ExecutionInfo.WorkflowTypeName) s.True(len(resetReq.NewWorkflowSnapshot.ExecutionInfo.RunID) > 0) s.Equal([]byte(newBranchToken), resetReq.NewWorkflowSnapshot.ExecutionInfo.BranchToken) // 35 = resetEventID(29) + 6 in a batch s.Equal(int64(34), resetReq.NewWorkflowSnapshot.ExecutionInfo.WorkflowTaskScheduleID) s.Equal(int64(35), resetReq.NewWorkflowSnapshot.ExecutionInfo.NextEventID) // one activity task, one workflow task and one record workflow started task s.Equal(3, len(resetReq.NewWorkflowSnapshot.TransferTasks)) s.Equal(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_WORKFLOW_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[1].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_RECORD_WORKFLOW_STARTED, resetReq.NewWorkflowSnapshot.TransferTasks[2].GetType()) // WF timeout task, user timer, activity timeout timer, activity retry timer s.Equal(3, 
len(resetReq.NewWorkflowSnapshot.TimerTasks)) s.Equal(enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT, resetReq.NewWorkflowSnapshot.TimerTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_USER_TIMER, resetReq.NewWorkflowSnapshot.TimerTasks[1].GetType()) s.Equal(enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT, resetReq.NewWorkflowSnapshot.TimerTasks[2].GetType()) s.Equal(2, len(resetReq.NewWorkflowSnapshot.TimerInfos)) s.assertTimerIDs([]string{timerUnfiredID1, timerUnfiredID2}, resetReq.NewWorkflowSnapshot.TimerInfos) s.Equal(1, len(resetReq.NewWorkflowSnapshot.ActivityInfos)) s.assertActivityIDs([]string{actIDNotStarted}, resetReq.NewWorkflowSnapshot.ActivityInfos) s.Nil(resetReq.NewWorkflowSnapshot.ReplicationTasks) s.Nil(resetReq.NewWorkflowSnapshot.ReplicationState) s.Equal(0, len(resetReq.NewWorkflowSnapshot.RequestCancelInfos)) // not supported feature s.Empty(resetReq.NewWorkflowSnapshot.ChildExecutionInfos) s.Empty(resetReq.NewWorkflowSnapshot.SignalInfos) s.Empty(resetReq.NewWorkflowSnapshot.SignalRequestedIDs) } func (s *resetorSuite) assertTimerIDs(ids []string, timers []*persistenceblobs.TimerInfo) { m := map[string]bool{} for _, s := range ids { m[s] = true } for _, t := range timers { delete(m, t.GetTimerId()) } s.Equal(0, len(m)) } func (s *resetorSuite) assertActivityIDs(ids []string, timers []*persistenceblobs.ActivityInfo) { m := map[string]bool{} for _, s := range ids { m[s] = true } for _, t := range timers { delete(m, t.ActivityId) } s.Equal(0, len(m)) } func (s *resetorSuite) TestResetWorkflowExecution_NoReplication_WithRequestCancel() { testNamespaceEntry := cache.NewLocalNamespaceCacheEntryForTest( &persistenceblobs.NamespaceInfo{Id: testNamespaceID}, &persistenceblobs.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, "", nil, ) s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() request := &historyservice.ResetWorkflowExecutionRequest{} namespaceID := testNamespaceID request.NamespaceId = namespaceID request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{} wid := "wId" wfType := "wfType" taskQueueName := "taskQueue" forkRunID := uuid.New().String() currRunID := uuid.New().String() we := commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, } request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{ Namespace: "testNamespace", WorkflowExecution: &we, Reason: "test reset", WorkflowTaskFinishEventId: 30, RequestId: uuid.New().String(), } forkGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, }, } timerFiredID := "timerID0" timerUnfiredID1 := "timerID1" timerUnfiredID2 := "timerID2" timerAfterReset := "timerID3" actIDCompleted1 := "actID0" actIDCompleted2 := "actID1" actIDStartedRetry := "actID2" actIDNotStarted := "actID3" actIDStartedNoRetry := "actID4" signalName1 := "sig1" signalName2 := "sig2" cancelWE := &commonpb.WorkflowExecution{ WorkflowId: "cancel-wfid", RunId: uuid.New().String(), } forkBranchToken := []byte("forkBranchToken") forkExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: forkRunID, BranchToken: forkBranchToken, NextEventID: 35, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: 
enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } forkGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: forkExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, }} currGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: currRunID, }, } currExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: currRunID, NextEventID: common.FirstEventID, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } currGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: currExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, }} gcurResponse := &persistence.GetCurrentExecutionResponse{ RunID: currRunID, } readHistoryReq := &persistence.ReadHistoryBranchRequest{ BranchToken: forkBranchToken, MinEventID: common.FirstEventID, MaxEventID: int64(35), PageSize: defaultHistoryPageSize, NextPageToken: nil, ShardID: &s.shardID, } taskQueue := &taskqueuepb.TaskQueue{ Name: taskQueueName, } readHistoryResp := &persistence.ReadHistoryBranchByBatchResponse{ NextPageToken: nil, Size: 1000, LastFirstEventID: int64(31), History: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: 1, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{ WorkflowType: &commonpb.WorkflowType{ Name: wfType, }, TaskQueue: taskQueue, Input: payloads.EncodeString("testInput"), WorkflowExecutionTimeout: timestamp.DurationPtr(100 * time.Second), WorkflowTaskTimeout: timestamp.DurationPtr(200 * time.Second), }}, }, { EventId: 2, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 3, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 2, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 4, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 2, StartedEventId: 3, }}, }, { EventId: 5, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_MARKER_RECORDED, Attributes: &historypb.HistoryEvent_MarkerRecordedEventAttributes{MarkerRecordedEventAttributes: &historypb.MarkerRecordedEventAttributes{ MarkerName: "Version", Details: map[string]*commonpb.Payloads{ "change-id": payloads.EncodeString("32283"), "version": payloads.EncodeInt(22), }, WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 6, Version: common.EmptyVersion, EventType: 
enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted1, ActivityType: &commonpb.ActivityType{ Name: "actType0", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 7, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerFiredID, StartToFireTimeout: timestamp.DurationPtr(2 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 8, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 6, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 9, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 6, StartedEventId: 8, }}, }, { EventId: 10, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 11, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 10, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 12, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 10, StartedEventId: 11, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 13, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_FIRED, Attributes: &historypb.HistoryEvent_TimerFiredEventAttributes{TimerFiredEventAttributes: &historypb.TimerFiredEventAttributes{ TimerId: timerFiredID, }}, }, { EventId: 14, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 15, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: 
&historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 14, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 16, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 14, StartedEventId: 15, }}, }, { EventId: 17, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDStartedRetry, ActivityType: &commonpb.ActivityType{ Name: "actType1", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, RetryPolicy: &commonpb.RetryPolicy{ InitialInterval: timestamp.DurationPtr(1 * time.Second), BackoffCoefficient: 0.2, MaximumAttempts: 10, MaximumInterval: timestamp.DurationPtr(1000 * time.Second), }, }}, }, { EventId: 18, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDNotStarted, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 19, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID1, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 20, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID2, StartToFireTimeout: timestamp.DurationPtr(8 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 21, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted2, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 22, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: 
&historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDStartedNoRetry, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 23, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED, Attributes: &historypb.HistoryEvent_RequestCancelExternalWorkflowExecutionInitiatedEventAttributes{RequestCancelExternalWorkflowExecutionInitiatedEventAttributes: &historypb.RequestCancelExternalWorkflowExecutionInitiatedEventAttributes{ Namespace: "any-namespace", WorkflowExecution: cancelWE, WorkflowTaskCompletedEventId: 16, ChildWorkflowOnly: true, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 24, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 21, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 25, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 17, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 26, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 22, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 27, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 21, StartedEventId: 24, }}, }, { EventId: 28, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 29, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 28, }}, }, }, }, // ///////////// reset point///////////// { Events: []*historypb.HistoryEvent{ { EventId: 30, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 28, StartedEventId: 29, }}, }, { EventId: 31, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: 
&historypb.TimerStartedEventAttributes{ TimerId: timerAfterReset, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 30, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 32, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 18, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 33, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName1, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 34, Version: common.EmptyVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName2, }}, }, }, }, }, } eid := int64(0) eventTime := time.Unix(0, 1000).UTC() for _, be := range readHistoryResp.History { for _, e := range be.Events { eid++ if e.GetEventId() != eid { s.Fail(fmt.Sprintf("non-contiguous eventID: %v, %v", eid, e.GetEventId())) } e.EventTime = &eventTime } } s.mockExecutionMgr.On("GetWorkflowExecution", forkGwmsRequest).Return(forkGwmsResponse, nil).Once() s.mockExecutionMgr.On("GetCurrentExecution", mock.Anything).Return(gcurResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", currGwmsRequest).Return(currGwmsResponse, nil).Once() s.mockHistoryV2Mgr.On("ReadHistoryBranchByBatch", readHistoryReq).Return(readHistoryResp, nil).Once() _, err := s.historyEngine.ResetWorkflowExecution(context.Background(), request) s.EqualError(err, "it is not allowed resetting to a point that workflow has pending request cancel.") } func (s *resetorSuite) TestResetWorkflowExecution_Replication_WithTerminatingCurrent() { namespace := "testNamespace" beforeResetVersion := int64(100) afterResetVersion := int64(101) s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(beforeResetVersion).Return("standby").AnyTimes() s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(afterResetVersion).Return("active").AnyTimes() testNamespaceEntry := cache.NewGlobalNamespaceCacheEntryForTest( &persistenceblobs.NamespaceInfo{Id: testNamespaceID}, &persistenceblobs.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, &persistenceblobs.NamespaceReplicationConfig{ ActiveClusterName: "active", Clusters: []string{ "active", "standby", }, }, afterResetVersion, cluster.GetTestClusterMetadata(true, true), ) // override namespace cache s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() request := &historyservice.ResetWorkflowExecutionRequest{} namespaceID := testNamespaceID request.NamespaceId = namespaceID request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{} wid := "wId" wfType := "wfType" taskQueueName := "taskQueue" forkRunID := uuid.New().String() currRunID := uuid.New().String() we := commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, } request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{ Namespace: namespace, 
WorkflowExecution: &we, Reason: "test reset", WorkflowTaskFinishEventId: 30, RequestId: uuid.New().String(), } forkGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, }, } timerFiredID := "timerID0" timerUnfiredID1 := "timerID1" timerUnfiredID2 := "timerID2" timerAfterReset := "timerID3" actIDCompleted1 := "actID0" actIDCompleted2 := "actID1" actIDRetry := "actID2" // not started, will reschedule actIDNotStarted := "actID3" // not started, will reschedule actIDStartedNoRetry := "actID4" // started, will fail signalName1 := "sig1" signalName2 := "sig2" signalName3 := "sig3" signalName4 := "sig4" forkBranchToken := []byte("forkBranchToken") forkExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: forkRunID, BranchToken: forkBranchToken, NextEventID: 35, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } forkRepState := &persistenceblobs.ReplicationState{ CurrentVersion: beforeResetVersion, StartVersion: beforeResetVersion, LastWriteEventId: common.EmptyEventID, LastWriteVersion: common.EmptyVersion, LastReplicationInfo: map[string]*replicationspb.ReplicationInfo{}, } forkGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: forkExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, ReplicationState: forkRepState, }} currGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: currRunID, }, } currExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: currRunID, NextEventID: common.FirstEventID, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } compareCurrExeInfo := copyWorkflowExecutionInfo(currExeInfo) currGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: currExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, ReplicationState: forkRepState, }} gcurResponse := &persistence.GetCurrentExecutionResponse{ RunID: currRunID, } readHistoryReq := &persistence.ReadHistoryBranchRequest{ BranchToken: forkBranchToken, MinEventID: common.FirstEventID, MaxEventID: int64(35), PageSize: defaultHistoryPageSize, NextPageToken: nil, ShardID: &s.shardID, } taskQueue := &taskqueuepb.TaskQueue{ Name: taskQueueName, } readHistoryResp := &persistence.ReadHistoryBranchByBatchResponse{ NextPageToken: nil, Size: 1000, LastFirstEventID: int64(31), History: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: 1, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{ WorkflowType: &commonpb.WorkflowType{ Name: wfType, }, TaskQueue: taskQueue, Input: payloads.EncodeString("testInput"), WorkflowExecutionTimeout: timestamp.DurationPtr(100 * time.Second), WorkflowTaskTimeout: timestamp.DurationPtr(200 * time.Second), }}, }, { EventId: 
2, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 3, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 2, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 4, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 2, StartedEventId: 3, }}, }, { EventId: 5, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_MARKER_RECORDED, Attributes: &historypb.HistoryEvent_MarkerRecordedEventAttributes{MarkerRecordedEventAttributes: &historypb.MarkerRecordedEventAttributes{ MarkerName: "Version", Details: map[string]*commonpb.Payloads{ "change-id": payloads.EncodeString("32283"), "version": payloads.EncodeInt(22), }, WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 6, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted1, ActivityType: &commonpb.ActivityType{ Name: "actType0", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 7, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerFiredID, StartToFireTimeout: timestamp.DurationPtr(2 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 8, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 6, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 9, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 6, StartedEventId: 8, }}, }, { EventId: 10, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 11, Version: beforeResetVersion, EventType: 
enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 10, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 12, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 10, StartedEventId: 11, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 13, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_FIRED, Attributes: &historypb.HistoryEvent_TimerFiredEventAttributes{TimerFiredEventAttributes: &historypb.TimerFiredEventAttributes{ TimerId: timerFiredID, }}, }, { EventId: 14, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 15, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 14, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 16, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 14, StartedEventId: 15, }}, }, { EventId: 17, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDRetry, ActivityType: &commonpb.ActivityType{ Name: "actType1", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, RetryPolicy: &commonpb.RetryPolicy{ InitialInterval: timestamp.DurationPtr(1 * time.Second), BackoffCoefficient: 0.2, MaximumAttempts: 10, MaximumInterval: timestamp.DurationPtr(1000 * time.Second), }, }}, }, { EventId: 18, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDNotStarted, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 19, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: 
&historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID1, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 20, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID2, StartToFireTimeout: timestamp.DurationPtr(8 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 21, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted2, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 22, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDStartedNoRetry, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 23, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName3, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 24, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 21, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 25, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 26, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 22, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 27, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ 
ScheduledEventId: 21, StartedEventId: 24, }}, }, { EventId: 28, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 29, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 28, }}, }, }, }, // ///////////// reset point///////////// { Events: []*historypb.HistoryEvent{ { EventId: 30, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 28, StartedEventId: 29, }}, }, { EventId: 31, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerAfterReset, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 30, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 32, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 18, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 33, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName1, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 34, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName2, }}, }, }, }, }, } eid := int64(0) eventTime := time.Unix(0, 1000).UTC() for _, be := range readHistoryResp.History { for _, e := range be.Events { eid++ if e.GetEventId() != eid { s.Fail(fmt.Sprintf("non-contiguous eventID: %v, %v", eid, e.GetEventId())) } e.EventTime = &eventTime } } newBranchToken := []byte("newBranch") forkResp := &persistence.ForkHistoryBranchResponse{ NewBranchToken: newBranchToken, } appendV2Resp := &persistence.AppendHistoryNodesResponse{ Size: 200, } s.mockExecutionMgr.On("GetWorkflowExecution", forkGwmsRequest).Return(forkGwmsResponse, nil).Once() s.mockExecutionMgr.On("GetCurrentExecution", mock.Anything).Return(gcurResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", currGwmsRequest).Return(currGwmsResponse, nil).Once() s.mockHistoryV2Mgr.On("ReadHistoryBranchByBatch", readHistoryReq).Return(readHistoryResp, nil).Once() s.mockHistoryV2Mgr.On("ForkHistoryBranch", mock.Anything).Return(forkResp, nil).Once() s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(appendV2Resp, nil).Times(2) 
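// ResetWorkflowExecution is expected exactly once: it is the single persistence call that carries both the mutation terminating the current run and the snapshot for the new, reset run. The assertions that follow unpack that request.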
s.mockExecutionMgr.On("ResetWorkflowExecution", mock.Anything).Return(nil).Once() response, err := s.historyEngine.ResetWorkflowExecution(context.Background(), request) s.Nil(err) s.NotNil(response.RunId) // verify historyEvent: 5 events to append // 1. workflowTaskFailed // 2. activityTaskFailed // 3. signal 1 // 4. signal 2 // 5. workflowTaskScheduled calls := s.mockHistoryV2Mgr.Calls s.Equal(4, len(calls)) appendCall := calls[3] s.Equal("AppendHistoryNodes", appendCall.Method) appendReq, ok := appendCall.Arguments[0].(*persistence.AppendHistoryNodesRequest) s.Equal(true, ok) s.Equal(newBranchToken, appendReq.BranchToken) s.Equal(false, appendReq.IsNewBranch) s.Equal(5, len(appendReq.Events)) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_FAILED, enumspb.EventType(appendReq.Events[0].GetEventType())) s.Equal(enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED, enumspb.EventType(appendReq.Events[1].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, enumspb.EventType(appendReq.Events[2].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, enumspb.EventType(appendReq.Events[3].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, enumspb.EventType(appendReq.Events[4].GetEventType())) s.Equal(int64(30), appendReq.Events[0].GetEventId()) s.Equal(int64(31), appendReq.Events[1].GetEventId()) s.Equal(int64(32), appendReq.Events[2].GetEventId()) s.Equal(int64(33), appendReq.Events[3].GetEventId()) s.Equal(int64(34), appendReq.Events[4].GetEventId()) // verify executionManager request calls = s.mockExecutionMgr.Calls s.Equal(4, len(calls)) resetCall := calls[3] s.Equal("ResetWorkflowExecution", resetCall.Method) resetReq, ok := resetCall.Arguments[0].(*persistence.ResetWorkflowExecutionRequest) s.True(resetReq.CurrentWorkflowMutation.ExecutionInfo.LastEventTaskID > 0) resetReq.CurrentWorkflowMutation.ExecutionInfo.LastEventTaskID = 0 s.Equal(true, ok) s.Equal(true, resetReq.CurrentWorkflowMutation != nil) compareCurrExeInfo.State = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED compareCurrExeInfo.Status = enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED compareCurrExeInfo.NextEventID = 2 compareCurrExeInfo.LastFirstEventID = 1 compareCurrExeInfo.CompletionEventBatchID = 1 s.Equal(compareCurrExeInfo, resetReq.CurrentWorkflowMutation.ExecutionInfo) s.Equal(1, len(resetReq.CurrentWorkflowMutation.TransferTasks)) s.Equal(1, len(resetReq.CurrentWorkflowMutation.TimerTasks)) s.Equal(enumsspb.TASK_TYPE_TRANSFER_CLOSE_EXECUTION, resetReq.CurrentWorkflowMutation.TransferTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_DELETE_HISTORY_EVENT, resetReq.CurrentWorkflowMutation.TimerTasks[0].GetType()) s.Equal(int64(200), resetReq.CurrentWorkflowMutation.ExecutionStats.HistorySize) s.Equal("wfType", resetReq.NewWorkflowSnapshot.ExecutionInfo.WorkflowTypeName) s.True(len(resetReq.NewWorkflowSnapshot.ExecutionInfo.RunID) > 0) s.Equal([]byte(newBranchToken), resetReq.NewWorkflowSnapshot.ExecutionInfo.BranchToken) s.Equal(int64(34), resetReq.NewWorkflowSnapshot.ExecutionInfo.WorkflowTaskScheduleID) s.Equal(int64(35), resetReq.NewWorkflowSnapshot.ExecutionInfo.NextEventID) s.Equal(4, len(resetReq.NewWorkflowSnapshot.TransferTasks)) s.Equal(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[1].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_WORKFLOW_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[2].GetType()) 
s.Equal(enumsspb.TASK_TYPE_TRANSFER_RECORD_WORKFLOW_STARTED, resetReq.NewWorkflowSnapshot.TransferTasks[3].GetType()) // WF timeout task, user timer, activity timeout timer, activity retry timer s.Equal(3, len(resetReq.NewWorkflowSnapshot.TimerTasks)) s.Equal(enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT, resetReq.NewWorkflowSnapshot.TimerTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_USER_TIMER, resetReq.NewWorkflowSnapshot.TimerTasks[1].GetType()) s.Equal(enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT, resetReq.NewWorkflowSnapshot.TimerTasks[2].GetType()) s.Equal(2, len(resetReq.NewWorkflowSnapshot.TimerInfos)) s.assertTimerIDs([]string{timerUnfiredID1, timerUnfiredID2}, resetReq.NewWorkflowSnapshot.TimerInfos) s.Equal(2, len(resetReq.NewWorkflowSnapshot.ActivityInfos)) s.assertActivityIDs([]string{actIDRetry, actIDNotStarted}, resetReq.NewWorkflowSnapshot.ActivityInfos) s.Equal(1, len(resetReq.NewWorkflowSnapshot.ReplicationTasks)) s.Equal(enumsspb.TASK_TYPE_REPLICATION_HISTORY, resetReq.NewWorkflowSnapshot.ReplicationTasks[0].GetType()) s.Equal(1, len(resetReq.CurrentWorkflowMutation.ReplicationTasks)) s.Equal(enumsspb.TASK_TYPE_REPLICATION_HISTORY, resetReq.CurrentWorkflowMutation.ReplicationTasks[0].GetType()) compareRepState := copyReplicationState(forkRepState) compareRepState.StartVersion = beforeResetVersion compareRepState.CurrentVersion = afterResetVersion compareRepState.LastWriteEventId = 34 compareRepState.LastWriteVersion = afterResetVersion compareRepState.LastReplicationInfo = map[string]*replicationspb.ReplicationInfo{ "standby": { LastEventId: 29, Version: beforeResetVersion, }, } s.Equal(compareRepState, resetReq.NewWorkflowSnapshot.ReplicationState) // not supported feature s.Empty(resetReq.NewWorkflowSnapshot.ChildExecutionInfos) s.Empty(resetReq.NewWorkflowSnapshot.SignalInfos) s.Empty(resetReq.NewWorkflowSnapshot.SignalRequestedIDs) s.Equal(0, len(resetReq.NewWorkflowSnapshot.RequestCancelInfos)) } func (s *resetorSuite) TestResetWorkflowExecution_Replication_NotActive() { namespace := "testNamespace" beforeResetVersion := int64(100) afterResetVersion := int64(101) s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(beforeResetVersion).Return("active").AnyTimes() s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(afterResetVersion).Return("standby").AnyTimes() testNamespaceEntry := cache.NewGlobalNamespaceCacheEntryForTest( &persistenceblobs.NamespaceInfo{Id: testNamespaceID}, &persistenceblobs.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, &persistenceblobs.NamespaceReplicationConfig{ ActiveClusterName: "active", Clusters: []string{ "active", "standby", }, }, afterResetVersion, cluster.GetTestClusterMetadata(true, true), ) // override namespace cache s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() request := &historyservice.ResetWorkflowExecutionRequest{} namespaceID := testNamespaceID request.NamespaceId = namespaceID request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{} wid := "wId" wfType := "wfType" taskQueueName := "taskQueue" forkRunID := uuid.New().String() currRunID := uuid.New().String() we := commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, } request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{ Namespace: namespace, WorkflowExecution: &we, Reason: "test reset", WorkflowTaskFinishEventId: 30, RequestId: uuid.New().String(), } 
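// Note: unlike the successful-reset tests in this suite, the ClusterNameForFailoverVersion
// stubs at the top of this test map the post-reset failover version (afterResetVersion) to
// "standby", which presumably is not the cluster handling the request. The reset is therefore
// expected to be rejected with serviceerror.NamespaceNotActive rather than persisted
// (asserted at the end of this test).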
forkGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, }, } timerFiredID := "timerID0" timerUnfiredID1 := "timerID1" timerUnfiredID2 := "timerID2" timerAfterReset := "timerID3" actIDCompleted1 := "actID0" actIDCompleted2 := "actID1" actIDRetry := "actID2" // not started, will reschedule actIDNotStarted := "actID3" // not started, will reschedule actIDStartedNoRetry := "actID4" // started, will fail signalName1 := "sig1" signalName2 := "sig2" signalName3 := "sig3" signalName4 := "sig4" forkBranchToken := []byte("forkBranchToken") forkExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: forkRunID, BranchToken: forkBranchToken, NextEventID: 35, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } forkRepState := &persistenceblobs.ReplicationState{ CurrentVersion: beforeResetVersion, StartVersion: beforeResetVersion, LastWriteEventId: common.EmptyEventID, LastWriteVersion: common.EmptyVersion, LastReplicationInfo: map[string]*replicationspb.ReplicationInfo{}, } forkGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: forkExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, ReplicationState: forkRepState, }} currGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: currRunID, }, } currExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: currRunID, NextEventID: common.FirstEventID, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } currGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: currExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, ReplicationState: forkRepState, }} gcurResponse := &persistence.GetCurrentExecutionResponse{ RunID: currRunID, } readHistoryReq := &persistence.ReadHistoryBranchRequest{ BranchToken: forkBranchToken, MinEventID: common.FirstEventID, MaxEventID: int64(35), PageSize: defaultHistoryPageSize, NextPageToken: nil, ShardID: &s.shardID, } taskQueue := &taskqueuepb.TaskQueue{ Name: taskQueueName, } readHistoryResp := &persistence.ReadHistoryBranchByBatchResponse{ NextPageToken: nil, Size: 1000, LastFirstEventID: int64(31), History: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: 1, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{ WorkflowType: &commonpb.WorkflowType{ Name: wfType, }, TaskQueue: taskQueue, Input: payloads.EncodeString("testInput"), WorkflowRunTimeout: timestamp.DurationPtr(100 * time.Second), WorkflowTaskTimeout: timestamp.DurationPtr(200 * time.Second), }}, }, { EventId: 2, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: 
&historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 3, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 2, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 4, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 2, StartedEventId: 3, }}, }, { EventId: 5, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_MARKER_RECORDED, Attributes: &historypb.HistoryEvent_MarkerRecordedEventAttributes{MarkerRecordedEventAttributes: &historypb.MarkerRecordedEventAttributes{ MarkerName: "Version", Details: map[string]*commonpb.Payloads{ "change-id": payloads.EncodeString("32283"), "version": payloads.EncodeInt(22), }, WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 6, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted1, ActivityType: &commonpb.ActivityType{ Name: "actType0", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 7, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerFiredID, StartToFireTimeout: timestamp.DurationPtr(2 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 8, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 6, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 9, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 6, StartedEventId: 8, }}, }, { EventId: 10, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 11, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: 
&historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 10, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 12, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 10, StartedEventId: 11, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 13, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_FIRED, Attributes: &historypb.HistoryEvent_TimerFiredEventAttributes{TimerFiredEventAttributes: &historypb.TimerFiredEventAttributes{ TimerId: timerFiredID, }}, }, { EventId: 14, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 15, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 14, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 16, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 14, StartedEventId: 15, }}, }, { EventId: 17, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDRetry, ActivityType: &commonpb.ActivityType{ Name: "actType1", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, RetryPolicy: &commonpb.RetryPolicy{ InitialInterval: timestamp.DurationPtr(1 * time.Second), BackoffCoefficient: 0.2, MaximumAttempts: 10, MaximumInterval: timestamp.DurationPtr(1000 * time.Second), }, }}, }, { EventId: 18, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDNotStarted, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 19, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: 
&historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID1, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 20, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID2, StartToFireTimeout: timestamp.DurationPtr(8 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 21, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted2, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 22, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDStartedNoRetry, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 23, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName3, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 24, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 21, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 25, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 26, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 22, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 27, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 21, StartedEventId: 24, }}, }, { EventId: 28, Version: 
beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 29, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 28, }}, }, }, }, // ///////////// reset point///////////// { Events: []*historypb.HistoryEvent{ { EventId: 30, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 28, StartedEventId: 29, }}, }, { EventId: 31, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerAfterReset, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 30, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 32, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 18, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 33, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName1, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 34, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName2, }}, }, }, }, }, } eid := int64(0) eventTime := time.Unix(0, 1000).UTC() for _, be := range readHistoryResp.History { for _, e := range be.Events { eid++ if e.GetEventId() != eid { s.Fail(fmt.Sprintf("inconintous eventID: %v, %v", eid, e.GetEventId())) } e.EventTime = &eventTime } } newBranchToken := []byte("newBranch") forkResp := &persistence.ForkHistoryBranchResponse{ NewBranchToken: newBranchToken, } s.mockExecutionMgr.On("GetWorkflowExecution", forkGwmsRequest).Return(forkGwmsResponse, nil).Once() s.mockExecutionMgr.On("GetCurrentExecution", mock.Anything).Return(gcurResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", currGwmsRequest).Return(currGwmsResponse, nil).Once() s.mockHistoryV2Mgr.On("ReadHistoryBranchByBatch", readHistoryReq).Return(readHistoryResp, nil).Once() s.mockHistoryV2Mgr.On("ForkHistoryBranch", mock.Anything).Return(forkResp, nil).Once() _, err := s.historyEngine.ResetWorkflowExecution(context.Background(), request) s.IsType(&serviceerror.NamespaceNotActive{}, err) } func (s *resetorSuite) TestResetWorkflowExecution_Replication_NoTerminatingCurrent() { namespace := "testNamespace" 
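// In this variant the current execution is already in WORKFLOW_EXECUTION_STATE_COMPLETED,
// so the reset does not need to terminate it: the test later expects no CurrentWorkflowMutation
// in the ResetWorkflowExecution request and only a single AppendHistoryNodes call (for the
// new branch).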
beforeResetVersion := int64(100) afterResetVersion := int64(101) s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(beforeResetVersion).Return("standby").AnyTimes() s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(afterResetVersion).Return("active").AnyTimes() testNamespaceEntry := cache.NewGlobalNamespaceCacheEntryForTest( &persistenceblobs.NamespaceInfo{Id: testNamespaceID}, &persistenceblobs.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, &persistenceblobs.NamespaceReplicationConfig{ ActiveClusterName: "active", Clusters: []string{ "active", "standby", }, }, afterResetVersion, cluster.GetTestClusterMetadata(true, true), ) // override namespace cache s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() request := &historyservice.ResetWorkflowExecutionRequest{} namespaceID := testNamespaceID request.NamespaceId = namespaceID request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{} wid := "wId" wfType := "wfType" taskQueueName := "taskQueue" forkRunID := uuid.New().String() currRunID := uuid.New().String() we := commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, } request.ResetRequest = &workflowservice.ResetWorkflowExecutionRequest{ Namespace: namespace, WorkflowExecution: &we, Reason: "test reset", WorkflowTaskFinishEventId: 30, RequestId: uuid.New().String(), } forkGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, }, } timerFiredID := "timerID0" timerUnfiredID1 := "timerID1" timerUnfiredID2 := "timerID2" timerAfterReset := "timerID3" actIDCompleted1 := "actID0" actIDCompleted2 := "actID1" actIDRetry := "actID2" // not started, will reschedule actIDNotStarted := "actID3" // not started, will reschedule actIDStartedNoRetry := "actID4" // started, will fail signalName1 := "sig1" signalName2 := "sig2" signalName3 := "sig3" signalName4 := "sig4" forkBranchToken := []byte("forkBranchToken") forkExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: forkRunID, BranchToken: forkBranchToken, NextEventID: 35, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } forkRepState := &persistenceblobs.ReplicationState{ CurrentVersion: beforeResetVersion, StartVersion: beforeResetVersion, LastWriteEventId: common.EmptyEventID, LastWriteVersion: common.EmptyVersion, LastReplicationInfo: map[string]*replicationspb.ReplicationInfo{}, } forkGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: forkExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, ReplicationState: forkRepState, }} currGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: currRunID, }, } currExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: currRunID, NextEventID: common.FirstEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: 
common.EmptyEventID, } compareCurrExeInfo := copyWorkflowExecutionInfo(currExeInfo) currGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: currExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, ReplicationState: forkRepState, }} gcurResponse := &persistence.GetCurrentExecutionResponse{ RunID: currRunID, } readHistoryReq := &persistence.ReadHistoryBranchRequest{ BranchToken: forkBranchToken, MinEventID: common.FirstEventID, MaxEventID: int64(35), PageSize: defaultHistoryPageSize, NextPageToken: nil, ShardID: &s.shardID, } taskQueue := &taskqueuepb.TaskQueue{ Name: taskQueueName, } readHistoryResp := &persistence.ReadHistoryBranchByBatchResponse{ NextPageToken: nil, Size: 1000, LastFirstEventID: int64(31), History: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: 1, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{ WorkflowType: &commonpb.WorkflowType{ Name: wfType, }, TaskQueue: taskQueue, Input: payloads.EncodeString("testInput"), WorkflowRunTimeout: timestamp.DurationPtr(100 * time.Second), WorkflowTaskTimeout: timestamp.DurationPtr(200 * time.Second), }}, }, { EventId: 2, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 3, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 2, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 4, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 2, StartedEventId: 3, }}, }, { EventId: 5, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_MARKER_RECORDED, Attributes: &historypb.HistoryEvent_MarkerRecordedEventAttributes{MarkerRecordedEventAttributes: &historypb.MarkerRecordedEventAttributes{ MarkerName: "Version", Details: map[string]*commonpb.Payloads{ "change-id": payloads.EncodeString("32283"), "version": payloads.EncodeInt(22), }, WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 6, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted1, ActivityType: &commonpb.ActivityType{ Name: "actType0", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 7, Version: beforeResetVersion, EventType: 
enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerFiredID, StartToFireTimeout: timestamp.DurationPtr(2 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 8, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 6, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 9, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 6, StartedEventId: 8, }}, }, { EventId: 10, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 11, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 10, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 12, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 10, StartedEventId: 11, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 13, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_FIRED, Attributes: &historypb.HistoryEvent_TimerFiredEventAttributes{TimerFiredEventAttributes: &historypb.TimerFiredEventAttributes{ TimerId: timerFiredID, }}, }, { EventId: 14, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 15, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 14, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 16, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 14, StartedEventId: 15, }}, }, { EventId: 17, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ 
ActivityId: actIDRetry, ActivityType: &commonpb.ActivityType{ Name: "actType1", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, RetryPolicy: &commonpb.RetryPolicy{ InitialInterval: timestamp.DurationPtr(1 * time.Second), BackoffCoefficient: 0.2, MaximumAttempts: 10, MaximumInterval: timestamp.DurationPtr(1000 * time.Second), }, }}, }, { EventId: 18, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDNotStarted, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 19, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID1, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 20, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID2, StartToFireTimeout: timestamp.DurationPtr(8 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 21, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted2, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 22, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDStartedNoRetry, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 23, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: 
&historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName3, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 24, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 21, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 25, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 26, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 22, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 27, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 21, StartedEventId: 24, }}, }, { EventId: 28, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 29, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 28, }}, }, }, }, // ///////////// reset point///////////// { Events: []*historypb.HistoryEvent{ { EventId: 30, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 28, StartedEventId: 29, }}, }, { EventId: 31, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerAfterReset, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 30, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 32, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 18, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 33, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName1, }}, }, }, }, { Events: 
[]*historypb.HistoryEvent{ { EventId: 34, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName2, }}, }, }, }, }, } eid := int64(0) eventTime := time.Unix(0, 1000).UTC() for _, be := range readHistoryResp.History { for _, e := range be.Events { eid++ if e.GetEventId() != eid { s.Fail(fmt.Sprintf("inconintous eventID: %v, %v", eid, e.GetEventId())) } e.EventTime = &eventTime } } newBranchToken := []byte("newBranch") forkResp := &persistence.ForkHistoryBranchResponse{ NewBranchToken: newBranchToken, } appendV2Resp := &persistence.AppendHistoryNodesResponse{ Size: 200, } s.mockExecutionMgr.On("GetWorkflowExecution", forkGwmsRequest).Return(forkGwmsResponse, nil).Once() s.mockExecutionMgr.On("GetCurrentExecution", mock.Anything).Return(gcurResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", currGwmsRequest).Return(currGwmsResponse, nil).Once() s.mockHistoryV2Mgr.On("ReadHistoryBranchByBatch", readHistoryReq).Return(readHistoryResp, nil).Once() s.mockHistoryV2Mgr.On("ForkHistoryBranch", mock.Anything).Return(forkResp, nil).Once() s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(appendV2Resp, nil).Once() s.mockExecutionMgr.On("ResetWorkflowExecution", mock.Anything).Return(nil).Once() response, err := s.historyEngine.ResetWorkflowExecution(context.Background(), request) s.Nil(err) s.NotNil(response.RunId) // verify historyEvent: 5 events to append // 1. workflowTaskFailed // 2. activityTaskFailed // 3. signal 1 // 4. signal 2 // 5. workflowTaskScheduled calls := s.mockHistoryV2Mgr.Calls s.Equal(3, len(calls)) appendCall := calls[2] s.Equal("AppendHistoryNodes", appendCall.Method) appendReq, ok := appendCall.Arguments[0].(*persistence.AppendHistoryNodesRequest) s.Equal(true, ok) s.Equal(newBranchToken, appendReq.BranchToken) s.Equal(false, appendReq.IsNewBranch) s.Equal(5, len(appendReq.Events)) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_FAILED, enumspb.EventType(appendReq.Events[0].GetEventType())) s.Equal(enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED, enumspb.EventType(appendReq.Events[1].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, enumspb.EventType(appendReq.Events[2].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, enumspb.EventType(appendReq.Events[3].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, enumspb.EventType(appendReq.Events[4].GetEventType())) s.Equal(int64(30), appendReq.Events[0].GetEventId()) s.Equal(int64(31), appendReq.Events[1].GetEventId()) s.Equal(int64(32), appendReq.Events[2].GetEventId()) s.Equal(int64(33), appendReq.Events[3].GetEventId()) s.Equal(int64(34), appendReq.Events[4].GetEventId()) // verify executionManager request calls = s.mockExecutionMgr.Calls s.Equal(4, len(calls)) resetCall := calls[3] s.Equal("ResetWorkflowExecution", resetCall.Method) resetReq, ok := resetCall.Arguments[0].(*persistence.ResetWorkflowExecutionRequest) s.Equal(true, ok) s.Equal(false, resetReq.CurrentWorkflowMutation != nil) s.Equal(compareCurrExeInfo.RunID, resetReq.CurrentRunID) s.Equal(compareCurrExeInfo.NextEventID, resetReq.CurrentRunNextEventID) s.Equal("wfType", resetReq.NewWorkflowSnapshot.ExecutionInfo.WorkflowTypeName) s.True(len(resetReq.NewWorkflowSnapshot.ExecutionInfo.RunID) > 0) s.Equal([]byte(newBranchToken), 
resetReq.NewWorkflowSnapshot.ExecutionInfo.BranchToken) s.Equal(int64(34), resetReq.NewWorkflowSnapshot.ExecutionInfo.WorkflowTaskScheduleID) s.Equal(int64(35), resetReq.NewWorkflowSnapshot.ExecutionInfo.NextEventID) s.Equal(4, len(resetReq.NewWorkflowSnapshot.TransferTasks)) s.Equal(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[1].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_WORKFLOW_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[2].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_RECORD_WORKFLOW_STARTED, resetReq.NewWorkflowSnapshot.TransferTasks[3].GetType()) // WF timeout task, user timer, activity timeout timer, activity retry timer s.Equal(3, len(resetReq.NewWorkflowSnapshot.TimerTasks)) s.Equal(enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT, resetReq.NewWorkflowSnapshot.TimerTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_USER_TIMER, resetReq.NewWorkflowSnapshot.TimerTasks[1].GetType()) s.Equal(enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT, resetReq.NewWorkflowSnapshot.TimerTasks[2].GetType()) s.Equal(2, len(resetReq.NewWorkflowSnapshot.TimerInfos)) s.assertTimerIDs([]string{timerUnfiredID1, timerUnfiredID2}, resetReq.NewWorkflowSnapshot.TimerInfos) s.Equal(2, len(resetReq.NewWorkflowSnapshot.ActivityInfos)) s.assertActivityIDs([]string{actIDRetry, actIDNotStarted}, resetReq.NewWorkflowSnapshot.ActivityInfos) s.Equal(1, len(resetReq.NewWorkflowSnapshot.ReplicationTasks)) s.Equal(enumsspb.TASK_TYPE_REPLICATION_HISTORY, resetReq.NewWorkflowSnapshot.ReplicationTasks[0].GetType()) compareRepState := copyReplicationState(forkRepState) compareRepState.StartVersion = beforeResetVersion compareRepState.CurrentVersion = afterResetVersion compareRepState.LastWriteEventId = 34 compareRepState.LastWriteVersion = afterResetVersion compareRepState.LastReplicationInfo = map[string]*replicationspb.ReplicationInfo{ "standby": { LastEventId: 29, Version: beforeResetVersion, }, } s.Equal(compareRepState, resetReq.NewWorkflowSnapshot.ReplicationState) // not supported feature s.Empty(resetReq.NewWorkflowSnapshot.ChildExecutionInfos) s.Empty(resetReq.NewWorkflowSnapshot.SignalInfos) s.Empty(resetReq.NewWorkflowSnapshot.SignalRequestedIDs) s.Equal(0, len(resetReq.NewWorkflowSnapshot.RequestCancelInfos)) } func (s *resetorSuite) TestApplyReset() { namespaceID := testNamespaceID beforeResetVersion := int64(100) afterResetVersion := int64(101) s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(beforeResetVersion).Return(cluster.TestAlternativeClusterName).AnyTimes() s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(afterResetVersion).Return(cluster.TestCurrentClusterName).AnyTimes() testNamespaceEntry := cache.NewGlobalNamespaceCacheEntryForTest( &persistenceblobs.NamespaceInfo{Id: testNamespaceID}, &persistenceblobs.NamespaceConfig{Retention: timestamp.DurationFromDays(1)}, &persistenceblobs.NamespaceReplicationConfig{ ActiveClusterName: cluster.TestAlternativeClusterName, Clusters: []string{ cluster.TestCurrentClusterName, cluster.TestAlternativeClusterName, }, }, afterResetVersion, cluster.GetTestClusterMetadata(true, true), ) // override namespace cache s.mockNamespaceCache.EXPECT().GetNamespaceByID(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() s.mockNamespaceCache.EXPECT().GetNamespace(gomock.Any()).Return(testNamespaceEntry, nil).AnyTimes() wid := "wId" wfType := "wfType" taskQueueName := "taskQueue" forkRunID := uuid.New().String() 
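// TestApplyReset exercises applying a reset through the replication path: base history is
// read up to the reset point (MaxEventID 30), a new branch is forked at node 30, and the
// post-reset events (30-34, carrying afterResetVersion) arrive in the ReplicateEventsRequest
// built at the end of this setup.
//
// Rough sketch of the branch layout this test models (illustration only, not executed):
//
//	base run (forkRunID): 1 ... 29 | 30 ... 34      <- original events past the reset point
//	new run  (newRunID):  1 ... 29 | 30' ... 34'    <- forked at node 30; 30'-34' carry
//	                                                   afterResetVersion and come from the
//	                                                   replication request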
currRunID := uuid.New().String() newRunID := uuid.New().String() we := commonpb.WorkflowExecution{ WorkflowId: wid, RunId: newRunID, } forkGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: forkRunID, }, } timerFiredID := "timerID0" timerUnfiredID1 := "timerID1" timerUnfiredID2 := "timerID2" actIDCompleted1 := "actID0" actIDCompleted2 := "actID1" actIDRetry := "actID2" // not started, will reschedule actIDNotStarted := "actID3" // not started, will reschedule actIDStartedNoRetry := "actID4" // started, will fail signalName1 := "sig1" signalName2 := "sig2" signalName3 := "sig3" signalName4 := "sig4" forkBranchToken := []byte("forkBranchToken") forkExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: forkRunID, BranchToken: forkBranchToken, NextEventID: 35, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, } forkRepState := &persistenceblobs.ReplicationState{ CurrentVersion: beforeResetVersion, StartVersion: beforeResetVersion, LastWriteEventId: common.EmptyEventID, LastWriteVersion: common.EmptyVersion, LastReplicationInfo: map[string]*replicationspb.ReplicationInfo{}, } forkGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: forkExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, ReplicationState: forkRepState, }} currGwmsRequest := &persistence.GetWorkflowExecutionRequest{ NamespaceID: namespaceID, Execution: commonpb.WorkflowExecution{ WorkflowId: wid, RunId: currRunID, }, } currExeInfo := &persistence.WorkflowExecutionInfo{ NamespaceID: namespaceID, WorkflowID: wid, WorkflowTypeName: wfType, TaskQueue: taskQueueName, RunID: currRunID, NextEventID: common.FirstEventID, State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleID: common.EmptyEventID, WorkflowTaskStartedID: common.EmptyEventID, } compareCurrExeInfo := copyWorkflowExecutionInfo(currExeInfo) currGwmsResponse := &persistence.GetWorkflowExecutionResponse{State: &persistence.WorkflowMutableState{ ExecutionInfo: currExeInfo, ExecutionStats: &persistenceblobs.ExecutionStats{}, ReplicationState: forkRepState, }} readHistoryReq := &persistence.ReadHistoryBranchRequest{ BranchToken: forkBranchToken, MinEventID: common.FirstEventID, MaxEventID: int64(30), PageSize: defaultHistoryPageSize, NextPageToken: nil, ShardID: &s.shardID, } taskQueue := &taskqueuepb.TaskQueue{ Name: taskQueueName, } readHistoryResp := &persistence.ReadHistoryBranchByBatchResponse{ NextPageToken: nil, Size: 1000, LastFirstEventID: int64(31), History: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: 1, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, Attributes: &historypb.HistoryEvent_WorkflowExecutionStartedEventAttributes{WorkflowExecutionStartedEventAttributes: &historypb.WorkflowExecutionStartedEventAttributes{ WorkflowType: &commonpb.WorkflowType{ Name: wfType, }, TaskQueue: taskQueue, Input: payloads.EncodeString("testInput"), WorkflowRunTimeout: timestamp.DurationPtr(100 * time.Second), WorkflowTaskTimeout: timestamp.DurationPtr(200 * time.Second), }}, }, { EventId: 2, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, 
Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 3, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 2, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 4, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 2, StartedEventId: 3, }}, }, { EventId: 5, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_MARKER_RECORDED, Attributes: &historypb.HistoryEvent_MarkerRecordedEventAttributes{MarkerRecordedEventAttributes: &historypb.MarkerRecordedEventAttributes{ MarkerName: "Version", Details: map[string]*commonpb.Payloads{ "change-id": payloads.EncodeString("32283"), "version": payloads.EncodeInt(22), }, WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 6, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted1, ActivityType: &commonpb.ActivityType{ Name: "actType0", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, { EventId: 7, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerFiredID, StartToFireTimeout: timestamp.DurationPtr(2 * time.Second), WorkflowTaskCompletedEventId: 4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 8, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 6, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 9, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 6, StartedEventId: 8, }}, }, { EventId: 10, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 11, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: 
&historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 10, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 12, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 10, StartedEventId: 11, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 13, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_FIRED, Attributes: &historypb.HistoryEvent_TimerFiredEventAttributes{TimerFiredEventAttributes: &historypb.TimerFiredEventAttributes{ TimerId: timerFiredID, }}, }, { EventId: 14, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 15, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 14, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 16, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_WorkflowTaskCompletedEventAttributes{WorkflowTaskCompletedEventAttributes: &historypb.WorkflowTaskCompletedEventAttributes{ ScheduledEventId: 14, StartedEventId: 15, }}, }, { EventId: 17, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDRetry, ActivityType: &commonpb.ActivityType{ Name: "actType1", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, RetryPolicy: &commonpb.RetryPolicy{ InitialInterval: timestamp.DurationPtr(1 * time.Second), BackoffCoefficient: 0.2, MaximumAttempts: 10, MaximumInterval: timestamp.DurationPtr(1000 * time.Second), }, }}, }, { EventId: 18, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDNotStarted, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 19, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: 
&historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID1, StartToFireTimeout: timestamp.DurationPtr(4 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 20, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_TIMER_STARTED, Attributes: &historypb.HistoryEvent_TimerStartedEventAttributes{TimerStartedEventAttributes: &historypb.TimerStartedEventAttributes{ TimerId: timerUnfiredID2, StartToFireTimeout: timestamp.DurationPtr(8 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 21, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDCompleted2, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 22, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ ActivityId: actIDStartedNoRetry, ActivityType: &commonpb.ActivityType{ Name: "actType2", }, TaskQueue: taskQueue, ScheduleToCloseTimeout: timestamp.DurationPtr(1000 * time.Second), ScheduleToStartTimeout: timestamp.DurationPtr(2000 * time.Second), StartToCloseTimeout: timestamp.DurationPtr(3000 * time.Second), HeartbeatTimeout: timestamp.DurationPtr(4000 * time.Second), WorkflowTaskCompletedEventId: 16, }}, }, { EventId: 23, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName3, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 24, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 21, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 25, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName4, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 26, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED, Attributes: &historypb.HistoryEvent_ActivityTaskStartedEventAttributes{ActivityTaskStartedEventAttributes: &historypb.ActivityTaskStartedEventAttributes{ ScheduledEventId: 22, }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 27, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED, Attributes: &historypb.HistoryEvent_ActivityTaskCompletedEventAttributes{ActivityTaskCompletedEventAttributes: &historypb.ActivityTaskCompletedEventAttributes{ ScheduledEventId: 21, StartedEventId: 24, }}, }, { EventId: 28, Version: 
beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: 29, Version: beforeResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED, Attributes: &historypb.HistoryEvent_WorkflowTaskStartedEventAttributes{WorkflowTaskStartedEventAttributes: &historypb.WorkflowTaskStartedEventAttributes{ ScheduledEventId: 28, }}, }, }, }, // ///////////// reset point///////////// }, } eid := int64(0) eventTime := time.Unix(0, 1000).UTC() for _, be := range readHistoryResp.History { for _, e := range be.Events { eid++ if e.GetEventId() != eid { s.Fail(fmt.Sprintf("inconintous eventID: %v, %v", eid, e.GetEventId())) } e.EventTime = &eventTime } } newBranchToken := []byte("newBranch") forkReq := &persistence.ForkHistoryBranchRequest{ ForkBranchToken: forkBranchToken, ForkNodeID: 30, Info: persistence.BuildHistoryGarbageCleanupInfo(namespaceID, wid, newRunID), ShardID: &s.shardID, } forkResp := &persistence.ForkHistoryBranchResponse{ NewBranchToken: newBranchToken, } historyAfterReset := &historypb.History{ Events: []*historypb.HistoryEvent{ { EventId: 30, Version: afterResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_FAILED, Attributes: &historypb.HistoryEvent_WorkflowTaskFailedEventAttributes{WorkflowTaskFailedEventAttributes: &historypb.WorkflowTaskFailedEventAttributes{ ScheduledEventId: int64(28), StartedEventId: int64(29), Cause: enumspb.WORKFLOW_TASK_FAILED_CAUSE_RESET_WORKFLOW, Identity: identityHistoryService, Failure: failure.NewResetWorkflowFailure("resetWFtest", nil), BaseRunId: forkRunID, NewRunId: newRunID, ForkEventVersion: beforeResetVersion, }}, }, { EventId: 31, Version: afterResetVersion, EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED, Attributes: &historypb.HistoryEvent_ActivityTaskFailedEventAttributes{ActivityTaskFailedEventAttributes: &historypb.ActivityTaskFailedEventAttributes{ Failure: failure.NewResetWorkflowFailure("resetWF", nil), ScheduledEventId: 22, StartedEventId: 26, Identity: identityHistoryService, }}, }, { EventId: 32, Version: afterResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName1, }}, }, { EventId: 33, Version: afterResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, Attributes: &historypb.HistoryEvent_WorkflowExecutionSignaledEventAttributes{WorkflowExecutionSignaledEventAttributes: &historypb.WorkflowExecutionSignaledEventAttributes{ SignalName: signalName2, }}, }, { EventId: 34, Version: afterResetVersion, EventType: enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, Attributes: &historypb.HistoryEvent_WorkflowTaskScheduledEventAttributes{WorkflowTaskScheduledEventAttributes: &historypb.WorkflowTaskScheduledEventAttributes{ TaskQueue: taskQueue, StartToCloseTimeout: timestamp.DurationPtr(100 * time.Second), }}, }, }, } appendV2Resp := &persistence.AppendHistoryNodesResponse{ Size: 200, } request := &historyservice.ReplicateEventsRequest{ SourceCluster: "standby", NamespaceId: namespaceID, WorkflowExecution: &we, FirstEventId: 30, NextEventId: 35, History: historyAfterReset, } 
s.mockExecutionMgr.On("GetWorkflowExecution", forkGwmsRequest).Return(forkGwmsResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", currGwmsRequest).Return(currGwmsResponse, nil).Once() s.mockHistoryV2Mgr.On("ReadHistoryBranchByBatch", readHistoryReq).Return(readHistoryResp, nil).Once() s.mockHistoryV2Mgr.On("ForkHistoryBranch", forkReq).Return(forkResp, nil).Once() s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything).Return(appendV2Resp, nil).Once() s.mockExecutionMgr.On("ResetWorkflowExecution", mock.Anything).Return(nil).Once() err := s.resetor.ApplyResetEvent(context.Background(), request, namespaceID, wid, currRunID) s.Nil(err) // verify historyEvent: 5 events to append // 1. workflowTaskFailed // 2. activityTaskFailed // 3. signal 1 // 4. signal 2 // 5. workflowTaskScheduled calls := s.mockHistoryV2Mgr.Calls s.Equal(3, len(calls)) appendCall := calls[2] s.Equal("AppendHistoryNodes", appendCall.Method) appendReq, ok := appendCall.Arguments[0].(*persistence.AppendHistoryNodesRequest) s.Equal(true, ok) s.Equal(newBranchToken, appendReq.BranchToken) s.Equal(false, appendReq.IsNewBranch) s.Equal(5, len(appendReq.Events)) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_FAILED, enumspb.EventType(appendReq.Events[0].GetEventType())) s.Equal(enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED, enumspb.EventType(appendReq.Events[1].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, enumspb.EventType(appendReq.Events[2].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED, enumspb.EventType(appendReq.Events[3].GetEventType())) s.Equal(enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED, enumspb.EventType(appendReq.Events[4].GetEventType())) s.Equal(int64(30), appendReq.Events[0].GetEventId()) s.Equal(int64(31), appendReq.Events[1].GetEventId()) s.Equal(int64(32), appendReq.Events[2].GetEventId()) s.Equal(int64(33), appendReq.Events[3].GetEventId()) s.Equal(int64(34), appendReq.Events[4].GetEventId()) s.Equal(enumspb.EncodingType(enumspb.EncodingType_value[s.config.EventEncodingType(namespaceID)]), appendReq.Encoding) // verify executionManager request calls = s.mockExecutionMgr.Calls s.Equal(3, len(calls)) resetCall := calls[2] s.Equal("ResetWorkflowExecution", resetCall.Method) resetReq, ok := resetCall.Arguments[0].(*persistence.ResetWorkflowExecutionRequest) s.Equal(true, ok) s.Equal(false, resetReq.CurrentWorkflowMutation != nil) s.Equal(compareCurrExeInfo.RunID, resetReq.CurrentRunID) s.Equal(compareCurrExeInfo.NextEventID, resetReq.CurrentRunNextEventID) s.Equal("wfType", resetReq.NewWorkflowSnapshot.ExecutionInfo.WorkflowTypeName) s.True(len(resetReq.NewWorkflowSnapshot.ExecutionInfo.RunID) > 0) s.Equal([]byte(newBranchToken), resetReq.NewWorkflowSnapshot.ExecutionInfo.BranchToken) s.Equal(int64(34), resetReq.NewWorkflowSnapshot.ExecutionInfo.WorkflowTaskScheduleID) s.Equal(int64(35), resetReq.NewWorkflowSnapshot.ExecutionInfo.NextEventID) s.Equal(3, len(resetReq.NewWorkflowSnapshot.TransferTasks)) s.Equal(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[0].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[1].GetType()) s.Equal(enumsspb.TASK_TYPE_TRANSFER_WORKFLOW_TASK, resetReq.NewWorkflowSnapshot.TransferTasks[2].GetType()) // WF timeout task, user timer, activity timeout timer, activity retry timer s.Equal(3, len(resetReq.NewWorkflowSnapshot.TimerTasks)) s.Equal(enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT, resetReq.NewWorkflowSnapshot.TimerTasks[0].GetType()) 
s.Equal(enumsspb.TASK_TYPE_USER_TIMER, resetReq.NewWorkflowSnapshot.TimerTasks[1].GetType()) s.Equal(enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT, resetReq.NewWorkflowSnapshot.TimerTasks[2].GetType()) s.Equal(2, len(resetReq.NewWorkflowSnapshot.TimerInfos)) s.assertTimerIDs([]string{timerUnfiredID1, timerUnfiredID2}, resetReq.NewWorkflowSnapshot.TimerInfos) s.Equal(2, len(resetReq.NewWorkflowSnapshot.ActivityInfos)) s.assertActivityIDs([]string{actIDRetry, actIDNotStarted}, resetReq.NewWorkflowSnapshot.ActivityInfos) compareRepState := copyReplicationState(forkRepState) compareRepState.StartVersion = beforeResetVersion compareRepState.CurrentVersion = afterResetVersion compareRepState.LastWriteEventId = 34 compareRepState.LastWriteVersion = afterResetVersion compareRepState.LastReplicationInfo = map[string]*replicationspb.ReplicationInfo{ "standby": { LastEventId: 29, Version: beforeResetVersion, }, } s.Equal(compareRepState, resetReq.NewWorkflowSnapshot.ReplicationState) s.Equal(0, len(resetReq.NewWorkflowSnapshot.ReplicationTasks)) // not supported feature s.Empty(resetReq.NewWorkflowSnapshot.ChildExecutionInfos) s.Empty(resetReq.NewWorkflowSnapshot.SignalInfos) s.Empty(resetReq.NewWorkflowSnapshot.SignalRequestedIDs) s.Equal(0, len(resetReq.NewWorkflowSnapshot.RequestCancelInfos)) } func TestFindAutoResetPoint(t *testing.T) { timeSource := clock.NewRealTimeSource() // case 1: nil _, pt := FindAutoResetPoint(timeSource, nil, nil) assert.Nil(t, pt) // case 2: empty _, pt = FindAutoResetPoint(timeSource, &namespacepb.BadBinaries{}, &workflowpb.ResetPoints{}) assert.Nil(t, pt) pt0 := &workflowpb.ResetPointInfo{ BinaryChecksum: "abc", Resettable: true, } pt1 := &workflowpb.ResetPointInfo{ BinaryChecksum: "def", Resettable: true, } pt3 := &workflowpb.ResetPointInfo{ BinaryChecksum: "ghi", Resettable: false, } expiredNow := time.Now().UTC().Add(-1 * time.Hour) notExpiredNow := time.Now().UTC().Add(time.Hour) pt4 := &workflowpb.ResetPointInfo{ BinaryChecksum: "expired", Resettable: true, ExpireTime: &expiredNow, } pt5 := &workflowpb.ResetPointInfo{ BinaryChecksum: "notExpired", Resettable: true, ExpireTime: &notExpiredNow, } // case 3: two intersection _, pt = FindAutoResetPoint(timeSource, &namespacepb.BadBinaries{ Binaries: map[string]*namespacepb.BadBinaryInfo{ "abc": {}, "def": {}, }, }, &workflowpb.ResetPoints{ Points: []*workflowpb.ResetPointInfo{ pt0, pt1, pt3, }, }) assert.Equal(t, pt.String(), pt0.String()) // case 4: one intersection _, pt = FindAutoResetPoint(timeSource, &namespacepb.BadBinaries{ Binaries: map[string]*namespacepb.BadBinaryInfo{ "none": {}, "def": {}, "expired": {}, }, }, &workflowpb.ResetPoints{ Points: []*workflowpb.ResetPointInfo{ pt4, pt0, pt1, pt3, }, }) assert.Equal(t, pt.String(), pt1.String()) // case 4: no intersection _, pt = FindAutoResetPoint(timeSource, &namespacepb.BadBinaries{ Binaries: map[string]*namespacepb.BadBinaryInfo{ "none1": {}, "none2": {}, }, }, &workflowpb.ResetPoints{ Points: []*workflowpb.ResetPointInfo{ pt0, pt1, pt3, }, }) assert.Nil(t, pt) // case 5: not resettable _, pt = FindAutoResetPoint(timeSource, &namespacepb.BadBinaries{ Binaries: map[string]*namespacepb.BadBinaryInfo{ "none1": {}, "ghi": {}, }, }, &workflowpb.ResetPoints{ Points: []*workflowpb.ResetPointInfo{ pt0, pt1, pt3, }, }) assert.Nil(t, pt) // case 6: one intersection of expired _, pt = FindAutoResetPoint(timeSource, &namespacepb.BadBinaries{ Binaries: map[string]*namespacepb.BadBinaryInfo{ "none": {}, "expired": {}, }, }, &workflowpb.ResetPoints{ Points: 
[]*workflowpb.ResetPointInfo{ pt0, pt1, pt3, pt4, pt5, }, }) assert.Nil(t, pt) // case 7: one intersection of not expired _, pt = FindAutoResetPoint(timeSource, &namespacepb.BadBinaries{ Binaries: map[string]*namespacepb.BadBinaryInfo{ "none": {}, "notExpired": {}, }, }, &workflowpb.ResetPoints{ Points: []*workflowpb.ResetPointInfo{ pt0, pt1, pt3, pt4, pt5, }, }) assert.Equal(t, pt.String(), pt5.String()) }
1
10,116
If there are any other tests, I would make it optional there also.
temporalio-temporal
go
@@ -54,6 +54,7 @@ Workshops::Application.routes.draw do end match '/watch' => 'high_voltage/pages#show', as: :watch, id: 'watch' + match '/my-accounts' => 'high_voltage/pages#show', as: :watch, id: 'my-accounts' match '/directions' => "high_voltage/pages#show", as: :directions, id: "directions" match '/group-training' => "high_voltage/pages#show", as: :group_training, id: "group-training"
1
Workshops::Application.routes.draw do mount RailsAdmin::Engine => '/new_admin', :as => 'rails_admin' root to: 'topics#index' match '/pages/tmux' => redirect("/products/4-humans-present-tmux") resource :session, controller: 'sessions' resources :sections, only: [:show] do resources :registrations, only: [:index, :new, :create] resources :redemptions, only: [:new] end resources :courses, only: [:index, :show] do resources :follow_ups, only: [:create] end resources :products, only: [:show] do resources :redemptions, only: [:new] resources :purchases, only: [:new, :create, :show] do resources :videos, only: [:show] member do get 'paypal' get 'watch' end end end resources :payments, only: [:create] resource :shopify, controller: 'shopify' do member do post 'order_paid' end end resources :topics, only: :index resources :topics, only: :show, as: :full_topic match '/admin' => 'admin/courses#index', as: :admin namespace :admin do resources :courses do resource :position resources :sections resources :follow_ups resources :questions, only: [:destroy] end resources :coupons resources :audiences resources :sections do resources :registrations end resources :teachers, except: :destroy resources :products, except: :destroy resources :purchases, only: :index end match '/watch' => 'high_voltage/pages#show', as: :watch, id: 'watch' match '/directions' => "high_voltage/pages#show", as: :directions, id: "directions" match '/group-training' => "high_voltage/pages#show", as: :group_training, id: "group-training" match '/humans-present/oss' => "high_voltage/pages#show", as: :humans_present_oss, id: "humans-present-oss" match '/backbone-js-on-rails' => redirect("/products/1-backbone-js-on-rails") match '/rubyist-booster-shot' => "high_voltage/pages#show", as: :rubyist_booster_shot, id: "rubyist-booster-shot" match '/my_account' => 'users#update', as: 'my_account', via: :put match '/my_account' => 'users#edit', as: 'my_account' match '/sign_up' => 'users#new', as: 'sign_up' match '/sign_in' => 'sessions#new', as: 'sign_in' mount Split::Dashboard, at: 'split' get ':id' => 'topics#show', as: :topic end
1
6,373
This can be removed too, right?
thoughtbot-upcase
rb
@@ -248,7 +248,7 @@ public class SettingsExporter { // Write outgoing server settings - ServerSettings outgoing = Transport.decodeTransportUri(account.getTransportUri()); + ServerSettings outgoing = Transport.decodeTransportUri(account.getTransportUri(0)); serializer.startTag(null, OUTGOING_SERVER_ELEMENT); serializer.attribute(null, TYPE_ATTRIBUTE, outgoing.type);
1
package com.fsck.k9.preferences; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; import java.util.Map.Entry; import org.xmlpull.v1.XmlSerializer; import android.content.Context; import android.content.SharedPreferences; import android.os.Environment; import android.util.Log; import android.util.Xml; import com.fsck.k9.Account; import com.fsck.k9.K9; import com.fsck.k9.Preferences; import com.fsck.k9.helper.Utility; import com.fsck.k9.mail.Store; import com.fsck.k9.mail.ServerSettings; import com.fsck.k9.mail.Transport; import com.fsck.k9.preferences.Settings.InvalidSettingValueException; import com.fsck.k9.preferences.Settings.SettingsDescription; public class SettingsExporter { private static final String EXPORT_FILENAME = "settings.k9s"; /** * File format version number. * * <p> * Increment this if you need to change the structure of the settings file. When you do this * remember that we also have to be able to handle old file formats. So have fun adding support * for that to {@link SettingsImporter} :) * </p> */ public static final int FILE_FORMAT_VERSION = 1; public static final String ROOT_ELEMENT = "k9settings"; public static final String VERSION_ATTRIBUTE = "version"; public static final String FILE_FORMAT_ATTRIBUTE = "format"; public static final String GLOBAL_ELEMENT = "global"; public static final String SETTINGS_ELEMENT = "settings"; public static final String ACCOUNTS_ELEMENT = "accounts"; public static final String ACCOUNT_ELEMENT = "account"; public static final String UUID_ATTRIBUTE = "uuid"; public static final String INCOMING_SERVER_ELEMENT = "incoming-server"; public static final String OUTGOING_SERVER_ELEMENT = "outgoing-server"; public static final String TYPE_ATTRIBUTE = "type"; public static final String HOST_ELEMENT = "host"; public static final String PORT_ELEMENT = "port"; public static final String CONNECTION_SECURITY_ELEMENT = "connection-security"; public static final String AUTHENTICATION_TYPE_ELEMENT = "authentication-type"; public static final String USERNAME_ELEMENT = "username"; public static final String PASSWORD_ELEMENT = "password"; public static final String EXTRA_ELEMENT = "extra"; public static final String IDENTITIES_ELEMENT = "identities"; public static final String IDENTITY_ELEMENT = "identity"; public static final String FOLDERS_ELEMENT = "folders"; public static final String FOLDER_ELEMENT = "folder"; public static final String NAME_ATTRIBUTE = "name"; public static final String VALUE_ELEMENT = "value"; public static final String KEY_ATTRIBUTE = "key"; public static final String NAME_ELEMENT = "name"; public static final String EMAIL_ELEMENT = "email"; public static final String DESCRIPTION_ELEMENT = "description"; public static String exportToFile(Context context, boolean includeGlobals, Set<String> accountUuids) throws SettingsImportExportException { OutputStream os = null; String filename = null; try { File dir = new File(Environment.getExternalStorageDirectory() + File.separator + context.getPackageName()); dir.mkdirs(); File file = Utility.createUniqueFile(dir, EXPORT_FILENAME); filename = file.getAbsolutePath(); os = new FileOutputStream(filename); exportPreferences(context, os, includeGlobals, accountUuids); // If all went well, we return the name of the file just written. 
return filename; } catch (Exception e) { throw new SettingsImportExportException(e); } finally { if (os != null) { try { os.close(); } catch (IOException ioe) { Log.w(K9.LOG_TAG, "Couldn't close exported settings file: " + filename); } } } } public static void exportPreferences(Context context, OutputStream os, boolean includeGlobals, Set<String> accountUuids) throws SettingsImportExportException { try { XmlSerializer serializer = Xml.newSerializer(); serializer.setOutput(os, "UTF-8"); serializer.startDocument(null, Boolean.TRUE); // Output with indentation serializer.setFeature("http://xmlpull.org/v1/doc/features.html#indent-output", true); serializer.startTag(null, ROOT_ELEMENT); serializer.attribute(null, VERSION_ATTRIBUTE, Integer.toString(Settings.VERSION)); serializer.attribute(null, FILE_FORMAT_ATTRIBUTE, Integer.toString(FILE_FORMAT_VERSION)); Log.i(K9.LOG_TAG, "Exporting preferences"); Preferences preferences = Preferences.getPreferences(context); SharedPreferences storage = preferences.getPreferences(); Set<String> exportAccounts; if (accountUuids == null) { Account[] accounts = preferences.getAccounts(); exportAccounts = new HashSet<String>(); for (Account account : accounts) { exportAccounts.add(account.getUuid()); } } else { exportAccounts = accountUuids; } Map<String, Object> prefs = new TreeMap<String, Object>(storage.getAll()); if (includeGlobals) { serializer.startTag(null, GLOBAL_ELEMENT); writeSettings(serializer, prefs); serializer.endTag(null, GLOBAL_ELEMENT); } serializer.startTag(null, ACCOUNTS_ELEMENT); for (String accountUuid : exportAccounts) { Account account = preferences.getAccount(accountUuid); writeAccount(serializer, account, prefs); } serializer.endTag(null, ACCOUNTS_ELEMENT); serializer.endTag(null, ROOT_ELEMENT); serializer.endDocument(); serializer.flush(); } catch (Exception e) { throw new SettingsImportExportException(e.getLocalizedMessage(), e); } } private static void writeSettings(XmlSerializer serializer, Map<String, Object> prefs) throws IOException { for (Entry<String, TreeMap<Integer, SettingsDescription>> versionedSetting : GlobalSettings.SETTINGS.entrySet()) { String key = versionedSetting.getKey(); String valueString = (String) prefs.get(key); TreeMap<Integer, SettingsDescription> versions = versionedSetting.getValue(); Integer highestVersion = versions.lastKey(); SettingsDescription setting = versions.get(highestVersion); if (setting == null) { // Setting was removed. continue; } if (valueString != null) { try { Object value = setting.fromString(valueString); String outputValue = setting.toPrettyString(value); writeKeyValue(serializer, key, outputValue); } catch (InvalidSettingValueException e) { Log.w(K9.LOG_TAG, "Global setting \"" + key + "\" has invalid value \"" + valueString + "\" in preference storage. This shouldn't happen!"); } } else { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "Couldn't find key \"" + key + "\" in preference storage." + "Using default value."); } Object value = setting.getDefaultValue(); String outputValue = setting.toPrettyString(value); writeKeyValue(serializer, key, outputValue); } } } private static void writeAccount(XmlSerializer serializer, Account account, Map<String, Object> prefs) throws IOException { Set<Integer> identities = new HashSet<Integer>(); Set<String> folders = new HashSet<String>(); String accountUuid = account.getUuid(); serializer.startTag(null, ACCOUNT_ELEMENT); serializer.attribute(null, UUID_ATTRIBUTE, accountUuid); String name = (String) prefs.get(accountUuid + "." 
+ Account.ACCOUNT_DESCRIPTION_KEY); if (name != null) { serializer.startTag(null, NAME_ELEMENT); serializer.text(name); serializer.endTag(null, NAME_ELEMENT); } // Write incoming server settings ServerSettings incoming = Store.decodeStoreUri(account.getStoreUri()); serializer.startTag(null, INCOMING_SERVER_ELEMENT); serializer.attribute(null, TYPE_ATTRIBUTE, incoming.type); writeElement(serializer, HOST_ELEMENT, incoming.host); if (incoming.port != -1) { writeElement(serializer, PORT_ELEMENT, Integer.toString(incoming.port)); } writeElement(serializer, CONNECTION_SECURITY_ELEMENT, incoming.connectionSecurity.name()); writeElement(serializer, AUTHENTICATION_TYPE_ELEMENT, incoming.authenticationType); writeElement(serializer, USERNAME_ELEMENT, incoming.username); // XXX For now we don't export the password //writeElement(serializer, PASSWORD_ELEMENT, incoming.password); Map<String, String> extras = incoming.getExtra(); if (extras != null && extras.size() > 0) { serializer.startTag(null, EXTRA_ELEMENT); for (Entry<String, String> extra : extras.entrySet()) { writeKeyValue(serializer, extra.getKey(), extra.getValue()); } serializer.endTag(null, EXTRA_ELEMENT); } serializer.endTag(null, INCOMING_SERVER_ELEMENT); // Write outgoing server settings ServerSettings outgoing = Transport.decodeTransportUri(account.getTransportUri()); serializer.startTag(null, OUTGOING_SERVER_ELEMENT); serializer.attribute(null, TYPE_ATTRIBUTE, outgoing.type); writeElement(serializer, HOST_ELEMENT, outgoing.host); if (outgoing.port != -1) { writeElement(serializer, PORT_ELEMENT, Integer.toString(outgoing.port)); } writeElement(serializer, CONNECTION_SECURITY_ELEMENT, outgoing.connectionSecurity.name()); writeElement(serializer, AUTHENTICATION_TYPE_ELEMENT, outgoing.authenticationType); writeElement(serializer, USERNAME_ELEMENT, outgoing.username); // XXX For now we don't export the password //writeElement(serializer, PASSWORD_ELEMENT, outgoing.password); extras = outgoing.getExtra(); if (extras != null && extras.size() > 0) { serializer.startTag(null, EXTRA_ELEMENT); for (Entry<String, String> extra : extras.entrySet()) { writeKeyValue(serializer, extra.getKey(), extra.getValue()); } serializer.endTag(null, EXTRA_ELEMENT); } serializer.endTag(null, OUTGOING_SERVER_ELEMENT); // Write account settings serializer.startTag(null, SETTINGS_ELEMENT); for (Map.Entry<String, Object> entry : prefs.entrySet()) { String key = entry.getKey(); String valueString = entry.getValue().toString(); String[] comps = key.split("\\."); if (comps.length < 2) { // Skip global settings continue; } String keyUuid = comps[0]; String secondPart = comps[1]; if (!keyUuid.equals(accountUuid)) { // Setting doesn't belong to the account we're currently writing. continue; } String keyPart; if (comps.length >= 3) { String thirdPart = comps[2]; if (Account.IDENTITY_DESCRIPTION_KEY.equals(secondPart)) { // This is an identity key. Save identity index for later... try { identities.add(Integer.parseInt(thirdPart)); } catch (NumberFormatException e) { /* ignore */ } // ... but don't write it now. continue; } if (FolderSettings.SETTINGS.containsKey(thirdPart)) { // This is a folder key. Save folder name for later... folders.add(secondPart); // ... but don't write it now. 
continue; } // Strip account UUID from key keyPart = key.substring(comps[0].length() + 1); } else { keyPart = secondPart; } TreeMap<Integer, SettingsDescription> versionedSetting = AccountSettings.SETTINGS.get(keyPart); if (versionedSetting != null) { Integer highestVersion = versionedSetting.lastKey(); SettingsDescription setting = versionedSetting.get(highestVersion); if (setting != null) { // Only export account settings that can be found in AccountSettings.SETTINGS try { Object value = setting.fromString(valueString); String pretty = setting.toPrettyString(value); writeKeyValue(serializer, keyPart, pretty); } catch (InvalidSettingValueException e) { Log.w(K9.LOG_TAG, "Account setting \"" + keyPart + "\" (" + account.getDescription() + ") has invalid value \"" + valueString + "\" in preference storage. This shouldn't happen!"); } } } } serializer.endTag(null, SETTINGS_ELEMENT); if (identities.size() > 0) { serializer.startTag(null, IDENTITIES_ELEMENT); // Sort identity indices (that's why we store them as Integers) List<Integer> sortedIdentities = new ArrayList<Integer>(identities); Collections.sort(sortedIdentities); for (Integer identityIndex : sortedIdentities) { writeIdentity(serializer, accountUuid, identityIndex.toString(), prefs); } serializer.endTag(null, IDENTITIES_ELEMENT); } if (folders.size() > 0) { serializer.startTag(null, FOLDERS_ELEMENT); for (String folder : folders) { writeFolder(serializer, accountUuid, folder, prefs); } serializer.endTag(null, FOLDERS_ELEMENT); } serializer.endTag(null, ACCOUNT_ELEMENT); } private static void writeIdentity(XmlSerializer serializer, String accountUuid, String identity, Map<String, Object> prefs) throws IOException { serializer.startTag(null, IDENTITY_ELEMENT); String prefix = accountUuid + "."; String suffix = "." 
+ identity; // Write name belonging to the identity String name = (String) prefs.get(prefix + Account.IDENTITY_NAME_KEY + suffix); serializer.startTag(null, NAME_ELEMENT); serializer.text(name); serializer.endTag(null, NAME_ELEMENT); // Write email address belonging to the identity String email = (String) prefs.get(prefix + Account.IDENTITY_EMAIL_KEY + suffix); serializer.startTag(null, EMAIL_ELEMENT); serializer.text(email); serializer.endTag(null, EMAIL_ELEMENT); // Write identity description String description = (String) prefs.get(prefix + Account.IDENTITY_DESCRIPTION_KEY + suffix); if (description != null) { serializer.startTag(null, DESCRIPTION_ELEMENT); serializer.text(description); serializer.endTag(null, DESCRIPTION_ELEMENT); } // Write identity settings serializer.startTag(null, SETTINGS_ELEMENT); for (Map.Entry<String, Object> entry : prefs.entrySet()) { String key = entry.getKey(); String valueString = entry.getValue().toString(); String[] comps = key.split("\\."); if (comps.length < 3) { // Skip non-identity config entries continue; } String keyUuid = comps[0]; String identityKey = comps[1]; String identityIndex = comps[2]; if (!keyUuid.equals(accountUuid) || !identityIndex.equals(identity)) { // Skip entries that belong to another identity continue; } TreeMap<Integer, SettingsDescription> versionedSetting = IdentitySettings.SETTINGS.get(identityKey); if (versionedSetting != null) { Integer highestVersion = versionedSetting.lastKey(); SettingsDescription setting = versionedSetting.get(highestVersion); if (setting != null) { // Only write settings that have an entry in IdentitySettings.SETTINGS try { Object value = setting.fromString(valueString); String outputValue = setting.toPrettyString(value); writeKeyValue(serializer, identityKey, outputValue); } catch (InvalidSettingValueException e) { Log.w(K9.LOG_TAG, "Identity setting \"" + identityKey + "\" has invalid value \"" + valueString + "\" in preference storage. This shouldn't happen!"); } } } } serializer.endTag(null, SETTINGS_ELEMENT); serializer.endTag(null, IDENTITY_ELEMENT); } private static void writeFolder(XmlSerializer serializer, String accountUuid, String folder, Map<String, Object> prefs) throws IOException { serializer.startTag(null, FOLDER_ELEMENT); serializer.attribute(null, NAME_ATTRIBUTE, folder); // Write folder settings for (Map.Entry<String, Object> entry : prefs.entrySet()) { String key = entry.getKey(); String valueString = entry.getValue().toString(); String[] comps = key.split("\\."); if (comps.length < 3) { // Skip non-folder config entries continue; } String keyUuid = comps[0]; String folderName = comps[1]; String folderKey = comps[2]; if (!keyUuid.equals(accountUuid) || !folderName.equals(folder)) { // Skip entries that belong to another folder continue; } TreeMap<Integer, SettingsDescription> versionedSetting = FolderSettings.SETTINGS.get(folderKey); if (versionedSetting != null) { Integer highestVersion = versionedSetting.lastKey(); SettingsDescription setting = versionedSetting.get(highestVersion); if (setting != null) { // Only write settings that have an entry in FolderSettings.SETTINGS try { Object value = setting.fromString(valueString); String outputValue = setting.toPrettyString(value); writeKeyValue(serializer, folderKey, outputValue); } catch (InvalidSettingValueException e) { Log.w(K9.LOG_TAG, "Folder setting \"" + folderKey + "\" has invalid value \"" + valueString + "\" in preference storage. 
This shouldn't happen!"); } } } } serializer.endTag(null, FOLDER_ELEMENT); } private static void writeElement(XmlSerializer serializer, String elementName, String value) throws IllegalArgumentException, IllegalStateException, IOException { if (value != null) { serializer.startTag(null, elementName); serializer.text(value); serializer.endTag(null, elementName); } } private static void writeKeyValue(XmlSerializer serializer, String key, String value) throws IllegalArgumentException, IllegalStateException, IOException { serializer.startTag(null, VALUE_ELEMENT); serializer.attribute(null, KEY_ATTRIBUTE, key); if (value != null) { serializer.text(value); } serializer.endTag(null, VALUE_ELEMENT); } }
1
11,905
If you're going to allow people to specify multiple SMTP servers, you should also properly export/import them.
k9mail-k-9
java
@@ -28,5 +28,14 @@ namespace Datadog.Trace.RuntimeMetrics public const string CpuUserTime = "runtime.dotnet.cpu.user"; public const string CpuSystemTime = "runtime.dotnet.cpu.system"; public const string CpuPercentage = "runtime.dotnet.cpu.percent"; + + public const string CurrentRequests = "runtime.dotnet.aspnetcore.requests.current"; + public const string FailedRequests = "runtime.dotnet.aspnetcore.requests.failed"; + public const string TotalRequests = "runtime.dotnet.aspnetcore.requests.total"; + public const string RequestQueueLength = "runtime.dotnet.aspnetcore.requests.queue_length"; + + public const string CurrentConnections = "runtime.dotnet.aspnetcore.connections.current"; + public const string ConnectionQueueLength = "runtime.dotnet.aspnetcore.connections.queue_length"; + public const string TotalConnections = "runtime.dotnet.aspnetcore.connections.total"; } }
1
namespace Datadog.Trace.RuntimeMetrics { internal static class MetricsNames { public const string ExceptionsCount = "runtime.dotnet.exceptions.count"; public const string Gen0CollectionsCount = "runtime.dotnet.gc.count.gen0"; public const string Gen1CollectionsCount = "runtime.dotnet.gc.count.gen1"; public const string Gen2CollectionsCount = "runtime.dotnet.gc.count.gen2"; public const string GcPauseTime = "runtime.dotnet.gc.pause_time"; public const string GcMemoryLoad = "runtime.dotnet.gc.memory_load"; public const string Gen0HeapSize = "runtime.dotnet.gc.size.gen0"; public const string Gen1HeapSize = "runtime.dotnet.gc.size.gen1"; public const string Gen2HeapSize = "runtime.dotnet.gc.size.gen2"; public const string LohSize = "runtime.dotnet.gc.size.loh"; public const string ContentionTime = "runtime.dotnet.threads.contention_time"; public const string ContentionCount = "runtime.dotnet.threads.contention_count"; public const string ThreadPoolWorkersCount = "runtime.dotnet.threads.workers_count"; public const string ThreadsCount = "runtime.dotnet.threads.count"; public const string CommittedMemory = "runtime.dotnet.mem.committed"; public const string CpuUserTime = "runtime.dotnet.cpu.user"; public const string CpuSystemTime = "runtime.dotnet.cpu.system"; public const string CpuPercentage = "runtime.dotnet.cpu.percent"; } }
1
18,528
nit: Can we prefix these variables with `AspNetCore`?
DataDog-dd-trace-dotnet
.cs
@@ -47,12 +47,18 @@ describe "transpiling YAML plans" do } PLAN - it 'transpiles a yaml plan' do + it 'transpiles a YAML plan from a path' do expect { run_cli(%W[plan convert #{plan_path}]) }.to output(output_plan).to_stdout end + it 'transpiles a YAML plan from a plan name' do + expect { + run_cli(%W[plan convert yaml::conversion -m #{modulepath}]) + }.to output(output_plan).to_stdout + end + it 'plan show output is the same for the original plan and converted plan', ssh: true do Dir.mktmpdir do |tmpdir| FileUtils.mkdir_p(File.join(tmpdir, 'plans'))
1
# frozen_string_literal: true require 'spec_helper' require 'bolt/pal/yaml_plan/transpiler' require 'bolt_spec/files' require 'bolt_spec/integration' describe "transpiling YAML plans" do include BoltSpec::Files include BoltSpec::Integration after(:each) { Puppet.settings.send(:clear_everything_for_tests) } let(:modulepath) { fixtures_path('modules') } let(:yaml_path) { File.join(modulepath, 'yaml', 'plans') } let(:plan_path) { File.join(yaml_path, 'conversion.yaml') } let(:invalid_plan_path) { File.join(yaml_path, 'invalid.yaml') } let(:output_plan) { <<~PLAN } # A yaml plan for testing plan conversion # WARNING: This is an autogenerated plan. It might not behave as expected. # @param targets The targets to run the plan on # @param message A string to print plan yaml::conversion( TargetSpec $targets, String $message = 'hello world' ) { $sample = run_task('sample', $targets, {'message' => $message}) apply_prep($targets) apply($targets) { package { 'nginx': } -> file { '/etc/nginx/html/index.html': content => "Hello world!", } -> service { 'nginx': } } $eval_output = with() || { # TODO: Can blocks handle comments? $list = $sample.targets.map |$t| { notice($t) $t } $list.map |$l| {$l.name} } return $eval_output } PLAN it 'transpiles a yaml plan' do expect { run_cli(%W[plan convert #{plan_path}]) }.to output(output_plan).to_stdout end it 'plan show output is the same for the original plan and converted plan', ssh: true do Dir.mktmpdir do |tmpdir| FileUtils.mkdir_p(File.join(tmpdir, 'plans')) File.write(File.join(tmpdir, 'plans', 'conversion.pp'), output_plan) File.write(File.join(tmpdir, 'bolt-project.yaml'), { 'name' => 'yaml' }.to_yaml) puppet_show = JSON.parse(run_cli(%W[plan show yaml::conversion --project #{tmpdir}])) yaml_show = JSON.parse(run_cli(%W[plan show yaml::conversion -m #{modulepath}])) # Don't compare moduledirs [puppet_show, yaml_show].each do |plan| plan.delete_if { |k, _v| k == 'module_dir' } end # Remove the conversion warning suffix = "\nWARNING: This is an autogenerated plan. It might not behave as expected." puppet_show['description'].delete_suffix!(suffix) # Account for string quoting yaml_show['parameters']['message']['default_value'] = "'#{yaml_show['parameters']['message']['default_value']}'" expect(puppet_show).to eq(yaml_show) end end it 'errors when loading the plan raises an error' do expect { run_cli(%W[plan convert #{invalid_plan_path}]) } .to raise_error(Bolt::Error, /did not find expected '-' indicator/) end end
1
18,018
Can this also include a quick test for transpiling by name?
puppetlabs-bolt
rb
@@ -59,10 +59,11 @@ func (client *clientRest) NodeRegister(proposal dto_discovery.ServiceProposal) ( return } -func (client *clientRest) NodeSendStats(nodeKey string, sessionList []dto.SessionStatsDeprecated) (err error) { +func (client *clientRest) NodeSendStats(nodeKey string) (err error) { response, err := client.doPostRequest("node_send_stats", dto.NodeStatsRequest{ - NodeKey: nodeKey, - Sessions: sessionList, + NodeKey: nodeKey, + // TODO: remove this struct in favor of `SessionStats` + Sessions: []dto.SessionStats{}, }) if err == nil { defer response.Body.Close()
1
package server import ( "bytes" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" log "github.com/cihub/seelog" "github.com/mysterium/node/identity" "github.com/mysterium/node/server/dto" dto_discovery "github.com/mysterium/node/service_discovery/dto" "net/url" ) var mysteriumApiUrl string const MYSTERIUM_API_CLIENT = "goclient-v0.1" const MYSTERIUM_API_LOG_PREFIX = "[Mysterium.api] " func NewClient() Client { httpClient := http.Client{ Transport: &http.Transport{}, } return &clientRest{ httpClient: httpClient, } } type clientRest struct { httpClient http.Client } func (client *clientRest) RegisterIdentity(identity identity.Identity) (err error) { response, err := client.doPostRequest("identities", dto.CreateIdentityRequest{ Identity: identity.Address, }) if err == nil { defer response.Body.Close() log.Info(MYSTERIUM_API_LOG_PREFIX, "Identity registered: ", identity) } return } func (client *clientRest) NodeRegister(proposal dto_discovery.ServiceProposal) (err error) { response, err := client.doPostRequest("node_register", dto.NodeRegisterRequest{ ServiceProposal: proposal, }) if err == nil { defer response.Body.Close() log.Info(MYSTERIUM_API_LOG_PREFIX, "Node registered: ", proposal.ProviderId) } return } func (client *clientRest) NodeSendStats(nodeKey string, sessionList []dto.SessionStatsDeprecated) (err error) { response, err := client.doPostRequest("node_send_stats", dto.NodeStatsRequest{ NodeKey: nodeKey, Sessions: sessionList, }) if err == nil { defer response.Body.Close() log.Info(MYSTERIUM_API_LOG_PREFIX, "Node stats sent: ", nodeKey) } return nil } func (client *clientRest) FindProposals(nodeKey string) (proposals []dto_discovery.ServiceProposal, err error) { values := url.Values{} values.Set("node_key", nodeKey) response, err := client.doGetRequest("proposals", values) if err != nil { return } defer response.Body.Close() var proposalsResponse dto.ProposalsResponse err = parseResponseJson(response, &proposalsResponse) if err != nil { return } proposals = proposalsResponse.Proposals log.Info(MYSTERIUM_API_LOG_PREFIX, "FindProposals fetched: ", proposals) return } func (client *clientRest) SendSessionStats(sessionId string, sessionStats dto.SessionStats) (err error) { path := fmt.Sprintf("sessions/%s/stats", sessionId) response, err := client.doPostRequest(path, sessionStats) if err == nil { defer response.Body.Close() log.Info(MYSTERIUM_API_LOG_PREFIX, "Session stats sent: ", sessionId) } return nil } func (client *clientRest) doGetRequest(path string, values url.Values) (*http.Response, error) { fullPath := fmt.Sprintf("%v/%v?%v", mysteriumApiUrl, path, values.Encode()) return client.executeRequest("GET", fullPath, nil) } func (client *clientRest) doPostRequest(path string, payload interface{}) (*http.Response, error) { return client.doPayloadRequest("POST", path, payload) } func (client *clientRest) doPayloadRequest(method, path string, payload interface{}) (*http.Response, error) { payloadJson, err := json.Marshal(payload) if err != nil { log.Critical(MYSTERIUM_API_LOG_PREFIX, err) return nil, err } return client.executeRequest(method, mysteriumApiUrl+"/"+path, payloadJson) } func (client *clientRest) executeRequest(method, fullPath string, payloadJson []byte) (*http.Response, error) { request, err := http.NewRequest(method, fullPath, bytes.NewBuffer(payloadJson)) request.Header.Set("User-Agent", MYSTERIUM_API_CLIENT) request.Header.Set("Content-Type", "application/json") request.Header.Set("Accept", "application/json") if err != nil { log.Critical(MYSTERIUM_API_LOG_PREFIX, 
err) return nil, err } response, err := client.httpClient.Do(request) if err != nil { log.Error(MYSTERIUM_API_LOG_PREFIX, err) return response, err } err = parseResponseError(response) if err != nil { log.Error(MYSTERIUM_API_LOG_PREFIX, err) return response, err } return response, nil } func parseResponseJson(response *http.Response, dto interface{}) error { responseJson, err := ioutil.ReadAll(response.Body) if err != nil { return err } err = json.Unmarshal(responseJson, dto) if err != nil { return err } return nil } func parseResponseError(response *http.Response) error { if response.StatusCode < 200 || response.StatusCode >= 300 { return errors.New(fmt.Sprintf("Server response invalid: %s (%s)", response.Status, response.Request.URL)) } return nil }
1
9,975
This TODO can be removed
mysteriumnetwork-node
go
@@ -5,13 +5,13 @@ using UIKit; namespace MvvmCross.iOS.Support.Presenters { - public interface IMvxTabBarViewController - { - void ShowTabView(UIViewController viewController, bool wrapInNavigationController, string tabTitle, string tabIconName); + public interface IMvxTabBarViewController + { + void ShowTabView(UIViewController viewController, bool wrapInNavigationController, string tabTitle, string tabIconName, string tabAccessibilityIdentifier); - bool ShowChildView(UIViewController viewController); + bool ShowChildView(UIViewController viewController); - bool CloseChildViewModel(IMvxViewModel viewModel); - } + bool CloseChildViewModel(IMvxViewModel viewModel); + } }
1
using System; using System.Collections.Generic; using MvvmCross.Core.ViewModels; using UIKit; namespace MvvmCross.iOS.Support.Presenters { public interface IMvxTabBarViewController { void ShowTabView(UIViewController viewController, bool wrapInNavigationController, string tabTitle, string tabIconName); bool ShowChildView(UIViewController viewController); bool CloseChildViewModel(IMvxViewModel viewModel); } }
1
12,280
can we make tabAccessibilityIdentifier = null as default?
MvvmCross-MvvmCross
.cs
@@ -80,8 +80,9 @@ func initProvider() func() { pusher.Start() return func() { - bsp.Shutdown() // shutdown the processor - handleErr(exp.Shutdown(context.Background()), "failed to stop exporter") + ctx := context.Background() + _ = bsp.Shutdown(ctx) // shutdown the processor + handleErr(exp.Shutdown(ctx), "failed to stop exporter") pusher.Stop() // pushes any last exports to the receiver } }
1
// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Example using the OTLP exporter + collector + third-party backends. For // information about using the exporter, see: // https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp?tab=doc#example-package-Insecure package main import ( "context" "fmt" "log" "time" "google.golang.org/grpc" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp" "go.opentelemetry.io/otel/global" "go.opentelemetry.io/otel/label" "go.opentelemetry.io/otel/propagators" "go.opentelemetry.io/otel/sdk/metric/controller/push" "go.opentelemetry.io/otel/sdk/metric/processor/basic" "go.opentelemetry.io/otel/sdk/metric/selector/simple" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/semconv" ) // Initializes an OTLP exporter, and configures the corresponding trace and // metric providers. func initProvider() func() { // If the OpenTelemetry Collector is running on a local cluster (minikube or // microk8s), it should be accessible through the NodePort service at the // `localhost:30080` address. Otherwise, replace `localhost` with the // address of your cluster. If you run the app inside k8s, then you can // probably connect directly to the service through dns exp, err := otlp.NewExporter( otlp.WithInsecure(), otlp.WithAddress("localhost:30080"), otlp.WithGRPCDialOption(grpc.WithBlock()), // useful for testing ) handleErr(err, "failed to create exporter") bsp := sdktrace.NewBatchSpanProcessor(exp) tracerProvider := sdktrace.NewTracerProvider( sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}), sdktrace.WithResource(resource.New( // the service name used to display traces in backends semconv.ServiceNameKey.String("test-service"), )), sdktrace.WithSpanProcessor(bsp), ) pusher := push.New( basic.New( simple.NewWithExactDistribution(), exp, ), exp, push.WithPeriod(2*time.Second), ) // set global propagator to tracecontext (the default is no-op). global.SetTextMapPropagator(propagators.TraceContext{}) global.SetTracerProvider(tracerProvider) global.SetMeterProvider(pusher.MeterProvider()) pusher.Start() return func() { bsp.Shutdown() // shutdown the processor handleErr(exp.Shutdown(context.Background()), "failed to stop exporter") pusher.Stop() // pushes any last exports to the receiver } } func main() { log.Printf("Waiting for connection...") shutdown := initProvider() defer shutdown() tracer := global.Tracer("test-tracer") meter := global.Meter("test-meter") // labels represent additional key-value descriptors that can be bound to a // metric observer or recorder. commonLabels := []label.KeyValue{ label.String("labelA", "chocolate"), label.String("labelB", "raspberry"), label.String("labelC", "vanilla"), } // Recorder metric example valuerecorder := otel.Must(meter). NewFloat64Counter( "an_important_metric", otel.WithDescription("Measures the cumulative epicness of the app"), ).Bind(commonLabels...) 
defer valuerecorder.Unbind() // work begins ctx, span := tracer.Start( context.Background(), "CollectorExporter-Example", otel.WithAttributes(commonLabels...)) defer span.End() for i := 0; i < 10; i++ { _, iSpan := tracer.Start(ctx, fmt.Sprintf("Sample-%d", i)) log.Printf("Doing really hard work (%d / 10)\n", i+1) valuerecorder.Add(ctx, 1.0) <-time.After(time.Second) iSpan.End() } log.Printf("Done!") } func handleErr(err error, message string) { if err != nil { log.Fatalf("%s: %v", message, err) } }
1
13,536
Should we print this error?
open-telemetry-opentelemetry-go
go
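The reviewer comment in the record above asks whether the discarded `bsp.Shutdown(ctx)` error should be printed rather than swallowed with `_ =`. Below is a minimal, self-contained Go sketch of that idea; it deliberately avoids the OpenTelemetry SDK and uses a stand-in `shutdowner` interface plus a non-fatal variant of the example's `handleErr` helper, so the names here are illustrative assumptions, not the actual library API.

package main

import (
	"context"
	"errors"
	"log"
)

// shutdowner is a stand-in for components like the span processor and
// exporter in the example above, each exposing Shutdown(ctx) error.
type shutdowner interface {
	Shutdown(ctx context.Context) error
}

// noisyComponent always fails on shutdown, to exercise the error path.
type noisyComponent struct{}

func (noisyComponent) Shutdown(ctx context.Context) error {
	return errors.New("flush failed")
}

// handleErr reports the error instead of silently discarding it with `_ =`.
// (The example file's helper uses log.Fatalf; this sketch logs and continues.)
func handleErr(err error, message string) {
	if err != nil {
		log.Printf("%s: %v", message, err)
	}
}

func cleanup(components ...shutdowner) {
	ctx := context.Background()
	for _, c := range components {
		handleErr(c.Shutdown(ctx), "failed to shut down component")
	}
}

func main() {
	cleanup(noisyComponent{})
}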
@@ -31,7 +31,7 @@ def multiclass_nms(multi_bboxes, tuple: (bboxes, labels, indices (optional)), tensors of shape (k, 5), (k), and (k). Labels are 0-based. """ - num_classes = multi_scores.size(1) - 1 + num_classes = int(multi_scores.size(1) - 1) # exclude background category if multi_bboxes.shape[1] > 4: bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
1
import torch from mmcv.ops.nms import batched_nms from mmdet.core.bbox.iou_calculators import bbox_overlaps def multiclass_nms(multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1, score_factors=None, return_inds=False): """NMS for multi-class bboxes. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class), where the last column contains scores of the background class, but this will be ignored. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. nms_thr (float): NMS IoU threshold max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. score_factors (Tensor, optional): The factors multiplied to scores before applying NMS. Default to None. return_inds (bool, optional): Whether return the indices of kept bboxes. Default to False. Returns: tuple: (bboxes, labels, indices (optional)), tensors of shape (k, 5), (k), and (k). Labels are 0-based. """ num_classes = multi_scores.size(1) - 1 # exclude background category if multi_bboxes.shape[1] > 4: bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4) else: bboxes = multi_bboxes[:, None].expand( multi_scores.size(0), num_classes, 4) scores = multi_scores[:, :-1] labels = torch.arange(num_classes, dtype=torch.long) labels = labels.view(1, -1).expand_as(scores) bboxes = bboxes.reshape(-1, 4) scores = scores.reshape(-1) labels = labels.reshape(-1) if not torch.onnx.is_in_onnx_export(): # NonZero not supported in TensorRT # remove low scoring boxes valid_mask = scores > score_thr # multiply score_factor after threshold to preserve more bboxes, improve # mAP by 1% for YOLOv3 if score_factors is not None: # expand the shape to match original shape of score score_factors = score_factors.view(-1, 1).expand( multi_scores.size(0), num_classes) score_factors = score_factors.reshape(-1) scores = scores * score_factors if not torch.onnx.is_in_onnx_export(): # NonZero not supported in TensorRT inds = valid_mask.nonzero(as_tuple=False).squeeze(1) bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds] else: # TensorRT NMS plugin has invalid output filled with -1 # add dummy data to make detection output correct. bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0) scores = torch.cat([scores, scores.new_zeros(1)], dim=0) labels = torch.cat([labels, labels.new_zeros(1)], dim=0) if bboxes.numel() == 0: if torch.onnx.is_in_onnx_export(): raise RuntimeError('[ONNX Error] Can not record NMS ' 'as it has not been executed this time') if return_inds: return bboxes, labels, inds else: return bboxes, labels dets, keep = batched_nms(bboxes, scores, labels, nms_cfg) if max_num > 0: dets = dets[:max_num] keep = keep[:max_num] if return_inds: return dets, labels[keep], keep else: return dets, labels[keep] def fast_nms(multi_bboxes, multi_scores, multi_coeffs, score_thr, iou_thr, top_k, max_num=-1): """Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_. Fast NMS allows already-removed detections to suppress other detections so that every instance can be decided to be kept or discarded in parallel, which is not possible in traditional NMS. This relaxation allows us to implement Fast NMS entirely in standard GPU-accelerated matrix operations. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class+1), where the last column contains scores of the background class, but this will be ignored. multi_coeffs (Tensor): shape (n, #class*coeffs_dim). 
score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_thr (float): IoU threshold to be considered as conflicted. top_k (int): if there are more than top_k bboxes before NMS, only top top_k will be kept. max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. If -1, keep all the bboxes. Default: -1. Returns: tuple: (bboxes, labels, coefficients), tensors of shape (k, 5), (k, 1), and (k, coeffs_dim). Labels are 0-based. """ scores = multi_scores[:, :-1].t() # [#class, n] scores, idx = scores.sort(1, descending=True) idx = idx[:, :top_k].contiguous() scores = scores[:, :top_k] # [#class, topk] num_classes, num_dets = idx.size() boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4) coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1) iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk] iou.triu_(diagonal=1) iou_max, _ = iou.max(dim=1) # Now just filter out the ones higher than the threshold keep = iou_max <= iou_thr # Second thresholding introduces 0.2 mAP gain at negligible time cost keep *= scores > score_thr # Assign each kept detection to its corresponding class classes = torch.arange( num_classes, device=boxes.device)[:, None].expand_as(keep) classes = classes[keep] boxes = boxes[keep] coeffs = coeffs[keep] scores = scores[keep] # Only keep the top max_num highest scores across all classes scores, idx = scores.sort(0, descending=True) if max_num > 0: idx = idx[:max_num] scores = scores[:max_num] classes = classes[idx] boxes = boxes[idx] coeffs = coeffs[idx] cls_dets = torch.cat([boxes, scores[:, None]], dim=1) return cls_dets, classes, coeffs
1
23,349
For which reason do we need to convert this to int here?
open-mmlab-mmdetection
py
@@ -19,7 +19,7 @@ func setFreezer(dirPath string, state configs.FreezerState) error { // freeze the container (since without the freezer cgroup, that's a // no-op). if state == configs.Undefined || state == configs.Thawed { - err = nil + return nil } return errors.Wrap(err, "freezer not supported") }
1
// +build linux package fs2 import ( stdErrors "errors" "os" "strings" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" "github.com/pkg/errors" "golang.org/x/sys/unix" ) func setFreezer(dirPath string, state configs.FreezerState) error { if err := supportsFreezer(dirPath); err != nil { // We can ignore this request as long as the user didn't ask us to // freeze the container (since without the freezer cgroup, that's a // no-op). if state == configs.Undefined || state == configs.Thawed { err = nil } return errors.Wrap(err, "freezer not supported") } var stateStr string switch state { case configs.Undefined: return nil case configs.Frozen: stateStr = "1" case configs.Thawed: stateStr = "0" default: return errors.Errorf("invalid freezer state %q requested", state) } if err := fscommon.WriteFile(dirPath, "cgroup.freeze", stateStr); err != nil { return err } // Confirm that the cgroup did actually change states. if actualState, err := getFreezer(dirPath); err != nil { return err } else if actualState != state { return errors.Errorf(`expected "cgroup.freeze" to be in state %q but was in %q`, state, actualState) } return nil } func supportsFreezer(dirPath string) error { _, err := fscommon.ReadFile(dirPath, "cgroup.freeze") return err } func getFreezer(dirPath string) (configs.FreezerState, error) { state, err := fscommon.ReadFile(dirPath, "cgroup.freeze") if err != nil { // If the kernel is too old, then we just treat the freezer as being in // an "undefined" state. if os.IsNotExist(err) || stdErrors.Is(err, unix.ENODEV) { err = nil } return configs.Undefined, err } switch strings.TrimSpace(state) { case "0": return configs.Thawed, nil case "1": return configs.Frozen, nil default: return configs.Undefined, errors.Errorf(`unknown "cgroup.freeze" state: %q`, state) } }
1
21,490
FWIW, wondering if the error is interesting here (I actually arrived at this code initially to change `supportsFreezer()` to return a `bool`). See the sketch after this record.
opencontainers-runc
go
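The review comment on this record hints at a supportsFreezer() that reports a plain bool instead of an error. Below is a minimal Go sketch of that variant; it uses os.Stat in place of runc's fscommon.ReadFile helper purely to keep the snippet self-contained, and it is not the actual runc code.

package fs2sketch

import (
	"os"
	"path/filepath"
)

// supportsFreezer reports whether the cgroup.freeze file exists. This is the
// bool-returning shape the reviewer mentions; the real runc helper returns the
// underlying read error instead, so callers can wrap and report it.
func supportsFreezer(dirPath string) bool {
	_, err := os.Stat(filepath.Join(dirPath, "cgroup.freeze"))
	return err == nil
}

Returning a bool drops the distinction between "file missing" and other read failures, which is presumably why the comment asks whether the error is interesting before flattening it away.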
@@ -1,8 +1 @@ -<h1><%= plan.title %></h1> - -<% if plan.visibility == 'is_test' %> - <div class="roadmap-info-box"> - <i class="fa fa-exclamation-circle" aria-hidden="true"></i> - <span><%= _('This is a') %> <strong><%= _('test plan') %></strong>.</span> - </div> -<% end %> +<h1><%= plan.title %></h1>
1
<h1><%= plan.title %></h1> <% if plan.visibility == 'is_test' %> <div class="roadmap-info-box"> <i class="fa fa-exclamation-circle" aria-hidden="true"></i> <span><%= _('This is a') %> <strong><%= _('test plan') %></strong>.</span> </div> <% end %>
1
16,785
Does this still need to be a partial? Is the intention down the line to move back towards a conditionally different title?
DMPRoadmap-roadmap
rb
@@ -107,13 +107,12 @@ func TestEnvAdd_Execute(t *testing.T) { prog: mockSpinner, }, expectedEnv: archer.Environment{ - Name: "env", - Project: "project", - //TODO update these to real values - AccountID: "1234", - Region: "1234", - Prod: true, - PublicLoadBalancer: true, + Name: "env", + Project: "project", + AccountID: "1234", + Region: "1234", + RegistryURL: "902697171733.dkr.ecr.eu-west-3.amazonaws.com/project/env", + Prod: true, }, mocking: func() { gomock.InOrder(
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "fmt" "testing" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer" climocks "github.com/aws/amazon-ecs-cli-v2/internal/pkg/cli/mocks" "github.com/aws/amazon-ecs-cli-v2/mocks" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) func TestEnvAdd_Ask(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockPrompter := climocks.NewMockprompter(ctrl) mockProject := "mockProject" mockEnv := "mockEnv" testCases := map[string]struct { inputEnv string inputProject string setupMocks func() }{ "with no flags set": { setupMocks: func() { gomock.InOrder( mockPrompter.EXPECT(). Get( gomock.Eq("What is your project's name?"), gomock.Eq("A project groups all of your environments together."), gomock.Any()). Return(mockProject, nil). Times(1), mockPrompter.EXPECT(). Get( gomock.Eq("What is your environment's name?"), gomock.Eq("A unique identifier for an environment (e.g. dev, test, prod)"), gomock.Any()). Return(mockEnv, nil). Times(1)) }, }, "with env flags set": { inputEnv: mockEnv, setupMocks: func() { mockPrompter.EXPECT(). Get( gomock.Eq("What is your project's name?"), gomock.Eq("A project groups all of your environments together."), gomock.Any()). Return(mockProject, nil). Times(1) }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { addEnv := &AddEnvOpts{ EnvName: tc.inputEnv, ProjectName: tc.inputProject, prompter: mockPrompter, } tc.setupMocks() err := addEnv.Ask() require.NoError(t, err) require.Equal(t, mockProject, addEnv.ProjectName, "expected project names to match") require.Equal(t, mockEnv, addEnv.EnvName, "expected environment names to match") }) } } func TestEnvAdd_Execute(t *testing.T) { ctrl := gomock.NewController(t) mockError := fmt.Errorf("error") mockEnvStore := mocks.NewMockEnvironmentStore(ctrl) mockProjStore := mocks.NewMockProjectStore(ctrl) mockDeployer := mocks.NewMockEnvironmentDeployer(ctrl) mockSpinner := climocks.NewMockprogress(ctrl) var capturedArgument *archer.Environment defer ctrl.Finish() testCases := map[string]struct { addEnvOpts AddEnvOpts expectedEnv archer.Environment expectedErr error mocking func() }{ "with a succesful call to add env": { addEnvOpts: AddEnvOpts{ manager: mockEnvStore, projectGetter: mockProjStore, deployer: mockDeployer, ProjectName: "project", EnvName: "env", Production: true, prog: mockSpinner, }, expectedEnv: archer.Environment{ Name: "env", Project: "project", //TODO update these to real values AccountID: "1234", Region: "1234", Prod: true, PublicLoadBalancer: true, }, mocking: func() { gomock.InOrder( mockProjStore. EXPECT(). GetProject(gomock.Any()). Return(&archer.Project{}, nil), mockSpinner.EXPECT().Start(gomock.Eq("Preparing deployment...")), mockDeployer.EXPECT().DeployEnvironment(gomock.Any()), mockSpinner.EXPECT().Stop(gomock.Eq("Done!")), mockSpinner.EXPECT().Start(gomock.Eq("Deploying env...")), // TODO: Assert Wait is called with stack name returned by DeployEnvironment. mockDeployer.EXPECT().WaitForEnvironmentCreation(gomock.Any()), mockEnvStore. EXPECT(). CreateEnvironment(gomock.Any()). 
Do(func(env *archer.Environment) { capturedArgument = env }), mockSpinner.EXPECT().Stop(gomock.Eq("Done!")), ) }, }, "with a invalid project": { expectedErr: mockError, addEnvOpts: AddEnvOpts{ manager: mockEnvStore, projectGetter: mockProjStore, deployer: mockDeployer, ProjectName: "project", EnvName: "env", Production: true, prog: mockSpinner, }, expectedEnv: archer.Environment{ Name: "env", Project: "project", //TODO update these to real values AccountID: "1234", Region: "1234", Prod: true, }, mocking: func() { mockProjStore. EXPECT(). GetProject(gomock.Any()). Return(nil, mockError) mockEnvStore. EXPECT(). CreateEnvironment(gomock.Any()). Times(0) }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { // Setup mocks tc.mocking() err := tc.addEnvOpts.Execute() if tc.expectedErr == nil { require.NoError(t, err) require.Equal(t, tc.expectedEnv, *capturedArgument) } else { require.EqualError(t, tc.expectedErr, err.Error()) } }) } }
1
10,561
Seems like `RegistryURL` is missing? The same applies to a few other places below that create `archer.Environment` (see the sketch after this record).
aws-copilot-cli
go
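For context on the review comment: the field it says is missing elsewhere is the one this patch adds to the first test case. The sketch below shows how another expected value in the same test could look with it; the struct is a hypothetical stand-in rather than the real archer.Environment type, and the field values are copied from the patch.

package sketch

// Environment is a stand-in for archer.Environment, reduced to the fields
// exercised by the test; the real type lives in the copilot-cli repository.
type Environment struct {
	Name        string
	Project     string
	AccountID   string
	Region      string
	RegistryURL string
	Prod        bool
}

// expectedEnv mirrors the patched first test case; per the review comment, the
// other archer.Environment literals in the file would gain RegistryURL as well.
var expectedEnv = Environment{
	Name:        "env",
	Project:     "project",
	AccountID:   "1234",
	Region:      "1234",
	RegistryURL: "902697171733.dkr.ecr.eu-west-3.amazonaws.com/project/env",
	Prod:        true,
}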
@@ -14,7 +14,9 @@ import abc import logging +import datetime import parameter +import target import warnings import traceback import pyparsing as pp
1
# Copyright (c) 2012 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import abc import logging import parameter import warnings import traceback import pyparsing as pp Parameter = parameter.Parameter logger = logging.getLogger('luigi-interface') def namespace(namespace=None): """ Call to set namespace of tasks declared after the call. If called without arguments or with ``None`` as the namespace, the namespace is reset, which is recommended to do at the end of any file where the namespace is set to avoid unintentionally setting namespace on tasks outside of the scope of the current file. """ Register._default_namespace = namespace def id_to_name_and_params(task_id): ''' Turn a task_id into a (task_family, {params}) tuple. E.g. calling with ``Foo(bar=bar, baz=baz)`` returns ``('Foo', {'bar': 'bar', 'baz': 'baz'})`` ''' name_chars = pp.alphanums + '_' parameter = ( (pp.Word(name_chars) + pp.Literal('=').suppress() + ((pp.Literal('(').suppress() | pp.Literal('[').suppress()) + pp.ZeroOrMore(pp.Word(name_chars) + pp.ZeroOrMore(pp.Literal(',')).suppress()) + (pp.Literal(')').suppress() | pp.Literal(']').suppress()))).setResultsName('list_params', listAllMatches=True) | (pp.Word(name_chars) + pp.Literal('=').suppress() + pp.Word(name_chars)).setResultsName('params', listAllMatches=True)) parser = ( pp.Word(name_chars).setResultsName('task') + pp.Literal('(').suppress() + pp.ZeroOrMore(parameter + (pp.Literal(',')).suppress()) + pp.ZeroOrMore(parameter) + pp.Literal(')').suppress()) parsed = parser.parseString(task_id).asDict() task_name = parsed['task'] params = {} if 'params' in parsed: for k, v in parsed['params']: params[k] = v if 'list_params' in parsed: for x in parsed['list_params']: params[x[0]] = x[1:] return task_name, params class Register(abc.ABCMeta): """ The Metaclass of :py:class:`Task`. Acts as a global registry of Tasks with the following properties: 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the same object. 2. Keep track of all subclasses of :py:class:`Task` and expose them. """ __instance_cache = {} _default_namespace = None _reg = [] AMBIGUOUS_CLASS = object() # Placeholder denoting an error """If this value is returned by :py:meth:`get_reg` then there is an ambiguous task name (two :py:class:`Task` have the same name). This denotes an error.""" def __new__(metacls, classname, bases, classdict): """ Custom class creation for namespacing. Also register all subclasses Set the task namespace to whatever the currently declared namespace is """ if "task_namespace" not in classdict: classdict["task_namespace"] = metacls._default_namespace cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) metacls._reg.append(cls) return cls def __call__(cls, *args, **kwargs): """ Custom class instantiation utilizing instance cache. 
If a Task has already been instantiated with the same parameters, the previous instance is returned to reduce number of object instances.""" def instantiate(): return super(Register, cls).__call__(*args, **kwargs) h = Register.__instance_cache if h == None: # disabled return instantiate() params = cls.get_params() param_values = cls.get_param_values(params, args, kwargs) k = (cls, tuple(param_values)) try: hash(k) except TypeError: logger.debug("Not all parameter values are hashable so instance isn't coming from the cache") return instantiate() # unhashable types in parameters if k not in h: h[k] = instantiate() return h[k] @classmethod def clear_instance_cache(self): """Clear/Reset the instance cache.""" Register.__instance_cache = {} @classmethod def disable_instance_cache(self): """Disables the instance cache.""" Register.__instance_cache = None @property def task_family(cls): """The task family for the given class. If ``cls.task_namespace is None`` then it's the name of the class. Otherwise, ``<task_namespace>.`` is prefixed to the class name. """ if cls.task_namespace is None: return cls.__name__ else: return "%s.%s" % (cls.task_namespace, cls.__name__) @classmethod def get_reg(cls): """Return all of the registery classes. :return: a ``dict`` of task_family -> class """ # We have to do this on-demand in case task names have changed later reg = {} for cls in cls._reg: if cls.run != NotImplemented: name = cls.task_family if name in reg and reg[name] != cls and \ reg[name] != cls.AMBIGUOUS_CLASS and \ not issubclass(cls, reg[name]): # Registering two different classes - this means we can't instantiate them by name # The only exception is if one class is a subclass of the other. In that case, we # instantiate the most-derived class (this fixes some issues with decorator wrappers). reg[name] = cls.AMBIGUOUS_CLASS else: reg[name] = cls return reg @classmethod def tasks_str(cls): """Human-readable register contents dump. """ return repr(sorted(Register.get_reg().keys())) @classmethod def get_task_cls(cls, name): """Returns an unambiguous class or raises an exception. """ task_cls = Register.get_reg().get(name) if not task_cls: raise Exception('Task %r not found. Candidates are: %s' % (name, Register.tasks_str())) if task_cls == Register.AMBIGUOUS_CLASS: raise Exception('Task %r is ambiguous' % name) return task_cls @classmethod def get_global_params(cls): """Compiles and returns the global parameters for all :py:class:`Task`. :return: a ``dict`` of parameter name -> parameter. """ global_params = {} for t_name, t_cls in cls.get_reg().iteritems(): if t_cls == cls.AMBIGUOUS_CLASS: continue for param_name, param_obj in t_cls.get_global_params(): if param_name in global_params and global_params[param_name] != param_obj: # Could be registered multiple times in case there's subclasses raise Exception('Global parameter %r registered by multiple classes' % param_name) global_params[param_name] = param_obj return global_params.iteritems() class Task(object): """ This is the base class of all Luigi Tasks, the base unit of work in Luigi. A Luigi Task describes a unit or work. The key methods of a Task, which must be implemented in a subclass are: * :py:meth:`run` - the computation done by this task. * :py:meth:`requires` - the list of Tasks that this Task depends on. * :py:meth:`output` - the output :py:class:`Target` that this Task creates. 
Parameters to the Task should be declared as members of the class, e.g.:: class MyTask(luigi.Task): count = luigi.IntParameter() Each Task exposes a constructor accepting all :py:class:`Parameter` (and values) as kwargs. e.g. ``MyTask(count=10)`` would instantiate `MyTask`. In addition to any declared properties and methods, there are a few non-declared properties, which are created by the :py:class:`Register` metaclass: ``Task.task_namespace`` optional string which is prepended to the task name for the sake of scheduling. If it isn't overridden in a Task, whatever was last declared using `luigi.namespace` will be used. ``Task._parameters`` list of ``(parameter_name, parameter)`` tuples for this task class """ __metaclass__ = Register _event_callbacks = {} # Priority of the task: the scheduler should favor available # tasks with higher priority values first. priority = 0 # Resources used by the task. Should be formatted like {"scp": 1} to indicate that the # task requires 1 unit of the scp resource. resources = {} @classmethod def event_handler(cls, event): """ Decorator for adding event handlers """ def wrapped(callback): cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback) return callback return wrapped def trigger_event(self, event, *args, **kwargs): """Trigger that calls all of the specified events associated with this class. """ for event_class, event_callbacks in self._event_callbacks.iteritems(): if not isinstance(self, event_class): continue for callback in event_callbacks.get(event, []): try: # callbacks are protected callback(*args, **kwargs) except KeyboardInterrupt: return except: logger.exception("Error in event callback for %r", event) pass @property def task_family(self): """Convenience method since a property on the metaclass isn't directly accessible through the class instances. """ return self.__class__.task_family @classmethod def get_params(cls): """Returns all of the Parameters for this Task.""" # We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically params = [] for param_name in dir(cls): param_obj = getattr(cls, param_name) if not isinstance(param_obj, Parameter): continue params.append((param_name, param_obj)) # The order the parameters are created matters. See Parameter class params.sort(key=lambda t: t[1].counter) return params @classmethod def get_global_params(cls): """Return the global parameters for this Task.""" return [(param_name, param_obj) for param_name, param_obj in cls.get_params() if param_obj.is_global] @classmethod def get_nonglobal_params(cls): """Return the non-global parameters for this Task.""" return [(param_name, param_obj) for param_name, param_obj in cls.get_params() if not param_obj.is_global] @classmethod def get_param_values(cls, params, args, kwargs): """Get the values of the parameters from the args and kwargs. :param params: list of (param_name, Parameter). :param args: positional arguments :param kwargs: keyword arguments. :returns: list of `(name, value)` tuples, one for each parameter. """ result = {} params_dict = dict(params) # In case any exceptions are thrown, create a helpful description of how the Task was invoked # TODO: should we detect non-reprable arguments? 
These will lead to mysterious errors exc_desc = '%s[args=%s, kwargs=%s]' % (cls.__name__, args, kwargs) # Fill in the positional arguments positional_params = [(n, p) for n, p in params if not p.is_global] for i, arg in enumerate(args): if i >= len(positional_params): raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args))) param_name, param_obj = positional_params[i] result[param_name] = arg # Then the optional arguments for param_name, arg in kwargs.iteritems(): if param_name in result: raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name)) if param_name not in params_dict: raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name)) if params_dict[param_name].is_global: raise parameter.ParameterException('%s: can not override global parameter %s' % (exc_desc, param_name)) result[param_name] = arg # Then use the defaults for anything not filled in for param_name, param_obj in params: if param_name not in result: if not param_obj.has_value: raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name)) result[param_name] = param_obj.value def list_to_tuple(x): """ Make tuples out of lists and sets to allow hashing """ if isinstance(x, list) or isinstance(x, set): return tuple(x) else: return x # Sort it by the correct order and make a list return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params] def __init__(self, *args, **kwargs): """Constructor to resolve values for all Parameters. For example, the Task:: class MyTask(luigi.Task): count = luigi.IntParameter() can be instantiated as ``MyTask(count=10)``. """ params = self.get_params() param_values = self.get_param_values(params, args, kwargs) # Set all values on class instance for key, value in param_values: setattr(self, key, value) # Register args and kwargs as an attribute on the class. Might be useful self.param_args = tuple(value for key, value in param_values) self.param_kwargs = dict(param_values) # Build up task id task_id_parts = [] param_objs = dict(params) for param_name, param_value in param_values: if dict(params)[param_name].significant: task_id_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value))) self.task_id = '%s(%s)' % (self.task_family, ', '.join(task_id_parts)) self.__hash = hash(self.task_id) def initialized(self): """Returns ``True`` if the Task is initialized and ``False`` otherwise.""" return hasattr(self, 'task_id') @classmethod def from_str_params(cls, params_str, global_params): """Creates an instance from a str->str hash This method is for parsing of command line arguments or other non-programmatic invocations. :param params_str: dict of param name -> value. :param global_params: dict of param name -> value, the global params. 
""" for param_name, param in global_params: value = param.parse_from_input(param_name, params_str[param_name]) param.set_global(value) kwargs = {} for param_name, param in cls.get_nonglobal_params(): value = param.parse_from_input(param_name, params_str[param_name]) kwargs[param_name] = value return cls(**kwargs) def to_str_params(self): """Opposite of from_str_params""" params_str = {} params = dict(self.get_params()) for param_name, param_value in self.param_kwargs.iteritems(): params_str[param_name] = params[param_name].serialize(param_value) return params_str def clone(self, cls=None, **kwargs): ''' Creates a new instance from an existing instance where some of the args have changed. There's at least two scenarios where this is useful (see test/clone_test.py) - Remove a lot of boiler plate when you have recursive dependencies and lots of args - There's task inheritance and some logic is on the base class ''' k = self.param_kwargs.copy() k.update(kwargs.items()) if cls is None: cls = self.__class__ new_k = {} for param_name, param_class in cls.get_nonglobal_params(): if param_name in k: new_k[param_name] = k[param_name] return cls(**new_k) def __hash__(self): return self.__hash def __repr__(self): return self.task_id def __eq__(self, other): return self.__class__ == other.__class__ and self.param_args == other.param_args def complete(self): """ If the task has any outputs, return ``True`` if all outputs exists. Otherwise, return whether or not the task has run or not """ outputs = flatten(self.output()) if len(outputs) == 0: # TODO: unclear if tasks without outputs should always run or never run warnings.warn("Task %r without outputs has no custom complete() method" % self) return False for output in outputs: if not output.exists(): return False else: return True def output(self): """The output that this Task produces. The output of the Task determines if the Task needs to be run--the task is considered finished iff the outputs all exist. Subclasses should override this method to return a single :py:class:`Target` or a list of :py:class:`Target` instances. Implementation note If running multiple workers, the output must be a resource that is accessible by all workers, such as a DFS or database. Otherwise, workers might compute the same output since they don't see the work done by other workers. """ return [] # default impl def requires(self): """The Tasks that this Task depends on. A Task will only run if all of the Tasks that it requires are completed. If your Task does not require any other Tasks, then you don't need to override this method. Otherwise, a Subclasses can override this method to return a single Task, a list of Task instances, or a dict whose values are Task instances. """ return [] # default impl def _requires(self): ''' Override in "template" tasks which themselves are supposed to be subclassed and thus have their requires() overridden (name preserved to provide consistent end-user experience), yet need to introduce (non-input) dependencies. Must return an iterable which among others contains the _requires() of the superclass. ''' return flatten(self.requires()) # base impl def process_resources(self): ''' Override in "template" tasks which provide common resource functionality but allow subclasses to specify additional resources while preserving the name for consistent end-user experience. 
''' return self.resources # default impl def input(self): """Returns the outputs of the Tasks returned by :py:meth:`requires` :return: a list of :py:class:`Target` objects which are specified as outputs of all required Tasks. """ return getpaths(self.requires()) def deps(self): """Internal method used by the scheduler Returns the flattened list of requires. """ # used by scheduler return flatten(self._requires()) def run(self): """The task run method, to be overridden in a subclass.""" pass # default impl def on_failure(self, exception): """ Override for custom error handling This method gets called if an exception is raised in :py:meth:`run`. Return value of this method is json encoded and sent to the scheduler as the `expl` argument. Its string representation will be used as the body of the error email sent out if any. Default behavior is to return a string representation of the stack trace. """ traceback_string = traceback.format_exc() return "Runtime error:\n%s" % traceback_string def on_success(self): """ Override for doing custom completion handling for a larger class of tasks This method gets called when :py:meth:`run` completes without raising any exceptions. The returned value is json encoded and sent to the scheduler as the `expl` argument. Default behavior is to send an None value""" def externalize(task): """Returns an externalized version of the Task. See py:class:`ExternalTask`. """ task.run = NotImplemented return task class ExternalTask(Task): """Subclass for references to external dependencies. An ExternalTask's does not have a `run` implementation, which signifies to the framework that this Task's :py:meth:`output` is generated outside of Luigi. """ run = NotImplemented class WrapperTask(Task): """Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist. """ def complete(self): return all(r.complete() for r in flatten(self.requires())) def getpaths(struct): """ Maps all Tasks in a structured data object to their .output()""" if isinstance(struct, Task): return struct.output() elif isinstance(struct, dict): r = {} for k, v in struct.iteritems(): r[k] = getpaths(v) return r else: # Remaining case: assume r is iterable... try: s = list(struct) except TypeError: raise Exception('Cannot map %s to Task/dict/list' % str(struct)) return [getpaths(r) for r in s] def flatten(struct): """Creates a flat list of all all items in structured output (dicts, lists, items) >>> flatten({'a': 'foo', 'b': 'bar'}) ['foo', 'bar'] >>> flatten(['foo', ['bar', 'troll']]) ['foo', 'bar', 'troll'] >>> flatten('foo') ['foo'] >>> flatten(42) [42] """ if struct is None: return [] flat = [] if isinstance(struct, dict): for key, result in struct.iteritems(): flat += flatten(result) return flat if isinstance(struct, basestring): return [struct] try: # if iterable for result in struct: flat += flatten(result) return flat except TypeError: pass return [struct]
1
9,726
Are these imports needed?
spotify-luigi
py
@@ -139,6 +139,13 @@ describe('text.formControlValue', function() { axe.utils .querySelectorAll(axe._tree[0], '#fixture input') .forEach(function(target) { + // Safari and IE11 do not support the color input type + // and thus treat them as text inputs. ignore fallback + // inputs + if (target.actualNode.type === 'text') { + return; + } + assert.equal( nativeTextboxValue(target), '',
1
describe('text.formControlValue', function() { var __methods, __unsupported; var formControlValue = axe.commons.text.formControlValue; var fixtureSetup = axe.testUtils.fixtureSetup; var fixture = document.querySelector('#fixture'); var isIE11 = axe.testUtils.isIE11; function queryFixture(code, query) { fixtureSetup(code); return axe.utils.querySelectorAll(axe._tree, query)[0]; } function getNodeType(node) { // Note: Inconsistent response for `node.type` across browsers, hence resolving and sanitizing using getAttribute var nodeType = node.hasAttribute('type') ? axe.commons.text.sanitize(node.getAttribute('type')).toLowerCase() : 'text'; nodeType = axe.utils.validInputTypes().includes(nodeType) ? nodeType : 'text'; return nodeType; } function bar() { return 'bar'; } function empty() { return ''; } beforeEach(function() { __methods = axe.commons.text.formControlValueMethods; __unsupported = axe.commons.text.unsupported.accessibleNameFromFieldValue; }); afterEach(function() { axe.commons.text.formControlValueMethods = __methods; axe.commons.text.unsupported.accessibleNameFromFieldValue = __unsupported; }); it('runs functions on text.formControlValueMethods', function() { var target = queryFixture('<input value="foo" />', 'input'); axe.commons.text.formControlValueMethods = [bar]; assert.equal(formControlValue(target), 'bar'); }); it('returns the first truthy result from text.formControlValueMethods', function() { var target = queryFixture('<input value ="foo" />', 'input'); var fixture = axe.utils.querySelectorAll(axe._tree, '#fixture')[0]; axe.commons.text.formControlValueMethods = [empty, bar, empty]; assert.equal(formControlValue(target, { startNode: fixture }), 'bar'); }); it('returns `` when the node equal context.startNode', function() { var target = queryFixture('<input value="foo" />', 'input'); axe.commons.text.formControlValueMethods = [bar]; assert.equal(formControlValue(target, { startNode: target }), ''); }); it('returns `` when the role is not supposed to return a value', function() { var target = queryFixture( '<input value="foo" role="presentation" />', 'input' ); axe.commons.text.formControlValueMethods = [bar]; assert.equal(formControlValue(target), ''); }); it('returns `` when accessibleNameFromFieldValue says the role is unsupported', function() { var target = queryFixture('<input value="foo" />', 'input'); axe.commons.text.unsupported.accessibleNameFromFieldValue = ['textbox']; assert.equal(formControlValue(target), ''); }); describe('nativeTextboxValue', function() { var nativeTextboxValue = axe.commons.text.formControlValueMethods.nativeTextboxValue; it('returns the value of textarea elements', function() { var target = queryFixture('<textarea>foo</textarea>', 'textarea'); assert.equal(nativeTextboxValue(target), 'foo'); }); it('returns the value of text field input elements', function() { var formData = { text: 'foo', date: '2018-12-12', 'datetime-local': '2018-12-12T12:34', email: '[email protected]', month: '2018-11', number: '123', search: 'foo', tel: '123456', time: '12:34', url: 'http://foo.bar.baz', week: '2018-W46' }; fixtureSetup( Object.keys(formData).reduce(function(html, fieldType) { return ( html + '<input type="' + fieldType + '" value="' + formData[fieldType] + '">' ); }, '') ); axe.utils .querySelectorAll(axe._tree[0], '#fixture input') .forEach(function(target) { var expected = formData[getNodeType(target.actualNode)]; assert.isDefined(expected); var actual = nativeTextboxValue(target); assert.equal( actual, expected, 'Expected value for ' + 
target.actualNode.outerHTML ); }); }); // This currently breaks in IE11 (isIE11 ? it.skip : it)( 'returns `` for non-text input elements', function() { fixtureSetup( '<input type="button" value="foo">' + '<input type="checkbox" value="foo">' + '<input type="file" value="foo">' + '<input type="hidden" value="foo">' + '<input type="image" value="foo">' + '<input type="password" value="foo">' + '<input type="radio" value="foo">' + '<input type="reset" value="foo">' + '<input type="submit" value="foo">' + '<input type="color" value="#000000">' ); axe.utils .querySelectorAll(axe._tree[0], '#fixture input') .forEach(function(target) { assert.equal( nativeTextboxValue(target), '', 'Expected no value for ' + target.actualNode.outerHTML ); }); } ); it('returns the value of DOM nodes', function() { fixture.innerHTML = '<input value="foo">'; assert.equal(nativeTextboxValue(fixture.querySelector('input')), 'foo'); }); it('returns `` for other elements', function() { // some random elements: ['div', 'span', 'h1', 'output', 'summary', 'style', 'template'].forEach( function(nodeName) { var target = document.createElement(nodeName); target.value = 'foo'; // That shouldn't do anything fixture.appendChild(target); assert.equal(nativeTextboxValue(target), ''); } ); }); }); describe('nativeSelectValue', function() { var nativeSelectValue = axe.commons.text.formControlValueMethods.nativeSelectValue; it('returns the selected option text', function() { var target = queryFixture( '<select>' + ' <option>foo</option>' + ' <option value="bar" selected>baz</option>' + '</select>', 'select' ); assert.equal(nativeSelectValue(target), 'baz'); }); it('returns multiple options, space seperated', function() { // Can't apply multiple "selected" props without setting "multiple" var target = queryFixture( '<select multiple>' + ' <option>oof</option>' + ' <option selected>foo</option>' + ' <option>rab</option>' + ' <option selected>bar</option>' + ' <option>zab</option>' + ' <option selected>baz</option>' + '</select>', 'select' ); assert.equal(nativeSelectValue(target), 'foo bar baz'); }); it('returns options from within optgroup elements', function() { var target = queryFixture( '<select multiple>' + ' <option>oof</option>' + ' <option selected>foo</option>' + ' <optgroup>' + ' <option>rab</option>' + ' <option selected>bar</option>' + ' </optgroup>' + ' <optgroup>' + ' <option>zab</option>' + ' <option selected>baz</option>' + ' </optgroup>' + '</select>', 'select' ); assert.equal(nativeSelectValue(target), 'foo bar baz'); }); it('returns the first option when there are no selected options', function() { // Browser automatically selectes the first option var target = queryFixture( '<select>' + ' <option>foo</option>' + ' <option>baz</option>' + '</select>', 'select' ); assert.equal(nativeSelectValue(target), 'foo'); }); it('returns `` for other elements', function() { // some random elements: ['div', 'span', 'h1', 'output', 'summary', 'style', 'template'].forEach( function(nodeName) { var target = document.createElement(nodeName); target.value = 'foo'; // That shouldn't do anything fixture.appendChild(target); assert.equal(nativeSelectValue(target), ''); } ); }); }); describe('ariaTextboxValue', function() { var ariaTextboxValue = axe.commons.text.formControlValueMethods.ariaTextboxValue; it('returns the text of role=textbox elements', function() { var target = queryFixture( '<div role="textbox">foo</div>', '[role=textbox]' ); assert.equal(ariaTextboxValue(target), 'foo'); }); it('returns `` for elements without 
role=textbox', function() { var target = queryFixture( '<div role="combobox">foo</div>', '[role=combobox]' ); assert.equal(ariaTextboxValue(target), ''); }); it('ignores text hidden with CSS', function() { var target = queryFixture( '<div role="textbox">' + '<span>foo</span>' + '<span style="display: none;">bar</span>' + '<span style="visibility: hidden;">baz</span>' + '</div>', '[role=textbox]' ); assert.equal(ariaTextboxValue(target), 'foo'); }); it('ignores elements with hidden content', function() { var target = queryFixture( '<div role="textbox">' + '<span>span</span>' + '<style>style</style>' + '<template>template</template>' + '<script>script</script>' + '<!-- comment -->' + '<h1>h1</h1>' + '</div>', '[role=textbox]' ); assert.equal(ariaTextboxValue(target), 'spanh1'); }); it('does not return HTML or comments', function() { var target = queryFixture( '<div role="textbox">' + '<i>foo</i>' + '<!-- comment -->' + '</div>', '[role=textbox]' ); assert.equal(ariaTextboxValue(target), 'foo'); }); it('returns the entire text content if the textbox is hidden', function() { var target = queryFixture( '<div role="textbox" style="display:none">' + // Yes, this is how it works in browsers :-( '<style>[role=texbox] { display: none }</style>' + '</div>', '[role=textbox]' ); assert.equal(ariaTextboxValue(target), '[role=texbox] { display: none }'); }); }); describe('ariaListboxValue', function() { var ariaListboxValue = axe.commons.text.formControlValueMethods.ariaListboxValue; it('returns the selected option when the element is a listbox', function() { var target = queryFixture( '<div role="listbox">' + ' <div role="option">foo</div>' + ' <div role="option" aria-selected="true">bar</div>' + ' <div role="option">baz</div>' + '</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), 'bar'); }); it('returns `` when the element is not a listbox', function() { var target = queryFixture( '<div role="combobox">' + ' <div role="option">foo</div>' + ' <div role="option" aria-selected="true">bar</div>' + ' <div role="option">baz</div>' + '</div>', '[role=combobox]' ); assert.equal(ariaListboxValue(target), ''); }); it('returns `` when there is no selected option', function() { var target = queryFixture( '<div role="listbox">' + ' <div role="option">foo</div>' + ' <div role="option">bar</div>' + ' <div role="option">baz</div>' + '</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), ''); }); it('returns `` when aria-selected is not true option', function() { var target = queryFixture( '<div role="listbox">' + ' <div role="option" aria-selected="false">foo</div>' + ' <div role="option" aria-selected="TRUE">bar</div>' + ' <div role="option" aria-selected="yes">baz</div>' + ' <div role="option" aria-selected="selected">fiz</div>' + '</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), ''); }); it('returns selected options from aria-owned', function() { var target = queryFixture( '<div role="listbox" aria-owns="opt1 opt2 opt3"></div>' + '<div role="option" id="opt1">foo</div>' + '<div role="option" id="opt2" aria-selected="true">bar</div>' + '<div role="option" id="opt3">baz</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), 'bar'); }); it('ignores aria-selected for elements that are not options', function() { var target = queryFixture( '<div role="listbox" aria-owns="opt1 opt2 opt3"></div>' + '<div id="opt1">foo</div>' + '<div id="opt2" aria-selected="true">bar</div>' + '<div id="opt3">baz</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), 
''); }); describe('with multiple aria-selected', function() { it('returns the first selected option from children', function() { var target = queryFixture( '<div role="listbox">' + ' <div role="option">foo</div>' + ' <div role="option" aria-selected="true">bar</div>' + ' <div role="option" aria-selected="true">baz</div>' + '</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), 'bar'); }); it('returns the first selected option in aria-owned (as opposed to in the DOM order)', function() { var target = queryFixture( '<div role="listbox" aria-owns="opt3 opt2 opt1"></div>' + '<div role="option" id="opt1" aria-selected="true">foo</div>' + '<div role="option" id="opt2" aria-selected="true">bar</div>' + '<div role="option" id="opt3">baz</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), 'bar'); }); it('returns the a selected child before a selected aria-owned element', function() { var target = queryFixture( '<div role="listbox" aria-owns="opt2 opt3">' + ' <div role="option" aria-selected="true">foo</div>' + '</div>' + '<div role="option" id="opt2" aria-selected="true">bar</div>' + '<div role="option" id="opt3">baz</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), 'foo'); }); it('ignores aria-multiselectable=true', function() { // aria-multiselectable doesn't add additional content to the accessible name var target = queryFixture( '<div role="listbox" aria-owns="opt2 opt3" aria-multiselectable="true">' + ' <div role="option" aria-selected="true">foo</div>' + '</div>' + '<div role="option" id="opt2" aria-selected="true">bar</div>' + '<div role="option" id="opt3" aria-selected="true">baz</div>', '[role=listbox]' ); assert.equal(ariaListboxValue(target), 'foo'); }); }); }); describe('ariaComboboxValue', function() { var ariaComboboxValue = axe.commons.text.formControlValueMethods.ariaComboboxValue; var __ariaListboxValue = axe.commons.text.formControlValueMethods.ariaListboxValue; afterEach(function() { axe.commons.text.formControlValueMethods.ariaListboxValue = __ariaListboxValue; }); var comboboxContent = '<div role="textbox" id="text">nope</div>' + '<div role="listbox" id="list">' + ' <div role="option">foo</div>' + ' <div role="option" aria-selected="true">bar</div>' + '</div>'; it('returns the text of role=combobox elements', function() { var target = queryFixture( '<div role="combobox">' + comboboxContent + '</div>', '[role=combobox]' ); assert.equal(ariaComboboxValue(target), 'bar'); }); it('returns `` for elements without role=combobox', function() { var target = queryFixture( '<div role="combobox">' + comboboxContent + '</div>', '[role=listbox]' ); assert.equal(ariaComboboxValue(target), ''); }); it('passes child listbox to `ariaListboxValue` and returns its result', function() { var target = queryFixture( '<div role="combobox">' + comboboxContent + '</div>', '[role=combobox]' ); axe.commons.text.formControlValueMethods.ariaListboxValue = function( elm ) { assert.equal(elm.actualNode.id, 'list'); return 'Foxtrot'; }; assert.equal(ariaComboboxValue(target), 'Foxtrot'); }); it('passes aria-owned listbox to `ariaListboxValue` and returns its result', function() { var target = queryFixture( '<div role="combobox" aria-owns="text list"></div>' + comboboxContent, '[role=combobox]' ); axe.commons.text.formControlValueMethods.ariaListboxValue = function( elm ) { assert.equal(elm.actualNode.id, 'list'); return 'Foxtrot'; }; assert.equal(ariaComboboxValue(target), 'Foxtrot'); }); }); describe('ariaRangeValue', function() { var rangeRoles = ['progressbar', 
'scrollbar', 'slider', 'spinbutton']; var ariaRangeValue = axe.commons.text.formControlValueMethods.ariaRangeValue; it('returns `` for roles that are not ranges', function() { var target = queryFixture( '<div role="textbox">foo</div>', '[role=textbox]' ); assert.equal(ariaRangeValue(target), ''); }); rangeRoles.forEach(function(role) { describe('with ' + role, function() { it('returns the result of aria-valuenow', function() { var target = queryFixture( '<div role="' + role + '" aria-valuenow="+123">foo</div>', '[aria-valuenow]' ); assert.equal(ariaRangeValue(target), '123'); }); it('returns `0` if aria-valuenow is not a number', function() { var target = queryFixture( '<div role="' + role + '" aria-valuenow="abc">foo</div>', '[aria-valuenow]' ); assert.equal(ariaRangeValue(target), '0'); }); it('returns decimal numbers', function() { var target = queryFixture( '<div role="' + role + '" aria-valuenow="1.5678">foo</div>', '[aria-valuenow]' ); assert.equal(ariaRangeValue(target), '1.5678'); }); it('returns negative numbers', function() { var target = queryFixture( '<div role="' + role + '" aria-valuenow="-1.0">foo</div>', '[aria-valuenow]' ); assert.equal(ariaRangeValue(target), '-1'); }); }); }); }); });
1
14,470
I think you can remove the `(isIE11 ? it.skip : it)(` above, if you're going to skip the test this way.
dequelabs-axe-core
js
@@ -96,11 +96,12 @@ public class CopyOneFile implements Closeable { // Paranoia: make sure the primary node is not smoking crack, by somehow sending us an already corrupted file whose checksum (in its // footer) disagrees with reality: long actualChecksumIn = in.readLong(); - if (actualChecksumIn != checksum) { + // CheckSum is written in Big Endian so we need to reverse bytes + if (actualChecksumIn != Long.reverseBytes(checksum)) { dest.message("file " + tmpName + ": checksum claimed by primary disagrees with the file's footer: claimed checksum=" + checksum + " vs actual=" + actualChecksumIn); throw new IOException("file " + name + ": checksum mismatch after file copy"); } - out.writeLong(checksum); + out.writeLong(actualChecksumIn); bytesCopied += Long.BYTES; close();
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.replicator.nrt; import java.io.Closeable; import java.io.IOException; import java.util.Locale; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; /** Copies one file from an incoming DataInput to a dest filename in a local Directory */ public class CopyOneFile implements Closeable { private final DataInput in; private final IndexOutput out; private final ReplicaNode dest; public final String name; public final String tmpName; public final FileMetaData metaData; public final long bytesToCopy; private final long copyStartNS; private final byte[] buffer; private long bytesCopied; public CopyOneFile(DataInput in, ReplicaNode dest, String name, FileMetaData metaData, byte[] buffer) throws IOException { this.in = in; this.name = name; this.dest = dest; this.buffer = buffer; // TODO: pass correct IOCtx, e.g. seg total size out = dest.createTempOutput(name, "copy", IOContext.DEFAULT); tmpName = out.getName(); // last 8 bytes are checksum, which we write ourselves after copying all bytes and confirming checksum: bytesToCopy = metaData.length - Long.BYTES; if (Node.VERBOSE_FILES) { dest.message("file " + name + ": start copying to tmp file " + tmpName + " length=" + (8+bytesToCopy)); } copyStartNS = System.nanoTime(); this.metaData = metaData; dest.startCopyFile(name); } /** Transfers this file copy to another input, continuing where the first one left off */ public CopyOneFile(CopyOneFile other, DataInput in) { this.in = in; this.dest = other.dest; this.name = other.name; this.out = other.out; this.tmpName = other.tmpName; this.metaData = other.metaData; this.bytesCopied = other.bytesCopied; this.bytesToCopy = other.bytesToCopy; this.copyStartNS = other.copyStartNS; this.buffer = other.buffer; } public void close() throws IOException { out.close(); dest.finishCopyFile(name); } /** Copy another chunk of bytes, returning true once the copy is done */ public boolean visit() throws IOException { // Copy up to 640 KB per visit: for(int i=0;i<10;i++) { long bytesLeft = bytesToCopy - bytesCopied; if (bytesLeft == 0) { long checksum = out.getChecksum(); if (checksum != metaData.checksum) { // Bits flipped during copy! dest.message("file " + tmpName + ": checksum mismatch after copy (bits flipped during network copy?) 
after-copy checksum=" + checksum + " vs expected=" + metaData.checksum + "; cancel job"); throw new IOException("file " + name + ": checksum mismatch after file copy"); } // Paranoia: make sure the primary node is not smoking crack, by somehow sending us an already corrupted file whose checksum (in its // footer) disagrees with reality: long actualChecksumIn = in.readLong(); if (actualChecksumIn != checksum) { dest.message("file " + tmpName + ": checksum claimed by primary disagrees with the file's footer: claimed checksum=" + checksum + " vs actual=" + actualChecksumIn); throw new IOException("file " + name + ": checksum mismatch after file copy"); } out.writeLong(checksum); bytesCopied += Long.BYTES; close(); if (Node.VERBOSE_FILES) { dest.message(String.format(Locale.ROOT, "file %s: done copying [%s, %.3fms]", name, Node.bytesToString(metaData.length), (System.nanoTime() - copyStartNS)/1000000.0)); } return true; } int toCopy = (int) Math.min(bytesLeft, buffer.length); in.readBytes(buffer, 0, toCopy); out.writeBytes(buffer, 0, toCopy); // TODO: rsync will fsync a range of the file; maybe we should do that here for large files in case we crash/killed bytesCopied += toCopy; } return false; } public long getBytesCopied() { return bytesCopied; } }
1
38,573
IMO we should instead fix the read above to `long actualChecksumIn = Long.reverseBytes(in.readLong());` so that we get the actual checksum value. That way the error message below would also be correct (see the sketch after this record).
apache-lucene-solr
java
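The patch and the review comment differ only in which side of the checksum comparison gets byte-swapped. Swapping the value read from the stream, as the comment suggests, leaves the variable holding the true checksum, so the error message that prints it stays accurate. Below is a small illustration of that equivalence, written in Go to match the other sketches in this document and using invented values.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	expected := uint64(0x1122334455667788)  // checksum computed locally over the copied bytes
	onWire := bits.ReverseBytes64(expected) // the same checksum read back in the opposite byte order

	// Variant in the patch: reverse the locally computed value before comparing.
	fmt.Println(onWire == bits.ReverseBytes64(expected)) // true

	// Variant in the review: reverse the value read from the stream instead,
	// so `actual` really holds the checksum and can be logged as-is.
	actual := bits.ReverseBytes64(onWire)
	fmt.Println(actual == expected) // true
	fmt.Printf("claimed=%x actual=%x\n", expected, actual)
}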
@@ -512,6 +512,13 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB DeleteFiles(gopts, repo, removePacksFirst, restic.PackFile) } + // delete obsolete index files (index files that have already been superseded) + obsoleteIndexes := (repo.Index()).(*repository.MasterIndex).Obsolete() + if len(obsoleteIndexes) != 0 { + Verbosef("deleting unused index files...\n") + DeleteFiles(gopts, repo, obsoleteIndexes, restic.IndexFile) + } + if len(repackPacks) != 0 { Verbosef("repacking packs\n") bar := newProgressMax(!gopts.Quiet, uint64(len(repackPacks)), "packs repacked")
1
package main import ( "math" "sort" "strconv" "strings" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/spf13/cobra" ) var errorIndexIncomplete = errors.Fatal("index is not complete") var errorPacksMissing = errors.Fatal("packs from index missing in repo") var errorSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") var cmdPrune = &cobra.Command{ Use: "prune [flags]", Short: "Remove unneeded data from the repository", Long: ` The "prune" command checks the repository and removes data that is not referenced and therefore not needed any more. EXIT STATUS =========== Exit status is 0 if the command was successful, and non-zero if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runPrune(pruneOptions, globalOptions) }, } // PruneOptions collects all options for the cleanup command. type PruneOptions struct { DryRun bool MaxUnused string maxUnusedBytes func(used uint64) (unused uint64) // calculates the number of unused bytes after repacking, according to MaxUnused MaxRepackSize string MaxRepackBytes uint64 RepackCachableOnly bool } var pruneOptions PruneOptions func init() { cmdRoot.AddCommand(cmdPrune) f := cmdPrune.Flags() f.BoolVarP(&pruneOptions.DryRun, "dry-run", "n", false, "do not modify the repository, just print what would be done") addPruneOptions(cmdPrune) } func addPruneOptions(c *cobra.Command) { f := c.Flags() f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')") f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)") f.BoolVar(&pruneOptions.RepackCachableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") } func verifyPruneOptions(opts *PruneOptions) error { if len(opts.MaxRepackSize) > 0 { size, err := parseSizeStr(opts.MaxRepackSize) if err != nil { return err } opts.MaxRepackBytes = uint64(size) } maxUnused := strings.TrimSpace(opts.MaxUnused) if maxUnused == "" { return errors.Fatalf("invalid value for --max-unused: %q", opts.MaxUnused) } // parse MaxUnused either as unlimited, a percentage, or an absolute number of bytes switch { case maxUnused == "unlimited": opts.maxUnusedBytes = func(used uint64) uint64 { return math.MaxUint64 } case strings.HasSuffix(maxUnused, "%"): maxUnused = strings.TrimSuffix(maxUnused, "%") p, err := strconv.ParseFloat(maxUnused, 64) if err != nil { return errors.Fatalf("invalid percentage %q passed for --max-unused: %v", opts.MaxUnused, err) } if p < 0 { return errors.Fatal("percentage for --max-unused must be positive") } if p >= 100 { return errors.Fatal("percentage for --max-unused must be below 100%") } opts.maxUnusedBytes = func(used uint64) uint64 { return uint64(p / (100 - p) * float64(used)) } default: size, err := parseSizeStr(maxUnused) if err != nil { return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err) } opts.maxUnusedBytes = func(used uint64) uint64 { return uint64(size) } } return nil } func shortenStatus(maxLength int, s string) string { if len(s) <= maxLength { return s } if maxLength < 3 { return s[:maxLength] } return s[:maxLength-3] + "..." 
} func runPrune(opts PruneOptions, gopts GlobalOptions) error { err := verifyPruneOptions(&opts) if err != nil { return err } repo, err := OpenRepository(gopts) if err != nil { return err } lock, err := lockRepoExclusive(gopts.ctx, repo) defer unlockRepo(lock) if err != nil { return err } return runPruneWithRepo(opts, gopts, repo, restic.NewIDSet()) } func runPruneWithRepo(opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet) error { // we do not need index updates while pruning! repo.DisableAutoIndexUpdate() if repo.Cache == nil { Print("warning: running prune without a cache, this may be very slow!\n") } Verbosef("loading indexes...\n") err := repo.LoadIndex(gopts.ctx) if err != nil { return err } usedBlobs, err := getUsedBlobs(gopts, repo, ignoreSnapshots) if err != nil { return err } return prune(opts, gopts, repo, usedBlobs) } type packInfo struct { usedBlobs uint unusedBlobs uint duplicateBlobs uint usedSize uint64 unusedSize uint64 tpe restic.BlobType } type packInfoWithID struct { ID restic.ID packInfo } // prune selects which files to rewrite and then does that. The map usedBlobs is // modified in the process. func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedBlobs restic.BlobSet) error { ctx := gopts.ctx var stats struct { blobs struct { used uint duplicate uint unused uint remove uint repack uint repackrm uint } size struct { used uint64 duplicate uint64 unused uint64 remove uint64 repack uint64 repackrm uint64 unref uint64 } packs struct { used uint unused uint partlyUsed uint keep uint } } Verbosef("searching used packs...\n") keepBlobs := restic.NewBlobSet() duplicateBlobs := restic.NewBlobSet() // iterate over all blobs in index to find out which blobs are duplicates for blob := range repo.Index().Each(ctx) { bh := blob.BlobHandle size := uint64(blob.Length) switch { case usedBlobs.Has(bh): // used blob, move to keepBlobs usedBlobs.Delete(bh) keepBlobs.Insert(bh) stats.size.used += size stats.blobs.used++ case keepBlobs.Has(bh): // duplicate blob duplicateBlobs.Insert(bh) stats.size.duplicate += size stats.blobs.duplicate++ default: stats.size.unused += size stats.blobs.unused++ } } // Check if all used blobs have been found in index if len(usedBlobs) != 0 { Warnf("%v not found in the index\n\n"+ "Integrity check failed: Data seems to be missing.\n"+ "Will not start prune to prevent (additional) data loss!\n"+ "Please report this error (along with the output of the 'prune' run) at\n"+ "https://github.com/restic/restic/issues/new/choose\n", usedBlobs) return errorIndexIncomplete } indexPack := make(map[restic.ID]packInfo) // save computed pack header size for pid, hdrSize := range repo.Index().PackSize(ctx, true) { // initialize tpe with NumBlobTypes to indicate it's not set indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} } // iterate over all blobs in index to generate packInfo for blob := range repo.Index().Each(ctx) { ip := indexPack[blob.PackID] // Set blob type if not yet set if ip.tpe == restic.NumBlobTypes { ip.tpe = blob.Type } // mark mixed packs with "Invalid blob type" if ip.tpe != blob.Type { ip.tpe = restic.InvalidBlob } bh := blob.BlobHandle size := uint64(blob.Length) switch { case duplicateBlobs.Has(bh): // duplicate blob ip.usedSize += size ip.duplicateBlobs++ case keepBlobs.Has(bh): // used blob, not duplicate ip.usedSize += size ip.usedBlobs++ default: // unused blob ip.unusedSize += size ip.unusedBlobs++ } // update indexPack indexPack[blob.PackID] = ip 
} Verbosef("collecting packs for deletion and repacking\n") removePacksFirst := restic.NewIDSet() removePacks := restic.NewIDSet() repackPacks := restic.NewIDSet() var repackCandidates []packInfoWithID repackAllPacksWithDuplicates := true keep := func(p packInfo) { stats.packs.keep++ if p.duplicateBlobs > 0 { repackAllPacksWithDuplicates = false } } // loop over all packs and decide what to do bar := newProgressMax(!gopts.Quiet, uint64(len(indexPack)), "packs processed") err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { p, ok := indexPack[id] if !ok { // Pack was not referenced in index and is not used => immediately remove! Verboseff("will remove pack %v as it is unused and not indexed\n", id.Str()) removePacksFirst.Insert(id) stats.size.unref += uint64(packSize) return nil } if p.unusedSize+p.usedSize != uint64(packSize) && !(p.usedBlobs == 0 && p.duplicateBlobs == 0) { // Pack size does not fit and pack is needed => error // If the pack is not needed, this is no error, the pack can // and will be simply removed, see below. Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic rebuild-index'.\n", id.Str(), p.unusedSize+p.usedSize, packSize) return errorSizeNotMatching } // statistics switch { case p.usedBlobs == 0 && p.duplicateBlobs == 0: stats.packs.unused++ case p.unusedBlobs == 0: stats.packs.used++ default: stats.packs.partlyUsed++ } // decide what to do switch { case p.usedBlobs == 0 && p.duplicateBlobs == 0: // All blobs in pack are no longer used => remove pack! removePacks.Insert(id) stats.blobs.remove += p.unusedBlobs stats.size.remove += p.unusedSize case opts.RepackCachableOnly && p.tpe == restic.DataBlob: // if this is a data pack and --repack-cacheable-only is set => keep pack! keep(p) case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob: // All blobs in pack are used and not duplicates/mixed => keep pack! keep(p) default: // all other packs are candidates for repacking repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p}) } delete(indexPack, id) bar.Add(1) return nil }) bar.Done() if err != nil { return err } // At this point indexPacks contains only missing packs! // missing packs that are not needed can be ignored ignorePacks := restic.NewIDSet() for id, p := range indexPack { if p.usedBlobs == 0 && p.duplicateBlobs == 0 { ignorePacks.Insert(id) stats.blobs.remove += p.unusedBlobs stats.size.remove += p.unusedSize delete(indexPack, id) } } if len(indexPack) != 0 { Warnf("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) for id := range indexPack { Warnf(" %v\n", id) } return errorPacksMissing } if len(ignorePacks) != 0 { Warnf("Missing but unneeded pack files are referenced in the index, will be repaired\n") for id := range ignorePacks { Warnf("will forget missing pack file %v\n", id) } } // calculate limit for number of unused bytes in the repo after repacking maxUnusedSizeAfter := opts.maxUnusedBytes(stats.size.used) // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. // This is equivalent to sorting by unused / total space. 
// Instead of unused[i] / used[i] > unused[j] / used[j] we use // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 // Morover duplicates and packs containing trees are sorted to the beginning sort.Slice(repackCandidates, func(i, j int) bool { pi := repackCandidates[i].packInfo pj := repackCandidates[j].packInfo switch { case pi.duplicateBlobs > 0 && pj.duplicateBlobs == 0: return true case pj.duplicateBlobs > 0 && pi.duplicateBlobs == 0: return false case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: return true case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: return false } return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize }) repack := func(id restic.ID, p packInfo) { repackPacks.Insert(id) stats.blobs.repack += p.unusedBlobs + p.duplicateBlobs + p.usedBlobs stats.size.repack += p.unusedSize + p.usedSize stats.blobs.repackrm += p.unusedBlobs stats.size.repackrm += p.unusedSize } for _, p := range repackCandidates { reachedUnusedSizeAfter := (stats.size.unused-stats.size.remove-stats.size.repackrm < maxUnusedSizeAfter) reachedRepackSize := false if opts.MaxRepackBytes > 0 { reachedRepackSize = stats.size.repack+p.unusedSize+p.usedSize > opts.MaxRepackBytes } switch { case reachedRepackSize: keep(p.packInfo) case p.duplicateBlobs > 0, p.tpe != restic.DataBlob: // repacking duplicates/non-data is only limited by repackSize repack(p.ID, p.packInfo) case reachedUnusedSizeAfter: // for all other packs stop repacking if tolerated unused size is reached. keep(p.packInfo) default: repack(p.ID, p.packInfo) } } // if all duplicates are repacked, print out correct statistics if repackAllPacksWithDuplicates { stats.blobs.repackrm += stats.blobs.duplicate stats.size.repackrm += stats.size.duplicate } Verboseff("\nused: %10d blobs / %s\n", stats.blobs.used, formatBytes(stats.size.used)) if stats.blobs.duplicate > 0 { Verboseff("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, formatBytes(stats.size.duplicate)) } Verboseff("unused: %10d blobs / %s\n", stats.blobs.unused, formatBytes(stats.size.unused)) if stats.size.unref > 0 { Verboseff("unreferenced: %s\n", formatBytes(stats.size.unref)) } totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref unusedSize := stats.size.duplicate + stats.size.unused Verboseff("total: %10d blobs / %s\n", totalBlobs, formatBytes(totalSize)) Verboseff("unused size: %s of total size\n", formatPercent(unusedSize, totalSize)) Verbosef("\nto repack: %10d blobs / %s\n", stats.blobs.repack, formatBytes(stats.size.repack)) Verbosef("this removes %10d blobs / %s\n", stats.blobs.repackrm, formatBytes(stats.size.repackrm)) Verbosef("to delete: %10d blobs / %s\n", stats.blobs.remove, formatBytes(stats.size.remove+stats.size.unref)) totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref Verbosef("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, formatBytes(totalPruneSize)) Verbosef("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), formatBytes(totalSize-totalPruneSize)) unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm Verbosef("unused size after prune: %s (%s of remaining size)\n", formatBytes(unusedAfter), formatPercent(unusedAfter, totalSize-totalPruneSize)) Verbosef("\n") Verboseff("totally used packs: %10d\n", stats.packs.used) Verboseff("partly used packs: %10d\n", stats.packs.partlyUsed) Verboseff("unused packs: 
%10d\n\n", stats.packs.unused) Verboseff("to keep: %10d packs\n", stats.packs.keep) Verboseff("to repack: %10d packs\n", len(repackPacks)) Verboseff("to delete: %10d packs\n", len(removePacks)) if len(removePacksFirst) > 0 { Verboseff("to delete: %10d unreferenced packs\n\n", len(removePacksFirst)) } if opts.DryRun { if !gopts.JSON && gopts.verbosity >= 2 { if len(removePacksFirst) > 0 { Printf("Would have removed the following unreferenced packs:\n%v\n\n", removePacksFirst) } Printf("Would have repacked and removed the following packs:\n%v\n\n", repackPacks) Printf("Would have removed the following no longer used packs:\n%v\n\n", removePacks) } // Always quit here if DryRun was set! return nil } // unreferenced packs can be safely deleted first if len(removePacksFirst) != 0 { Verbosef("deleting unreferenced packs\n") DeleteFiles(gopts, repo, removePacksFirst, restic.PackFile) } if len(repackPacks) != 0 { Verbosef("repacking packs\n") bar := newProgressMax(!gopts.Quiet, uint64(len(repackPacks)), "packs repacked") _, err := repository.Repack(ctx, repo, repackPacks, keepBlobs, bar) bar.Done() if err != nil { return errors.Fatalf("%s", err) } // Also remove repacked packs removePacks.Merge(repackPacks) } if len(ignorePacks) == 0 { ignorePacks = removePacks } else { ignorePacks.Merge(removePacks) } if len(ignorePacks) != 0 { err = rebuildIndexFiles(gopts, repo, ignorePacks, nil) if err != nil { return errors.Fatalf("%s", err) } } if len(removePacks) != 0 { Verbosef("removing %d old packs\n", len(removePacks)) DeleteFiles(gopts, repo, removePacks, restic.PackFile) } Verbosef("done\n") return nil } func rebuildIndexFiles(gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs) error { Verbosef("rebuilding index\n") idx := (repo.Index()).(*repository.MasterIndex) packcount := uint64(len(idx.Packs(removePacks))) bar := newProgressMax(!gopts.Quiet, packcount, "packs processed") obsoleteIndexes, err := idx.Save(gopts.ctx, repo, removePacks, extraObsolete, bar) bar.Done() if err != nil { return err } Verbosef("deleting obsolete index files\n") return DeleteFilesChecked(gopts, repo, obsoleteIndexes, restic.IndexFile) } func getUsedBlobs(gopts GlobalOptions, repo restic.Repository, ignoreSnapshots restic.IDSet) (usedBlobs restic.BlobSet, err error) { ctx := gopts.ctx var snapshotTrees restic.IDs Verbosef("loading all snapshots...\n") err = restic.ForAllSnapshots(gopts.ctx, repo, ignoreSnapshots, func(id restic.ID, sn *restic.Snapshot, err error) error { debug.Log("add snapshot %v (tree %v, error %v)", id, *sn.Tree, err) if err != nil { return err } snapshotTrees = append(snapshotTrees, *sn.Tree) return nil }) if err != nil { return nil, err } Verbosef("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) usedBlobs = restic.NewBlobSet() bar := newProgressMax(!gopts.Quiet, uint64(len(snapshotTrees)), "snapshots") defer bar.Done() err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) if err != nil { if repo.Backend().IsNotExist(err) { return nil, errors.Fatal("unable to load a tree from the repo: " + err.Error()) } return nil, err } return usedBlobs, nil }
idx: 1
id: 13485
msg: I think we also need something similar for `rebuild-index`?
proj: restic-restic
lang: go
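The prune code in this record sorts repack candidates by their ratio of unused to used bytes, but instead of dividing it compares cross-products (unused[i]*used[j] > unused[j]*used[i]), which avoids floating point and a division by zero when a pack has no used bytes. Below is a minimal standalone Go sketch of just that comparison; the packInfo fields are a trimmed-down stand-in for the ones in the file above and the sample values are invented.

package main

import (
	"fmt"
	"sort"
)

// packInfo mirrors the per-pack used/unused byte counters kept by the prune code.
type packInfo struct {
	id         string
	usedSize   uint64
	unusedSize uint64
}

func main() {
	// Hypothetical sample packs; only the ratios matter here.
	packs := []packInfo{
		{id: "a", usedSize: 100, unusedSize: 10}, // ~9% unused
		{id: "b", usedSize: 10, unusedSize: 90},  // 90% unused
		{id: "c", usedSize: 0, unusedSize: 50},   // fully unused; a plain ratio would divide by zero
	}

	// Sort packs with the highest unused/used ratio first.
	// unused[i]/used[i] > unused[j]/used[j] is rewritten as
	// unused[i]*used[j] > unused[j]*used[i]; as long as the sizes fit in
	// 32 bits the products fit in uint64, so no division is needed and
	// used == 0 requires no special case.
	sort.Slice(packs, func(i, j int) bool {
		return packs[i].unusedSize*packs[j].usedSize > packs[j].unusedSize*packs[i].usedSize
	})

	for _, p := range packs {
		fmt.Printf("%s: used=%d unused=%d\n", p.id, p.usedSize, p.unusedSize)
	}
}

Running this prints c, b, a: the fully unused pack is repacked first, then the mostly unused one, matching the ordering the prune code relies on.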
@@ -20,12 +20,14 @@ import ( "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/common/util" "github.com/spiffe/spire/pkg/server" + bundleClient "github.com/spiffe/spire/pkg/server/bundle/client" ) const ( - defaultConfigPath = "conf/server/server.conf" - defaultSocketPath = "/tmp/spire-registration.sock" - defaultLogLevel = "INFO" + defaultConfigPath = "conf/server/server.conf" + defaultSocketPath = "/tmp/spire-registration.sock" + defaultLogLevel = "INFO" + defaultBundleEndpointPort = 443 ) // runConfig represents available configurables for file and CLI options
y: 1
package run import ( "context" "crypto/x509/pkix" "errors" "flag" "fmt" "io/ioutil" "net" "os" "path/filepath" "time" "github.com/hashicorp/hcl" "github.com/spiffe/spire/pkg/common/catalog" "github.com/spiffe/spire/pkg/common/cli" "github.com/spiffe/spire/pkg/common/idutil" "github.com/spiffe/spire/pkg/common/log" "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/common/util" "github.com/spiffe/spire/pkg/server" ) const ( defaultConfigPath = "conf/server/server.conf" defaultSocketPath = "/tmp/spire-registration.sock" defaultLogLevel = "INFO" ) // runConfig represents available configurables for file and CLI options type runConfig struct { Server serverRunConfig `hcl:"server"` PluginConfigs catalog.HCLPluginConfigMap `hcl:"plugins"` Telemetry telemetry.FileConfig `hcl:"telemetry"` } type serverRunConfig struct { BindAddress string `hcl:"bind_address"` BindPort int `hcl:"bind_port"` CASubject *caSubjectConfig `hcl:"ca_subject"` CATTL string `hcl:"ca_ttl"` DataDir string `hcl:"data_dir"` LogFile string `hcl:"log_file"` LogLevel string `hcl:"log_level"` RegistrationUDSPath string `hcl:"registration_uds_path"` SVIDTTL string `hcl:"svid_ttl"` TrustDomain string `hcl:"trust_domain"` UpstreamBundle bool `hcl:"upstream_bundle"` Experimental experimentalConfig `hcl:"experimental"` ConfigPath string // Undocumented configurables ProfilingEnabled bool `hcl:"profiling_enabled"` ProfilingPort int `hcl:"profiling_port"` ProfilingFreq int `hcl:"profiling_freq"` ProfilingNames []string `hcl:"profiling_names"` } type experimentalConfig struct { AllowAgentlessNodeAttestors bool `hcl:"allow_agentless_node_attestors"` } type caSubjectConfig struct { Country []string `hcl:"country"` Organization []string `hcl:"organization"` CommonName string `hcl:"common_name"` } type serverConfig struct { server.Config } // Run CLI struct type RunCLI struct { } //Help prints the server cmd usage func (*RunCLI) Help() string { _, err := parseFlags([]string{"-h"}) return err.Error() } //Run the SPIFFE Server func (*RunCLI) Run(args []string) int { cliConfig, err := parseFlags(args) if err != nil { fmt.Fprintln(os.Stderr, err) return 1 } fileConfig, err := parseFile(cliConfig.Server.ConfigPath) if err != nil { fmt.Fprintln(os.Stderr, err) return 1 } c := newDefaultConfig() // Get the plugin and telemetry configurations from the file c.PluginConfigs = fileConfig.PluginConfigs c.Telemetry = fileConfig.Telemetry err = mergeConfigs(c, fileConfig, cliConfig) if err != nil { fmt.Fprintln(os.Stderr, err) return 1 } err = validateConfig(c) if err != nil { fmt.Fprintln(os.Stderr, err) return 1 } // set umask before starting up the server cli.SetUmask(c.Log) s := server.New(c.Config) ctx, cancel := context.WithCancel(context.Background()) defer cancel() util.SignalListener(ctx, cancel) err = s.Run(ctx) if err != nil { c.Log.Error(err.Error()) return 1 } return 0 } //Synopsis of the command func (*RunCLI) Synopsis() string { return "Runs the server" } func parseFile(filePath string) (*runConfig, error) { c := &runConfig{} // Return a friendly error if the file is missing if _, err := os.Stat(filePath); os.IsNotExist(err) { msg := "could not find config file %s: please use the -config flag" p, err := filepath.Abs(filePath) if err != nil { p = filePath msg = "could not determine CWD; config file not found at %s: use -config" } return nil, fmt.Errorf(msg, p) } data, err := ioutil.ReadFile(filePath) if err != nil { return nil, fmt.Errorf("unable to read configuration: %v", err) } if err := hcl.Decode(&c, 
string(data)); err != nil { return nil, fmt.Errorf("unable to decode configuration: %v", err) } return c, nil } func parseFlags(args []string) (*runConfig, error) { flags := flag.NewFlagSet("run", flag.ContinueOnError) c := &runConfig{} flags.StringVar(&c.Server.BindAddress, "bindAddress", "", "IP address or DNS name of the SPIRE server") flags.IntVar(&c.Server.BindPort, "serverPort", 0, "Port number of the SPIRE server") flags.StringVar(&c.Server.RegistrationUDSPath, "registrationUDSPath", "", "UDS Path to bind registration API") flags.StringVar(&c.Server.TrustDomain, "trustDomain", "", "The trust domain that this server belongs to") flags.StringVar(&c.Server.LogFile, "logFile", "", "File to write logs to") flags.StringVar(&c.Server.LogLevel, "logLevel", "", "DEBUG, INFO, WARN or ERROR") flags.StringVar(&c.Server.DataDir, "dataDir", "", "Directory to store runtime data to") flags.StringVar(&c.Server.ConfigPath, "config", defaultConfigPath, "Path to a SPIRE config file") flags.BoolVar(&c.Server.UpstreamBundle, "upstreamBundle", false, "Include upstream CA certificates in the bundle") err := flags.Parse(args) if err != nil { return nil, err } return c, nil } func mergeConfigs(c *serverConfig, fileConfig, cliConfig *runConfig) error { // CLI > File, merge fileConfig first err := mergeConfig(c, fileConfig) if err != nil { return err } return mergeConfig(c, cliConfig) } func mergeConfig(orig *serverConfig, cmd *runConfig) error { // Parse server address if cmd.Server.BindAddress != "" { ip := net.ParseIP(cmd.Server.BindAddress) if ip == nil { return fmt.Errorf("It was not possible to parse BindAdress: %v", cmd.Server.BindAddress) } orig.BindAddress.IP = ip } if cmd.Server.RegistrationUDSPath != "" { orig.BindUDSAddress.Name = cmd.Server.RegistrationUDSPath } if cmd.Server.BindPort != 0 { orig.BindAddress.Port = cmd.Server.BindPort } if cmd.Server.DataDir != "" { orig.DataDir = cmd.Server.DataDir } if cmd.Server.TrustDomain != "" { trustDomain, err := idutil.ParseSpiffeID("spiffe://"+cmd.Server.TrustDomain, idutil.AllowAnyTrustDomain()) if err != nil { return err } orig.TrustDomain = *trustDomain } // Handle log file and level if cmd.Server.LogFile != "" || cmd.Server.LogLevel != "" { logLevel := defaultLogLevel if cmd.Server.LogLevel != "" { logLevel = cmd.Server.LogLevel } logger, err := log.NewLogger(logLevel, cmd.Server.LogFile) if err != nil { return fmt.Errorf("Could not open log file %s: %s", cmd.Server.LogFile, err) } orig.Log = logger } // TODO: CLI should be able to override with `false` value if cmd.Server.UpstreamBundle { orig.UpstreamBundle = cmd.Server.UpstreamBundle } if cmd.Server.Experimental.AllowAgentlessNodeAttestors { orig.Experimental.AllowAgentlessNodeAttestors = cmd.Server.Experimental.AllowAgentlessNodeAttestors } if cmd.Server.ProfilingEnabled { orig.ProfilingEnabled = cmd.Server.ProfilingEnabled } if orig.ProfilingEnabled { if cmd.Server.ProfilingPort > 0 { orig.ProfilingPort = cmd.Server.ProfilingPort } if cmd.Server.ProfilingFreq > 0 { orig.ProfilingFreq = cmd.Server.ProfilingFreq } if len(cmd.Server.ProfilingNames) > 0 { orig.ProfilingNames = cmd.Server.ProfilingNames } } if cmd.Server.SVIDTTL != "" { ttl, err := time.ParseDuration(cmd.Server.SVIDTTL) if err != nil { return fmt.Errorf("unable to parse default ttl %q: %v", cmd.Server.SVIDTTL, err) } orig.SVIDTTL = ttl } if cmd.Server.CATTL != "" { ttl, err := time.ParseDuration(cmd.Server.CATTL) if err != nil { return fmt.Errorf("unable to parse default ttl %q: %v", cmd.Server.CATTL, err) } orig.CATTL = ttl } if 
subject := cmd.Server.CASubject; subject != nil { orig.CASubject = pkix.Name{ Organization: subject.Organization, Country: subject.Country, CommonName: subject.CommonName, } } return nil } func validateConfig(c *serverConfig) error { if c.BindAddress.IP == nil || c.BindAddress.Port == 0 { return errors.New("BindAddress and BindPort are required") } if c.BindUDSAddress.Name == "" { return errors.New("BindUDSAddress Name is required") } if c.TrustDomain.String() == "" { return errors.New("TrustDomain is required") } if c.DataDir == "" { return errors.New("DataDir is required") } return nil } func newDefaultConfig() *serverConfig { // log.NewLogger() cannot return error when using STDOUT logger, _ := log.NewLogger(defaultLogLevel, "") bindAddress := &net.TCPAddr{} bindUDSAddress := &net.UnixAddr{Name: defaultSocketPath, Net: "unix"} return &serverConfig{ Config: server.Config{ Log: logger, BindAddress: bindAddress, BindUDSAddress: bindUDSAddress, }, } }
idx: 1
id: 11420
msg: I think the convention is snake_case for import naming?
proj: spiffe-spire
lang: go
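The SPIRE run command in this record layers configuration by merging the file config first and the CLI config second ("CLI > File"), so any CLI value that is actually set wins. Here is a minimal sketch of that precedence pattern; config and merge are hypothetical stand-ins for serverConfig and mergeConfig, not the real SPIRE types.

package main

import "fmt"

// config is a hypothetical subset of the server configuration.
type config struct {
	BindAddress string
	BindPort    int
	LogLevel    string
}

// merge copies every non-zero field of src onto dst; the caller controls
// precedence purely by the order in which merges are applied.
func merge(dst, src *config) {
	if src.BindAddress != "" {
		dst.BindAddress = src.BindAddress
	}
	if src.BindPort != 0 {
		dst.BindPort = src.BindPort
	}
	if src.LogLevel != "" {
		dst.LogLevel = src.LogLevel
	}
}

func main() {
	defaults := &config{BindAddress: "0.0.0.0", BindPort: 8081, LogLevel: "INFO"}
	fromFile := &config{BindPort: 9090, LogLevel: "DEBUG"}
	fromCLI := &config{LogLevel: "WARN"}

	// File config first, CLI config second: CLI wins where both are set.
	merge(defaults, fromFile)
	merge(defaults, fromCLI)

	fmt.Printf("%+v\n", *defaults) // {BindAddress:0.0.0.0 BindPort:9090 LogLevel:WARN}
}

As the TODO in the original file notes, one limitation of this zero-value-skipping style is that a boolean set to true in the file cannot be overridden back to false from the CLI.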
@@ -303,7 +303,13 @@ public class TiDAGRequest implements Serializable { // double read case if (!hasPk) { // add handle column - indexScanBuilder.addColumns(handleColumn); + if (!tableInfo.isCommonHandle()) { + indexScanBuilder.addColumns(handleColumn); + } else { + for (TiIndexColumn col : tableInfo.getPrimaryKey().getIndexColumns()) { + indexScanBuilder.addColumns(tableInfo.getColumn(col.getName()).toProto(tableInfo)); + } + } ++colCount; addRequiredIndexDataType(); }
y: 1
/* * Copyright 2017 PingCAP, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * See the License for the specific language governing permissions and * limitations under the License. */ package com.pingcap.tikv.meta; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import static com.pingcap.tikv.predicates.PredicateUtils.mergeCNFExpressions; import static java.util.Objects.requireNonNull; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.pingcap.tidb.tipb.Aggregation; import com.pingcap.tidb.tipb.ColumnInfo; import com.pingcap.tidb.tipb.DAGRequest; import com.pingcap.tidb.tipb.EncodeType; import com.pingcap.tidb.tipb.ExecType; import com.pingcap.tidb.tipb.Executor; import com.pingcap.tidb.tipb.IndexScan; import com.pingcap.tidb.tipb.Limit; import com.pingcap.tidb.tipb.Selection; import com.pingcap.tidb.tipb.TableScan; import com.pingcap.tidb.tipb.TopN; import com.pingcap.tikv.codec.KeyUtils; import com.pingcap.tikv.exception.DAGRequestException; import com.pingcap.tikv.exception.TiClientInternalException; import com.pingcap.tikv.expression.AggregateFunction; import com.pingcap.tikv.expression.ByItem; import com.pingcap.tikv.expression.ColumnRef; import com.pingcap.tikv.expression.Expression; import com.pingcap.tikv.expression.visitor.ProtoConverter; import com.pingcap.tikv.key.RowKey; import com.pingcap.tikv.predicates.PredicateUtils; import com.pingcap.tikv.region.TiStoreType; import com.pingcap.tikv.types.DataType; import com.pingcap.tikv.types.IntegerType; import com.pingcap.tikv.util.KeyRangeUtils; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import javax.annotation.Nonnull; import org.tikv.kvproto.Coprocessor; /** * Type TiDAGRequest. * * <p>Used for constructing a new DAG request to TiKV */ public class TiDAGRequest implements Serializable { /** Predefined executor priority map. */ private static final Map<ExecType, Integer> EXEC_TYPE_PRIORITY_MAP = ImmutableMap.<ExecType, Integer>builder() .put(ExecType.TypeTableScan, 0) .put(ExecType.TypeIndexScan, 0) .put(ExecType.TypeSelection, 1) .put(ExecType.TypeAggregation, 2) .put(ExecType.TypeTopN, 3) .put(ExecType.TypeLimit, 4) .build(); private static final ColumnInfo handleColumn = ColumnInfo.newBuilder() .setColumnId(-1) .setPkHandle(true) // We haven't changed the field name in protobuf file, but // we need to set this to true in order to retrieve the handle, // so the name 'setPkHandle' may sounds strange. 
.setTp(8) .setColumnLen(20) .setFlag(2) .build(); private final List<ColumnRef> fields = new ArrayList<>(); private final List<DataType> indexDataTypes = new ArrayList<>(); private final List<Expression> filters = new ArrayList<>(); private final List<ByItem> groupByItems = new ArrayList<>(); private final List<ByItem> orderByItems = new ArrayList<>(); // System like Spark has different type promotion rules // we need a cast to target when given private final List<AggregateFunction> aggregates = new ArrayList<>(); private final Map<Long, List<Coprocessor.KeyRange>> idToRanges = new HashMap<>(); // If index scanning of this request is not possible in some scenario, we downgrade it // to a table scan and use downGradeRanges instead of index scan ranges stored in // idToRanges along with downgradeFilters to perform a table scan. private final List<Expression> downgradeFilters = new ArrayList<>(); private final List<Expression> pushDownFilters = new ArrayList<>(); private final List<AggregateFunction> pushDownAggregates = new ArrayList<>(); private final List<ByItem> pushDownGroupBys = new ArrayList<>(); private final List<ByItem> pushDownOrderBys = new ArrayList<>(); private final PushDownType pushDownType; private TiTableInfo tableInfo; private List<TiPartitionDef> prunedParts; private TiStoreType storeType = TiStoreType.TiKV; private TiIndexInfo indexInfo; private List<Long> prunedPhysicalIds = new ArrayList<>(); private final Map<Long, String> prunedPartNames = new HashMap<>(); private long physicalId; private int pushDownLimits; private int limit; private int timeZoneOffset; private long flags; private TiTimestamp startTs; private Expression having; private boolean distinct; private boolean isDoubleRead; private EncodeType encodeType; private double estimatedCount = -1; public TiDAGRequest(PushDownType pushDownType) { this.pushDownType = pushDownType; this.encodeType = EncodeType.TypeDefault; } private TiDAGRequest(PushDownType pushDownType, EncodeType encodeType) { this.pushDownType = pushDownType; this.encodeType = encodeType; } public TiDAGRequest(PushDownType pushDownType, EncodeType encodeType, int timeZoneOffset) { this(pushDownType, encodeType); this.timeZoneOffset = timeZoneOffset; } public TiDAGRequest(PushDownType pushDownType, int timeZoneOffset) { this(pushDownType, EncodeType.TypeDefault); this.timeZoneOffset = timeZoneOffset; } public List<TiPartitionDef> getPrunedParts() { return prunedParts; } private String getPrunedPartName(long id) { return prunedPartNames.getOrDefault(id, "unknown"); } public void setPrunedParts(List<TiPartitionDef> prunedParts) { this.prunedParts = prunedParts; if (prunedParts != null) { List<Long> ids = new ArrayList<>(); prunedPartNames.clear(); for (TiPartitionDef pDef : prunedParts) { ids.add(pDef.getId()); prunedPartNames.put(pDef.getId(), pDef.getName()); } this.prunedPhysicalIds = ids; } } public List<Long> getPrunedPhysicalIds() { if (!this.tableInfo.isPartitionEnabled()) { return prunedPhysicalIds = ImmutableList.of(this.tableInfo.getId()); } else { return prunedPhysicalIds; } } public TiStoreType getStoreType() { return storeType; } public void setStoreType(TiStoreType storeType) { this.storeType = storeType; } public EncodeType getEncodeType() { return encodeType; } public void setEncodeType(EncodeType encodeType) { this.encodeType = encodeType; } public boolean isCommonHandle() { return tableInfo.isCommonHandle(); } public DAGRequest buildIndexScan() { List<Integer> outputOffsets = new ArrayList<>(); DAGRequest.Builder builder = 
buildScan(true, outputOffsets); return buildRequest(builder, outputOffsets); } public DAGRequest buildTableScan() { List<Integer> outputOffsets = new ArrayList<>(); boolean isCoveringIndex = isCoveringIndexScan(); DAGRequest.Builder builder = buildScan(isCoveringIndex, outputOffsets); return buildRequest(builder, outputOffsets); } private DAGRequest buildRequest( DAGRequest.Builder dagRequestBuilder, List<Integer> outputOffsets) { checkNotNull(startTs, "startTs is null"); checkArgument(startTs.getVersion() != 0, "timestamp is 0"); DAGRequest request = dagRequestBuilder .setTimeZoneOffset(timeZoneOffset) .setFlags(flags) .addAllOutputOffsets(outputOffsets) .setEncodeType(this.encodeType) // set start ts fallback is to solving compatible issue. .setStartTsFallback(startTs.getVersion()) .build(); validateRequest(request); return request; } /** * Unify indexScan and tableScan building logic since they are very much alike. DAGRequest for * IndexScan should also contain filters and aggregation, so we can reuse this part of logic. * * <p>DAGRequest is made up of a chain of executors with strict orders: TableScan/IndexScan > * Selection > Aggregation > TopN/Limit a DAGRequest must contain one and only one TableScan or * IndexScan. * * @param buildIndexScan whether the dagRequest to build should be an {@link * com.pingcap.tidb.tipb.IndexScan} * @return final DAGRequest built */ private DAGRequest.Builder buildScan(boolean buildIndexScan, List<Integer> outputOffsets) { long id = getPhysicalId(); checkNotNull(startTs, "startTs is null"); checkArgument(startTs.getVersion() != 0, "timestamp is 0"); clearPushDownInfo(); DAGRequest.Builder dagRequestBuilder = DAGRequest.newBuilder(); Executor.Builder executorBuilder = Executor.newBuilder(); IndexScan.Builder indexScanBuilder = IndexScan.newBuilder(); TableScan.Builder tblScanBuilder = TableScan.newBuilder(); // find a column's offset in fields Map<String, Integer> colOffsetInFieldMap = new HashMap<>(); // find a column's position in index Map<String, Integer> colPosInIndexMap = new HashMap<>(); if (buildIndexScan) { // IndexScan if (indexInfo == null) { throw new TiClientInternalException("Index is empty for index scan"); } List<TiColumnInfo> columnInfoList = tableInfo.getColumns(); boolean hasPk = false; // We extract index column info List<Integer> indexColOffsets = indexInfo .getIndexColumns() .stream() .map(TiIndexColumn::getOffset) .collect(Collectors.toList()); int idxPos = 0; // for index scan builder, columns are added by its order in index for (Integer idx : indexColOffsets) { TiColumnInfo tiColumnInfo = columnInfoList.get(idx); ColumnInfo columnInfo = tiColumnInfo.toProto(tableInfo); colPosInIndexMap.put(tiColumnInfo.getName(), idxPos++); ColumnInfo.Builder colBuilder = ColumnInfo.newBuilder(columnInfo); if (columnInfo.getColumnId() == -1) { hasPk = true; colBuilder.setPkHandle(true); } indexScanBuilder.addColumns(colBuilder); } int colCount = indexScanBuilder.getColumnsCount(); if (isDoubleRead()) { // double read case: need to retrieve handle // =================== IMPORTANT ====================== // offset for dagRequest should be in accordance with fields // The last pos will be the handle // TODO: we may merge indexDoubleRead and coveringIndexRead logic for (ColumnRef col : getFields()) { Integer pos = colPosInIndexMap.get(col.getName()); if (pos != null) { TiColumnInfo columnInfo = columnInfoList.get(indexColOffsets.get(pos)); if (col.matchName(columnInfo.getName())) { colOffsetInFieldMap.put(col.getName(), pos); } // TODO: primary key 
may also be considered if pkIsHandle } } // double read case if (!hasPk) { // add handle column indexScanBuilder.addColumns(handleColumn); ++colCount; addRequiredIndexDataType(); } if (colCount == 0) { throw new DAGRequestException("Incorrect index scan with zero column count"); } outputOffsets.add(colCount - 1); } else { boolean pkIsNeeded = false; // =================== IMPORTANT ====================== // offset for dagRequest should be in accordance with fields for (ColumnRef col : getFields()) { Integer pos = colPosInIndexMap.get(col.getName()); if (pos != null) { TiColumnInfo columnInfo = columnInfoList.get(indexColOffsets.get(pos)); if (col.matchName(columnInfo.getName())) { outputOffsets.add(pos); colOffsetInFieldMap.put(col.getName(), pos); } } // if a column of field is not contained in index selected, // logically it must be the pk column. Extra check here. else if (tableInfo.getColumn(col.getName()).isPrimaryKey()) { pkIsNeeded = true; // offset should be processed for each primary key encountered outputOffsets.add(colCount); // for index scan, column offset must be in the order of index->handle colOffsetInFieldMap.put(col.getName(), indexColOffsets.size()); } else { throw new DAGRequestException( "columns other than primary key and index key exist in fields while index single read: " + col.getName()); } } // pk is not included in index but still needed if (pkIsNeeded) { indexScanBuilder.addColumns(handleColumn); } } executorBuilder.setTp(ExecType.TypeIndexScan); indexScanBuilder.setTableId(id).setIndexId(indexInfo.getId()); dagRequestBuilder.addExecutors(executorBuilder.setIdxScan(indexScanBuilder).build()); } else { // TableScan executorBuilder.setTp(ExecType.TypeTableScan); tblScanBuilder.setTableId(id); if (tableInfo.isCommonHandle()) { for (TiIndexColumn col : tableInfo.getPrimaryKey().getIndexColumns()) { tblScanBuilder.addPrimaryColumnIds(tableInfo.getColumn(col.getName()).getId()); } } // Step1. Add columns to first executor int lastOffset = 0; for (ColumnRef col : getFields()) { // can't allow duplicated col added into executor. if (!colOffsetInFieldMap.containsKey(col.getName())) { tblScanBuilder.addColumns(tableInfo.getColumn(col.getName()).toProto(tableInfo)); colOffsetInFieldMap.put(col.getName(), lastOffset); lastOffset++; } // column offset should be in accordance with fields outputOffsets.add(colOffsetInFieldMap.get(col.getName())); } dagRequestBuilder.addExecutors(executorBuilder.setTblScan(tblScanBuilder)); } boolean isIndexDoubleScan = buildIndexScan && isDoubleRead(); // Should build these executors when performing CoveringIndexScan/TableScan // clear executorBuilder executorBuilder.clear(); // Step2. 
Add others // DO NOT EDIT EXPRESSION CONSTRUCTION ORDER // Or make sure the construction order is below: // TableScan/IndexScan > Selection > Aggregation > TopN/Limit Expression whereExpr = mergeCNFExpressions(getFilters()); if (whereExpr != null) { if (!isIndexDoubleScan || isExpressionCoveredByIndex(whereExpr)) { executorBuilder.setTp(ExecType.TypeSelection); dagRequestBuilder.addExecutors( executorBuilder.setSelection( Selection.newBuilder() .addConditions(ProtoConverter.toProto(whereExpr, colOffsetInFieldMap)))); executorBuilder.clear(); addPushDownFilters(); } else { return dagRequestBuilder; } } if (!getGroupByItems().isEmpty() || !getAggregates().isEmpty()) { // only allow table scan or covering index scan push down groupby and agg if (!isIndexDoubleScan || (isGroupByCoveredByIndex() && isAggregateCoveredByIndex())) { pushDownAggAndGroupBy( dagRequestBuilder, executorBuilder, outputOffsets, colOffsetInFieldMap); } else { return dagRequestBuilder; } } if (!getOrderByItems().isEmpty()) { if (!isIndexDoubleScan || isOrderByCoveredByIndex()) { // only allow table scan or covering index scan push down orderby pushDownOrderBy(dagRequestBuilder, executorBuilder, colOffsetInFieldMap); } } else if (getLimit() != 0) { if (!isIndexDoubleScan) { pushDownLimit(dagRequestBuilder, executorBuilder); } } return dagRequestBuilder; } private void pushDownLimit( DAGRequest.Builder dagRequestBuilder, Executor.Builder executorBuilder) { Limit.Builder limitBuilder = Limit.newBuilder(); limitBuilder.setLimit(getLimit()); executorBuilder.setTp(ExecType.TypeLimit); dagRequestBuilder.addExecutors(executorBuilder.setLimit(limitBuilder)); executorBuilder.clear(); addPushDownLimits(); } private void pushDownOrderBy( DAGRequest.Builder dagRequestBuilder, Executor.Builder executorBuilder, Map<String, Integer> colOffsetInFieldMap) { TopN.Builder topNBuilder = TopN.newBuilder(); getOrderByItems() .forEach( tiByItem -> topNBuilder.addOrderBy( com.pingcap.tidb.tipb.ByItem.newBuilder() .setExpr(ProtoConverter.toProto(tiByItem.getExpr(), colOffsetInFieldMap)) .setDesc(tiByItem.isDesc()))); executorBuilder.setTp(ExecType.TypeTopN); topNBuilder.setLimit(getLimit()); dagRequestBuilder.addExecutors(executorBuilder.setTopN(topNBuilder)); executorBuilder.clear(); addPushDownOrderBys(); } private void pushDownAggAndGroupBy( DAGRequest.Builder dagRequestBuilder, Executor.Builder executorBuilder, List<Integer> outputOffsets, Map<String, Integer> colOffsetInFieldMap) { Aggregation.Builder aggregationBuilder = Aggregation.newBuilder(); getAggregates() .forEach( tiExpr -> aggregationBuilder.addAggFunc(ProtoConverter.toProto(tiExpr, colOffsetInFieldMap))); getGroupByItems() .forEach( tiByItem -> aggregationBuilder.addGroupBy( ProtoConverter.toProto(tiByItem.getExpr(), colOffsetInFieldMap))); executorBuilder.setTp(ExecType.TypeAggregation); dagRequestBuilder.addExecutors(executorBuilder.setAggregation(aggregationBuilder)); executorBuilder.clear(); addPushDownGroupBys(); addPushDownAggregates(); // adding output offsets for aggs outputOffsets.clear(); for (int i = 0; i < getAggregates().size(); i++) { outputOffsets.add(i); } // adding output offsets for group by int currentMaxOutputOffset = outputOffsets.get(outputOffsets.size() - 1) + 1; for (int i = 0; i < getGroupByItems().size(); i++) { outputOffsets.add(currentMaxOutputOffset + i); } } private boolean isExpressionCoveredByIndex(Expression expr) { Set<String> indexColumnRefSet = indexInfo .getIndexColumns() .stream() .filter(x -> !x.isPrefixIndex()) .map(TiIndexColumn::getName) 
.collect(Collectors.toSet()); return !isDoubleRead() && PredicateUtils.extractColumnRefFromExpression(expr) .stream() .map(ColumnRef::getName) .allMatch(indexColumnRefSet::contains); } private boolean isGroupByCoveredByIndex() { return isByItemCoveredByIndex(getGroupByItems()); } private boolean isOrderByCoveredByIndex() { return isByItemCoveredByIndex(getOrderByItems()); } private boolean isByItemCoveredByIndex(List<ByItem> byItems) { if (byItems.isEmpty()) { return false; } return byItems.stream().allMatch(x -> isExpressionCoveredByIndex(x.getExpr())); } private boolean isAggregateCoveredByIndex() { if (aggregates.isEmpty()) { return false; } return aggregates.stream().allMatch(this::isExpressionCoveredByIndex); } /** * Check if a DAG request is valid. * * <p>Note: When constructing a DAG request, a executor with an ExecType of higher priority should * always be placed before those lower ones. * * @param dagRequest Request DAG. */ private void validateRequest(DAGRequest dagRequest) { requireNonNull(dagRequest); // check encode type requireNonNull(dagRequest.getEncodeType()); // A DAG request must has at least one executor. if (dagRequest.getExecutorsCount() < 1) { throw new DAGRequestException("Invalid executors count:" + dagRequest.getExecutorsCount()); } // A DAG request must start with TableScan or IndexScan Executor ExecType formerType = dagRequest.getExecutors(0).getTp(); if (formerType != ExecType.TypeTableScan && formerType != ExecType.TypeIndexScan) { throw new DAGRequestException( "Invalid first executor type:" + formerType + ", must one of TypeTableScan or TypeIndexScan"); } for (int i = 1; i < dagRequest.getExecutorsCount(); i++) { ExecType currentType = dagRequest.getExecutors(i).getTp(); if (EXEC_TYPE_PRIORITY_MAP.get(currentType) < EXEC_TYPE_PRIORITY_MAP.get(formerType)) { throw new DAGRequestException("Invalid executor priority."); } formerType = currentType; } } public TiTableInfo getTableInfo() { return this.tableInfo; } public TiDAGRequest setTableInfo(TiTableInfo tableInfo) { this.tableInfo = requireNonNull(tableInfo, "tableInfo is null"); setPhysicalId(tableInfo.getId()); return this; } public long getPhysicalId() { return this.physicalId; } public TiDAGRequest setPhysicalId(long id) { this.physicalId = id; return this; } public TiIndexInfo getIndexInfo() { return indexInfo; } public TiDAGRequest setIndexInfo(TiIndexInfo indexInfo) { this.indexInfo = requireNonNull(indexInfo, "indexInfo is null"); return this; } public void clearIndexInfo() { indexInfo = null; clearPushDownInfo(); } public int getLimit() { return limit; } /** * add limit clause to select query. * * @param limit is just a integer. 
* @return a SelectBuilder */ public TiDAGRequest setLimit(int limit) { this.limit = limit; return this; } int getTimeZoneOffset() { return timeZoneOffset; } /** * set truncate mode * * @param mode truncate mode * @return a TiDAGRequest */ TiDAGRequest setTruncateMode(TiDAGRequest.TruncateMode mode) { flags = requireNonNull(mode, "mode is null").mask(flags); return this; } @VisibleForTesting long getFlags() { return flags; } @VisibleForTesting public TiTimestamp getStartTs() { return startTs; } /** * set start timestamp for the transaction * * @param startTs timestamp * @return a TiDAGRequest */ public TiDAGRequest setStartTs(@Nonnull TiTimestamp startTs) { this.startTs = startTs; return this; } /** * set having clause to select query * * @param having is a expression represents Having * @return a TiDAGRequest */ public TiDAGRequest setHaving(Expression having) { this.having = requireNonNull(having, "having is null"); return this; } public boolean isDistinct() { return distinct; } public TiDAGRequest setDistinct(boolean distinct) { this.distinct = distinct; return this; } public TiDAGRequest addAggregate(AggregateFunction expr) { requireNonNull(expr, "aggregation expr is null"); aggregates.add(expr); return this; } List<AggregateFunction> getAggregates() { return aggregates; } /** * add a order by clause to select query. * * @param byItem is a TiByItem. * @return a SelectBuilder */ public TiDAGRequest addOrderByItem(ByItem byItem) { orderByItems.add(requireNonNull(byItem, "byItem is null")); return this; } List<ByItem> getOrderByItems() { return orderByItems; } /** * add a group by clause to select query * * @param byItem is a TiByItem * @return a SelectBuilder */ public TiDAGRequest addGroupByItem(ByItem byItem) { groupByItems.add(requireNonNull(byItem, "byItem is null")); return this; } public List<ByItem> getGroupByItems() { return groupByItems; } /** * Field is not support in TiDB yet, for here we simply allow TiColumnRef instead of TiExpr like * in SelectRequest proto * * <p> * * <p>This interface allows duplicate columns and it's user's responsibility to do dedup since we * need to ensure exact order and items preserved during decoding * * @param column is column referred during selectReq */ public TiDAGRequest addRequiredColumn(ColumnRef column) { if (!column.isResolved()) { throw new UnsupportedOperationException( String.format("cannot add unresolved column %s to dag request", column.getName())); } fields.add(requireNonNull(column, "columnRef is null")); return this; } public List<ColumnRef> getFields() { return fields; } /** Required index columns for double read */ private void addRequiredIndexDataType() { indexDataTypes.add(requireNonNull(IntegerType.BIGINT, "dataType is null")); } public List<DataType> getIndexDataTypes() { return indexDataTypes; } /** * set key range of scan * * @param ranges key range of scan */ public TiDAGRequest addRanges(Map<Long, List<Coprocessor.KeyRange>> ranges) { idToRanges.putAll(requireNonNull(ranges, "KeyRange is null")); return this; } private void resetRanges() { idToRanges.clear(); } public void resetFilters(List<Expression> filters) { this.filters.clear(); this.filters.addAll(filters); } public List<Coprocessor.KeyRange> getRangesByPhysicalId(long physicalId) { return idToRanges.get(physicalId); } public Map<Long, List<Coprocessor.KeyRange>> getRangesMaps() { return idToRanges; } public TiDAGRequest addFilters(List<Expression> filters) { this.filters.addAll(requireNonNull(filters, "filters expr is null")); return this; } public List<Expression> 
getFilters() { return filters; } public void addDowngradeFilter(Expression filter) { this.downgradeFilters.add(requireNonNull(filter, "downgrade filter is null")); } public List<Expression> getDowngradeFilters() { return downgradeFilters; } private void addPushDownFilters() { // all filters will be pushed down // TODO: choose some filters to push down this.pushDownFilters.addAll(filters); } private List<Expression> getPushDownFilters() { return pushDownFilters; } private void addPushDownAggregates() { this.pushDownAggregates.addAll(aggregates); } public List<AggregateFunction> getPushDownAggregates() { return pushDownAggregates; } private void addPushDownGroupBys() { this.pushDownGroupBys.addAll(getGroupByItems()); } public List<ByItem> getPushDownGroupBys() { return pushDownGroupBys; } private void addPushDownOrderBys() { this.pushDownOrderBys.addAll(getOrderByItems()); } public List<ByItem> getPushDownOrderBys() { return pushDownOrderBys; } private void addPushDownLimits() { this.pushDownLimits = limit; } private int getPushDownLimits() { return pushDownLimits; } private void clearPushDownInfo() { indexDataTypes.clear(); pushDownFilters.clear(); pushDownAggregates.clear(); pushDownGroupBys.clear(); pushDownOrderBys.clear(); pushDownLimits = 0; } /** * Check whether the DAG request has any aggregate expression. * * @return the boolean */ public boolean hasPushDownAggregate() { return !getPushDownAggregates().isEmpty(); } /** * Check whether the DAG request has any group by expression. * * @return the boolean */ public boolean hasPushDownGroupBy() { return !getPushDownGroupBys().isEmpty(); } /** * Returns whether needs to read handle from index first and find its corresponding row. i.e, * "double read" * * @return boolean */ public boolean isDoubleRead() { return isDoubleRead; } /** * Sets isDoubleRead * * @param isDoubleRead if is double read */ public void setIsDoubleRead(boolean isDoubleRead) { this.isDoubleRead = isDoubleRead; } /** * Returns whether the request is CoveringIndex * * @return boolean */ private boolean isCoveringIndexScan() { return hasIndex() && !isDoubleRead(); } /** * Returns whether this request is of indexScanType * * @return true iff indexInfo is provided, false otherwise */ public boolean hasIndex() { return indexInfo != null; } /** * Whether we use streaming processing to retrieve data * * @return push down type. */ public PushDownType getPushDownType() { return pushDownType; } /** Get the estimated row count will be fetched from this request. */ public double getEstimatedCount() { return estimatedCount; } /** Set the estimated row count will be fetched from this request. 
*/ public void setEstimatedCount(double estimatedCount) { this.estimatedCount = estimatedCount; } public void init(boolean readHandle) { if (readHandle) { buildIndexScan(); } else { buildTableScan(); } } private void init() { init(hasIndex()); } public IndexScanType getIndexScanType() { if (hasIndex()) { if (isDoubleRead) { return IndexScanType.INDEX_SCAN; } else { return IndexScanType.COVERING_INDEX_SCAN; } } else { return IndexScanType.TABLE_SCAN; } } @Override public String toString() { return this.copy().toStringInternal(); } private String toStringInternal() { init(); StringBuilder sb = new StringBuilder(); if (tableInfo != null) { sb.append(String.format("[table: %s] ", tableInfo.getName())); } boolean isIndexScan = false; switch (getIndexScanType()) { case INDEX_SCAN: sb.append("IndexScan"); sb.append(String.format("[Index: %s] ", indexInfo.getName())); isIndexScan = true; break; case COVERING_INDEX_SCAN: sb.append("CoveringIndexScan"); sb.append(String.format("[Index: %s] ", indexInfo.getName())); break; case TABLE_SCAN: sb.append("TableScan"); } if (!getFields().isEmpty()) { sb.append(", Columns: "); Joiner.on(", ").skipNulls().appendTo(sb, getFields()); } if (isIndexScan && !getDowngradeFilters().isEmpty()) { sb.append(", Downgrade Filter: "); Joiner.on(", ").skipNulls().appendTo(sb, getDowngradeFilters()); } if (!isIndexScan && !getFilters().isEmpty()) { sb.append(", Residual Filter: "); Joiner.on(", ").skipNulls().appendTo(sb, getFilters()); } if (!getPushDownFilters().isEmpty()) { sb.append(", PushDown Filter: "); Joiner.on(", ").skipNulls().appendTo(sb, getPushDownFilters()); } // Key ranges might be also useful if (!getRangesMaps().isEmpty()) { sb.append(", KeyRange: ["); if (tableInfo.isPartitionEnabled()) { getRangesMaps() .forEach( (key, value) -> { for (Coprocessor.KeyRange v : value) { sb.append(" partition: ").append(getPrunedPartName(key)); // LogDesensitization: show key range in coprocessor request in log sb.append(KeyUtils.formatBytesUTF8(v)); } }); } else { getRangesMaps() .values() .forEach( vList -> { for (Coprocessor.KeyRange range : vList) { // LogDesensitization: show key range in coprocessor request in log sb.append(KeyUtils.formatBytesUTF8(range)); } }); } sb.append("]"); } if (!getPushDownFilters().isEmpty()) { sb.append(", Aggregates: "); Joiner.on(", ").skipNulls().appendTo(sb, getPushDownAggregates()); } if (!getGroupByItems().isEmpty()) { sb.append(", Group By: "); Joiner.on(", ").skipNulls().appendTo(sb, getGroupByItems()); } if (!getOrderByItems().isEmpty()) { sb.append(", Order By: "); Joiner.on(", ").skipNulls().appendTo(sb, getOrderByItems()); } if (getLimit() != 0) { sb.append(", Limit: "); sb.append("[").append(limit).append("]"); } sb.append(", startTs: ").append(startTs.getVersion()); return sb.toString(); } public TiDAGRequest copy() { try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); ObjectOutputStream oos = new ObjectOutputStream(baos); oos.writeObject(this); ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); ObjectInputStream ois = new ObjectInputStream(bais); return ((TiDAGRequest) ois.readObject()); } catch (Exception e) { throw new RuntimeException(e); } } public TiDAGRequest copyReqWithPhysicalId(long id) { TiDAGRequest req = this.copy(); req.setPhysicalId(id); List<Coprocessor.KeyRange> currentIdRange = req.getRangesByPhysicalId(id); req.resetRanges(); Map<Long, List<Coprocessor.KeyRange>> rangeMap = new HashMap<>(); rangeMap.put(id, currentIdRange); req.addRanges(rangeMap); return req; } public 
enum TruncateMode { IgnoreTruncation(0x1), TruncationAsWarning(0x2); private final long mask; TruncateMode(long mask) { this.mask = mask; } public long mask(long flags) { return flags | mask; } } /** Whether we use streaming to push down the request */ public enum PushDownType { STREAMING, NORMAL } public enum IndexScanType { INDEX_SCAN, COVERING_INDEX_SCAN, TABLE_SCAN } public static class Builder { private final List<String> requiredCols = new ArrayList<>(); private final List<Expression> filters = new ArrayList<>(); private final List<ByItem> orderBys = new ArrayList<>(); private final Map<Long, List<Coprocessor.KeyRange>> ranges = new HashMap<>(); private TiTableInfo tableInfo; private long physicalId; private int limit; private TiTimestamp startTs; public static Builder newBuilder() { return new Builder(); } public Builder setFullTableScan(TiTableInfo tableInfo) { requireNonNull(tableInfo); setTableInfo(tableInfo); if (!tableInfo.isPartitionEnabled()) { RowKey start = RowKey.createMin(tableInfo.getId()); RowKey end = RowKey.createBeyondMax(tableInfo.getId()); ranges.put( tableInfo.getId(), ImmutableList.of( KeyRangeUtils.makeCoprocRange(start.toByteString(), end.toByteString()))); } else { for (TiPartitionDef pDef : tableInfo.getPartitionInfo().getDefs()) { RowKey start = RowKey.createMin(pDef.getId()); RowKey end = RowKey.createBeyondMax(pDef.getId()); ranges.put( pDef.getId(), ImmutableList.of( KeyRangeUtils.makeCoprocRange(start.toByteString(), end.toByteString()))); } } return this; } public Builder setLimit(int limit) { this.limit = limit; return this; } public Builder setTableInfo(TiTableInfo tableInfo) { this.tableInfo = tableInfo; setPhysicalId(tableInfo.getId()); return this; } public Builder setPhysicalId(long id) { this.physicalId = id; return this; } public Builder addRequiredCols(List<String> cols) { this.requiredCols.addAll(cols); return this; } public Builder addFilter(Expression filter) { this.filters.add(filter); return this; } public Builder addOrderBy(ByItem item) { this.orderBys.add(item); return this; } public Builder setStartTs(@Nonnull TiTimestamp ts) { this.startTs = ts; return this; } public TiDAGRequest build(PushDownType pushDownType) { TiDAGRequest req = new TiDAGRequest(pushDownType); req.setTableInfo(tableInfo); req.setPhysicalId(physicalId); req.addRanges(ranges); req.addFilters(filters); // this request will push down all filters req.addPushDownFilters(); if (!orderBys.isEmpty()) { orderBys.forEach(req::addOrderByItem); } if (limit != 0) { req.setLimit(limit); } requiredCols.forEach(c -> req.addRequiredColumn(ColumnRef.create(c, tableInfo.getColumn(c)))); req.setStartTs(startTs); return req; } } }
idx: 1
id: 13102
msg: I think hasPk is false?
proj: pingcap-tispark
lang: java
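TiDAGRequest.validateRequest in this record enforces the executor chain order TableScan/IndexScan > Selection > Aggregation > TopN/Limit by looking up each executor type in a priority map and rejecting any decrease. The sketch below illustrates that check in Go rather than Java (to keep the inserted examples in one language); the type and function names are hypothetical, not part of the tispark API.

package main

import (
	"errors"
	"fmt"
)

type execType string

// priority mirrors EXEC_TYPE_PRIORITY_MAP: scans first, then selection,
// aggregation, and finally topN/limit.
var priority = map[execType]int{
	"TableScan":   0,
	"IndexScan":   0,
	"Selection":   1,
	"Aggregation": 2,
	"TopN":        3,
	"Limit":       4,
}

// validate rejects empty chains, chains that do not start with a scan,
// and any executor whose priority is lower than its predecessor's.
func validate(execs []execType) error {
	if len(execs) == 0 {
		return errors.New("a DAG request needs at least one executor")
	}
	if execs[0] != "TableScan" && execs[0] != "IndexScan" {
		return fmt.Errorf("first executor must be a scan, got %s", execs[0])
	}
	prev := priority[execs[0]]
	for _, e := range execs[1:] {
		cur, ok := priority[e]
		if !ok {
			return fmt.Errorf("unknown executor type %s", e)
		}
		if cur < prev {
			return fmt.Errorf("executor %s out of order", e)
		}
		prev = cur
	}
	return nil
}

func main() {
	fmt.Println(validate([]execType{"IndexScan", "Selection", "Aggregation", "Limit"})) // <nil>
	fmt.Println(validate([]execType{"Selection", "TableScan"}))                         // error: must start with a scan
}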
@@ -113,6 +113,7 @@ var opts struct { FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"` NumRuns int `long:"num_runs" short:"n" description:"Number of times to run each test target."` TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."` + SurefireDir cli.Filepath `long:"surefir_dir" default:"plz-out/surefire-reports" description:"Directory to copy XML test results to."` ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."` Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."` Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."`
y: 1
package main import ( "fmt" "net/http" _ "net/http/pprof" "os" "path" "runtime" "runtime/pprof" "strings" "syscall" "time" "github.com/jessevdk/go-flags" "gopkg.in/op/go-logging.v1" "build" "cache" "clean" "cli" "core" "export" "follow" "fs" "gc" "hashes" "help" "metrics" "output" "parse" "query" "run" "sync" "test" "tool" "update" "utils" "watch" ) var log = logging.MustGetLogger("plz") var config *core.Configuration var opts struct { Usage string `usage:"Please is a high-performance multi-language build system.\n\nIt uses BUILD files to describe what to build and how to build it.\nSee https://please.build for more information about how it works and what Please can do for you."` BuildFlags struct { Config string `short:"c" long:"config" description:"Build config to use. Defaults to opt."` Arch cli.Arch `short:"a" long:"arch" description:"Architecture to compile for."` RepoRoot cli.Filepath `short:"r" long:"repo_root" description:"Root of repository to build."` KeepGoing bool `short:"k" long:"keep_going" description:"Don't stop on first failed target."` NumThreads int `short:"n" long:"num_threads" description:"Number of concurrent build operations. Default is number of CPUs + 2."` Include []string `short:"i" long:"include" description:"Label of targets to include in automatic detection."` Exclude []string `short:"e" long:"exclude" description:"Label of targets to exclude from automatic detection."` Option ConfigOverrides `short:"o" long:"override" env:"PLZ_OVERRIDES" env-delim:";" description:"Options to override from .plzconfig (e.g. -o please.selfupdate:false)"` Profile string `long:"profile" env:"PLZ_CONFIG_PROFILE" description:"Configuration profile to load; e.g. --profile=dev will load .plzconfig.dev if it exists."` } `group:"Options controlling what to build & how to build it"` OutputFlags struct { Verbosity int `short:"v" long:"verbosity" description:"Verbosity of output (higher number = more output, default 1 -> warnings and errors only)" default:"1"` LogFile cli.Filepath `long:"log_file" description:"File to echo full logging output to" default:"plz-out/log/build.log"` LogFileLevel int `long:"log_file_level" description:"Log level for file output" default:"4"` InteractiveOutput bool `long:"interactive_output" description:"Show interactive output ina terminal"` PlainOutput bool `short:"p" long:"plain_output" description:"Don't show interactive output."` Colour bool `long:"colour" description:"Forces coloured output from logging & other shell output."` NoColour bool `long:"nocolour" description:"Forces colourless output from logging & other shell output."` TraceFile cli.Filepath `long:"trace_file" description:"File to write Chrome tracing output into"` ShowAllOutput bool `long:"show_all_output" description:"Show all output live from all commands. Implies --plain_output."` CompletionScript bool `long:"completion_script" description:"Prints the bash / zsh completion script to stdout"` Version bool `long:"version" description:"Print the version of the tool"` } `group:"Options controlling output & logging"` FeatureFlags struct { NoUpdate bool `long:"noupdate" description:"Disable Please attempting to auto-update itself."` NoCache bool `long:"nocache" description:"Disable caches (NB. not incrementality)"` NoHashVerification bool `long:"nohash_verification" description:"Hash verification errors are nonfatal."` NoLock bool `long:"nolock" description:"Don't attempt to lock the repo exclusively. 
Use with care."` KeepWorkdirs bool `long:"keep_workdirs" description:"Don't clean directories in plz-out/tmp after successfully building targets."` } `group:"Options that enable / disable certain features"` Profile string `long:"profile_file" hidden:"true" description:"Write profiling output to this file"` MemProfile string `long:"mem_profile_file" hidden:"true" description:"Write a memory profile to this file"` ProfilePort int `long:"profile_port" hidden:"true" description:"Serve profiling info on this port."` ParsePackageOnly bool `description:"Parses a single package only. All that's necessary for some commands." no-flag:"true"` Complete string `long:"complete" hidden:"true" env:"PLZ_COMPLETE" description:"Provide completion options for this build target."` VisibilityParse bool `description:"Parse all targets that the original targets are visible to. Used for some query steps." no-flag:"true"` Build struct { Prepare bool `long:"prepare" description:"Prepare build directory for these targets but don't build them."` Shell bool `long:"shell" description:"Like --prepare, but opens a shell in the build directory with the appropriate environment variables."` ShowStatus bool `long:"show_status" hidden:"true" description:"Show status of each target in output after build"` Args struct { // Inner nesting is necessary to make positional-args work :( Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to build"` } `positional-args:"true" required:"true"` } `command:"build" description:"Builds one or more targets"` Rebuild struct { Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" required:"true" description:"Targets to rebuild"` } `positional-args:"true" required:"true"` } `command:"rebuild" description:"Forces a rebuild of one or more targets"` Hash struct { Detailed bool `long:"detailed" description:"Produces a detailed breakdown of the hash"` Update bool `short:"u" long:"update" description:"Rewrites the hashes in the BUILD file to the new values"` Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to build"` } `positional-args:"true" required:"true"` } `command:"hash" description:"Calculates hash for one or more targets"` Test struct { FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"` NumRuns int `long:"num_runs" short:"n" description:"Number of times to run each test target."` TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."` ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."` Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."` Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."` Detailed bool `long:"detailed" description:"Prints more detailed output after tests."` // Slightly awkward since we can specify a single test with arguments or multiple test targets. 
Args struct { Target core.BuildLabel `positional-arg-name:"target" description:"Target to test"` Args []string `positional-arg-name:"arguments" description:"Arguments or test selectors"` } `positional-args:"true"` } `command:"test" description:"Builds and tests one or more targets"` Cover struct { FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"` NoCoverageReport bool `long:"nocoverage_report" description:"Suppress the per-file coverage report displayed in the shell"` LineCoverageReport bool `short:"l" long:"line_coverage_report" description:" Show a line-by-line coverage report for all affected files."` NumRuns int `short:"n" long:"num_runs" description:"Number of times to run each test target."` IncludeAllFiles bool `short:"a" long:"include_all_files" description:"Include all dependent files in coverage (default is just those from relevant packages)"` IncludeFile []string `long:"include_file" description:"Filenames to filter coverage display to"` TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."` CoverageResultsFile cli.Filepath `long:"coverage_results_file" default:"plz-out/log/coverage.json" description:"File to write combined coverage results to."` ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."` Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."` Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."` Detailed bool `long:"detailed" description:"Prints more detailed output after tests."` Args struct { Target core.BuildLabel `positional-arg-name:"target" description:"Target to test" group:"one test"` Args []string `positional-arg-name:"arguments" description:"Arguments or test selectors" group:"one test"` } `positional-args:"true"` } `command:"cover" description:"Builds and tests one or more targets, and calculates coverage."` Run struct { Env bool `long:"env" description:"Overrides environment variables (e.g. 
PATH) in the new process."` Parallel struct { NumTasks int `short:"n" long:"num_tasks" default:"10" description:"Maximum number of subtasks to run in parallel"` Quiet bool `short:"q" long:"quiet" description:"Suppress output from successful subprocesses."` PositionalArgs struct { Targets []core.BuildLabel `positional-arg-name:"target" description:"Targets to run"` } `positional-args:"true" required:"true"` Args []string `short:"a" long:"arg" description:"Arguments to pass to the called processes."` } `command:"parallel" description:"Runs a sequence of targets in parallel"` Sequential struct { Quiet bool `short:"q" long:"quiet" description:"Suppress output from successful subprocesses."` PositionalArgs struct { Targets []core.BuildLabel `positional-arg-name:"target" description:"Targets to run"` } `positional-args:"true" required:"true"` Args []string `short:"a" long:"arg" description:"Arguments to pass to the called processes."` } `command:"sequential" description:"Runs a sequence of targets sequentially."` Args struct { Target core.BuildLabel `positional-arg-name:"target" required:"true" description:"Target to run"` Args []string `positional-arg-name:"arguments" description:"Arguments to pass to target when running (to pass flags to the target, put -- before them)"` } `positional-args:"true"` } `command:"run" subcommands-optional:"true" description:"Builds and runs a single target"` Clean struct { NoBackground bool `long:"nobackground" short:"f" description:"Don't fork & detach until clean is finished."` Remote bool `long:"remote" description:"Clean entire remote cache when no targets are given (default is local only)"` Args struct { // Inner nesting is necessary to make positional-args work :( Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to clean (default is to clean everything)"` } `positional-args:"true"` } `command:"clean" description:"Cleans build artifacts" subcommands-optional:"true"` Watch struct { Run bool `short:"r" long:"run" description:"Runs the specified targets when they change (default is to build or test as appropriate)."` Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" required:"true" description:"Targets to watch the sources of for changes"` } `positional-args:"true" required:"true"` } `command:"watch" description:"Watches sources of targets for changes and rebuilds them"` Update struct { Force bool `long:"force" description:"Forces a re-download of the new version."` NoVerify bool `long:"noverify" description:"Skips signature verification of downloaded version"` Latest bool `long:"latest" description:"Update to latest available version (overrides config)."` Version cli.Version `long:"version" description:"Updates to a particular version (overrides config)."` } `command:"update" description:"Checks for an update and updates if needed."` Op struct { } `command:"op" description:"Re-runs previous command."` Init struct { Dir cli.Filepath `long:"dir" description:"Directory to create config in" default:"."` BazelCompatibility bool `long:"bazel_compat" description:"Initialises config for Bazel compatibility mode."` } `command:"init" description:"Initialises a .plzconfig file in the current directory"` Gc struct { Conservative bool `short:"c" long:"conservative" description:"Runs a more conservative / safer GC."` TargetsOnly bool `short:"t" long:"targets_only" description:"Only print the targets to delete"` SrcsOnly bool `short:"s" long:"srcs_only" description:"Only print the source files to delete"` NoPrompt bool 
`short:"y" long:"no_prompt" description:"Remove targets without prompting"` DryRun bool `short:"n" long:"dry_run" description:"Don't remove any targets or files, just print what would be done"` Git bool `short:"g" long:"git" description:"Use 'git rm' to remove unused files instead of just 'rm'."` Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to limit gc to."` } `positional-args:"true"` } `command:"gc" description:"Analyzes the repo to determine unneeded targets."` Export struct { Output string `short:"o" long:"output" required:"true" description:"Directory to export into"` Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to export."` } `positional-args:"true"` Outputs struct { Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to export."` } `positional-args:"true"` } `command:"outputs" description:"Exports outputs of a set of targets"` } `command:"export" subcommands-optional:"true" description:"Exports a set of targets and files from the repo."` Follow struct { Retries int `long:"retries" description:"Number of times to retry the connection"` Delay cli.Duration `long:"delay" default:"1s" description:"Delay between timeouts"` Args struct { URL cli.URL `positional-arg-name:"URL" required:"true" description:"URL of remote server to connect to, e.g. 10.23.0.5:7777"` } `positional-args:"true"` } `command:"follow" description:"Connects to a remote Please instance to stream build events from."` Help struct { Args struct { Topic help.Topic `positional-arg-name:"topic" description:"Topic to display help on"` } `positional-args:"true"` } `command:"help" alias:"halp" description:"Displays help about various parts of plz or its build rules"` Tool struct { Args struct { Tool tool.Tool `positional-arg-name:"tool" description:"Tool to invoke (jarcat, lint, etc)"` Args []string `positional-arg-name:"arguments" description:"Arguments to pass to the tool"` } `positional-args:"true"` } `command:"tool" hidden:"true" description:"Invoke one of Please's sub-tools"` Query struct { Deps struct { Unique bool `long:"unique" short:"u" description:"Only output each dependency once"` Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"` } `positional-args:"true" required:"true"` } `command:"deps" description:"Queries the dependencies of a target."` ReverseDeps struct { Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"` } `positional-args:"true" required:"true"` } `command:"reverseDeps" alias:"revdeps" description:"Queries all the reverse dependencies of a target."` SomePath struct { Args struct { Target1 core.BuildLabel `positional-arg-name:"target1" description:"First build target" required:"true"` Target2 core.BuildLabel `positional-arg-name:"target2" description:"Second build target" required:"true"` } `positional-args:"true" required:"true"` } `command:"somepath" description:"Queries for a path between two targets"` AllTargets struct { Hidden bool `long:"hidden" description:"Show hidden targets as well"` Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query"` } `positional-args:"true"` } `command:"alltargets" description:"Lists all targets in the graph"` Print struct { Fields []string `short:"f" long:"field" description:"Individual fields to print of the target"` Args struct { Targets []core.BuildLabel 
`positional-arg-name:"targets" description:"Targets to print" required:"true"` } `positional-args:"true" required:"true"` } `command:"print" description:"Prints a representation of a single target"` Completions struct { Cmd string `long:"cmd" description:"Command to complete for" default:"build"` Args struct { Fragments cli.StdinStrings `positional-arg-name:"fragment" description:"Initial fragment to attempt to complete"` } `positional-args:"true"` } `command:"completions" subcommands-optional:"true" description:"Prints possible completions for a string."` AffectedTargets struct { Tests bool `long:"tests" description:"Shows only affected tests, no other targets."` Intransitive bool `long:"intransitive" description:"Shows only immediately affected targets, not transitive dependencies."` Args struct { Files cli.StdinStrings `positional-arg-name:"files" required:"true" description:"Files to query affected tests for"` } `positional-args:"true"` } `command:"affectedtargets" description:"Prints any targets affected by a set of files."` Input struct { Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to display inputs for" required:"true"` } `positional-args:"true" required:"true"` } `command:"input" alias:"inputs" description:"Prints all transitive inputs of a target."` Output struct { Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to display outputs for" required:"true"` } `positional-args:"true" required:"true"` } `command:"output" alias:"outputs" description:"Prints all outputs of a target."` Graph struct { Args struct { Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to render graph for"` } `positional-args:"true"` } `command:"graph" description:"Prints a JSON representation of the build graph."` WhatOutputs struct { EchoFiles bool `long:"echo_files" description:"Echo the file for which the printed output is responsible."` Args struct { Files cli.StdinStrings `positional-arg-name:"files" required:"true" description:"Files to query targets responsible for"` } `positional-args:"true"` } `command:"whatoutputs" description:"Prints out target(s) responsible for outputting provided file(s)"` Rules struct { Args struct { Targets []core.BuildLabel `position-arg-name:"targets" description:"Additional targets to load rules from"` } `positional-args:"true"` } `command:"rules" description:"Prints built-in rules to stdout as JSON"` Changes struct { Since string `short:"s" long:"since" default:"origin/master" description:"Revision to compare against"` CheckoutCommand string `long:"checkout_command" default:"git checkout %s" description:"Command to run to check out the before/after revisions."` CurrentCommand string `long:"current_revision_command" default:"git rev-parse --abbrev-ref HEAD" description:"Command to run to get the current revision (which will be checked out again at the end)"` Args struct { Files cli.StdinStrings `positional-arg-name:"files" description:"Files to consider changed"` } `positional-args:"true"` } `command:"changes" description:"Calculates the difference between two different states of the build graph"` } `command:"query" description:"Queries information about the build graph"` } // Definitions of what we do for each command. // Functions are called after args are parsed and return true for success. 
var buildFunctions = map[string]func() bool{ "build": func() bool { success, _ := runBuild(opts.Build.Args.Targets, true, false) return success }, "rebuild": func() bool { // It would be more pure to require --nocache for this, but in basically any context that // you use 'plz rebuild', you don't want the cache coming in and mucking things up. // 'plz clean' followed by 'plz build' would still work in those cases, anyway. opts.FeatureFlags.NoCache = true success, _ := runBuild(opts.Rebuild.Args.Targets, true, false) return success }, "hash": func() bool { success, state := runBuild(opts.Hash.Args.Targets, true, false) if opts.Hash.Detailed { for _, target := range state.ExpandOriginalTargets() { build.PrintHashes(state, state.Graph.TargetOrDie(target)) } } if opts.Hash.Update { hashes.RewriteHashes(state, state.ExpandOriginalTargets()) } return success }, "test": func() bool { targets := testTargets(opts.Test.Args.Target, opts.Test.Args.Args, opts.Test.Failed, opts.Test.TestResultsFile) os.RemoveAll(string(opts.Test.TestResultsFile)) success, state := runBuild(targets, true, true) test.WriteResultsToFileOrDie(state.Graph, string(opts.Test.TestResultsFile)) return success || opts.Test.FailingTestsOk }, "cover": func() bool { if opts.BuildFlags.Config != "" { log.Warning("Build config overridden; coverage may not be available for some languages") } else { opts.BuildFlags.Config = "cover" } targets := testTargets(opts.Cover.Args.Target, opts.Cover.Args.Args, opts.Cover.Failed, opts.Cover.TestResultsFile) os.RemoveAll(string(opts.Cover.TestResultsFile)) os.RemoveAll(string(opts.Cover.CoverageResultsFile)) success, state := runBuild(targets, true, true) test.WriteResultsToFileOrDie(state.Graph, string(opts.Cover.TestResultsFile)) test.AddOriginalTargetsToCoverage(state, opts.Cover.IncludeAllFiles) test.RemoveFilesFromCoverage(state.Coverage, state.Config.Cover.ExcludeExtension) test.WriteCoverageToFileOrDie(state.Coverage, string(opts.Cover.CoverageResultsFile)) if opts.Cover.LineCoverageReport { output.PrintLineCoverageReport(state, opts.Cover.IncludeFile) } else if !opts.Cover.NoCoverageReport { output.PrintCoverage(state, opts.Cover.IncludeFile) } return success || opts.Cover.FailingTestsOk }, "run": func() bool { if success, state := runBuild([]core.BuildLabel{opts.Run.Args.Target}, true, false); success { run.Run(state, opts.Run.Args.Target, opts.Run.Args.Args, opts.Run.Env) } return false // We should never return from run.Run so if we make it here something's wrong. }, "parallel": func() bool { if success, state := runBuild(opts.Run.Parallel.PositionalArgs.Targets, true, false); success { os.Exit(run.Parallel(state, state.ExpandOriginalTargets(), opts.Run.Parallel.Args, opts.Run.Parallel.NumTasks, opts.Run.Parallel.Quiet, opts.Run.Env)) } return false }, "sequential": func() bool { if success, state := runBuild(opts.Run.Sequential.PositionalArgs.Targets, true, false); success { os.Exit(run.Sequential(state, state.ExpandOriginalTargets(), opts.Run.Sequential.Args, opts.Run.Sequential.Quiet, opts.Run.Env)) } return false }, "clean": func() bool { config.Cache.DirClean = false if len(opts.Clean.Args.Targets) == 0 { if len(opts.BuildFlags.Include) == 0 && len(opts.BuildFlags.Exclude) == 0 { // Clean everything, doesn't require parsing at all. if !opts.Clean.Remote { // Don't construct the remote caches if they didn't pass --remote. 
config.Cache.RPCURL = "" config.Cache.HTTPURL = "" } clean.Clean(config, newCache(config), !opts.Clean.NoBackground) return true } opts.Clean.Args.Targets = core.WholeGraph } if success, state := runBuild(opts.Clean.Args.Targets, false, false); success { clean.Targets(state, state.ExpandOriginalTargets(), !opts.FeatureFlags.NoCache) return true } return false }, "watch": func() bool { success, state := runBuild(opts.Watch.Args.Targets, false, false) if success { watch.Watch(state, state.ExpandOriginalTargets(), opts.Watch.Run) } return success }, "update": func() bool { fmt.Printf("Up to date (version %s).\n", core.PleaseVersion) return true // We'd have died already if something was wrong. }, "op": func() bool { cmd := core.ReadLastOperationOrDie() log.Notice("OP PLZ: %s", strings.Join(cmd, " ")) // Annoyingly we don't seem to have any access to execvp() which would be rather useful here... executable, err := os.Executable() if err == nil { err = syscall.Exec(executable, append([]string{executable}, cmd...), os.Environ()) } log.Fatalf("SORRY OP: %s", err) // On success Exec never returns. return false }, "gc": func() bool { success, state := runBuild(core.WholeGraph, false, false) if success { state.OriginalTargets = state.Config.Gc.Keep gc.GarbageCollect(state, opts.Gc.Args.Targets, state.ExpandOriginalTargets(), state.Config.Gc.Keep, state.Config.Gc.KeepLabel, opts.Gc.Conservative, opts.Gc.TargetsOnly, opts.Gc.SrcsOnly, opts.Gc.NoPrompt, opts.Gc.DryRun, opts.Gc.Git) } return success }, "export": func() bool { success, state := runBuild(opts.Export.Args.Targets, false, false) if success { export.ToDir(state, opts.Export.Output, state.ExpandOriginalTargets()) } return success }, "follow": func() bool { // This is only temporary, ConnectClient will alter it to match the server. state := core.NewBuildState(1, nil, opts.OutputFlags.Verbosity, config) return follow.ConnectClient(state, opts.Follow.Args.URL.String(), opts.Follow.Retries, time.Duration(opts.Follow.Delay)) }, "outputs": func() bool { success, state := runBuild(opts.Export.Outputs.Args.Targets, true, false) if success { export.Outputs(state, opts.Export.Output, state.ExpandOriginalTargets()) } return success }, "help": func() bool { return help.Help(string(opts.Help.Args.Topic)) }, "tool": func() bool { tool.Run(config, opts.Tool.Args.Tool, opts.Tool.Args.Args) return false // If the function returns (which it shouldn't), something went wrong. 
}, "deps": func() bool { return runQuery(true, opts.Query.Deps.Args.Targets, func(state *core.BuildState) { query.Deps(state, state.ExpandOriginalTargets(), opts.Query.Deps.Unique) }) }, "reverseDeps": func() bool { opts.VisibilityParse = true return runQuery(false, opts.Query.ReverseDeps.Args.Targets, func(state *core.BuildState) { query.ReverseDeps(state.Graph, state.ExpandOriginalTargets()) }) }, "somepath": func() bool { return runQuery(true, []core.BuildLabel{opts.Query.SomePath.Args.Target1, opts.Query.SomePath.Args.Target2}, func(state *core.BuildState) { query.SomePath(state.Graph, opts.Query.SomePath.Args.Target1, opts.Query.SomePath.Args.Target2) }, ) }, "alltargets": func() bool { return runQuery(true, opts.Query.AllTargets.Args.Targets, func(state *core.BuildState) { query.AllTargets(state.Graph, state.ExpandOriginalTargets(), opts.Query.AllTargets.Hidden) }) }, "print": func() bool { return runQuery(false, opts.Query.Print.Args.Targets, func(state *core.BuildState) { query.Print(state.Graph, state.ExpandOriginalTargets(), opts.Query.Print.Fields) }) }, "affectedtargets": func() bool { files := opts.Query.AffectedTargets.Args.Files targets := core.WholeGraph if opts.Query.AffectedTargets.Intransitive { state := core.NewBuildState(1, nil, 1, config) targets = core.FindOwningPackages(state, files) } return runQuery(true, targets, func(state *core.BuildState) { query.AffectedTargets(state, files.Get(), opts.BuildFlags.Include, opts.BuildFlags.Exclude, opts.Query.AffectedTargets.Tests, !opts.Query.AffectedTargets.Intransitive) }) }, "input": func() bool { return runQuery(true, opts.Query.Input.Args.Targets, func(state *core.BuildState) { query.TargetInputs(state.Graph, state.ExpandOriginalTargets()) }) }, "output": func() bool { return runQuery(true, opts.Query.Output.Args.Targets, func(state *core.BuildState) { query.TargetOutputs(state.Graph, state.ExpandOriginalTargets()) }) }, "completions": func() bool { // Somewhat fiddly because the inputs are not necessarily well-formed at this point. opts.ParsePackageOnly = true fragments := opts.Query.Completions.Args.Fragments.Get() if opts.Query.Completions.Cmd == "help" { // Special-case completing help topics rather than build targets. if len(fragments) == 0 { help.Topics("") } else { help.Topics(fragments[0]) } return true } if len(fragments) == 0 || len(fragments) == 1 && strings.Trim(fragments[0], "/ ") == "" { os.Exit(0) // Don't do anything for empty completion, it's normally too slow. } labels, parseLabels, hidden := query.CompletionLabels(config, fragments, core.RepoRoot) if success, state := Please(parseLabels, config, false, false, false); success { binary := opts.Query.Completions.Cmd == "run" test := opts.Query.Completions.Cmd == "test" || opts.Query.Completions.Cmd == "cover" query.Completions(state.Graph, labels, binary, test, hidden) return true } return false }, "graph": func() bool { return runQuery(true, opts.Query.Graph.Args.Targets, func(state *core.BuildState) { if len(opts.Query.Graph.Args.Targets) == 0 { state.OriginalTargets = opts.Query.Graph.Args.Targets // It special-cases doing the full graph. 
} query.Graph(state, state.ExpandOriginalTargets()) }) }, "whatoutputs": func() bool { return runQuery(true, core.WholeGraph, func(state *core.BuildState) { query.WhatOutputs(state.Graph, opts.Query.WhatOutputs.Args.Files.Get(), opts.Query.WhatOutputs.EchoFiles) }) }, "rules": func() bool { targets := opts.Query.Rules.Args.Targets success, state := Please(opts.Query.Rules.Args.Targets, config, true, true, false) if !success { return false } targets = state.ExpandOriginalTargets() parse.PrintRuleArgs(state, targets) return true }, "changes": func() bool { // Temporarily set this flag on to avoid fatal errors from the first parse. keepGoing := opts.BuildFlags.KeepGoing opts.BuildFlags.KeepGoing = true original := query.MustGetRevision(opts.Query.Changes.CurrentCommand) files := opts.Query.Changes.Args.Files.Get() query.MustCheckout(opts.Query.Changes.Since, opts.Query.Changes.CheckoutCommand) _, before := runBuild(core.WholeGraph, false, false) opts.BuildFlags.KeepGoing = keepGoing // N.B. Ignore failure here; if we can't parse the graph before then it will suffice to // assume that anything we don't know about has changed. query.MustCheckout(original, opts.Query.Changes.CheckoutCommand) success, after := runBuild(core.WholeGraph, false, false) if !success { return false } for _, target := range query.DiffGraphs(before, after, files) { fmt.Printf("%s\n", target) } return true }, } // ConfigOverrides are used to implement completion on the -o flag. type ConfigOverrides map[string]string // Complete implements the flags.Completer interface. func (overrides ConfigOverrides) Complete(match string) []flags.Completion { return core.DefaultConfiguration().Completions(match) } // Used above as a convenience wrapper for query functions. func runQuery(needFullParse bool, labels []core.BuildLabel, onSuccess func(state *core.BuildState)) bool { if !needFullParse { opts.ParsePackageOnly = true } if len(labels) == 0 { labels = core.WholeGraph } if success, state := runBuild(labels, false, false); success { onSuccess(state) return true } return false } func please(tid int, state *core.BuildState, parsePackageOnly bool, include, exclude []string) { for { label, dependor, t := state.NextTask() switch t { case core.Stop, core.Kill: return case core.Parse, core.SubincludeParse: t := t label := label dependor := dependor state.ParsePool <- func() { parse.Parse(tid, state, label, dependor, parsePackageOnly, include, exclude, t == core.SubincludeParse) if opts.VisibilityParse && state.IsOriginalTarget(label) { parseForVisibleTargets(state, label) } state.TaskDone(false) } case core.Build, core.SubincludeBuild: build.Build(tid, state, label) state.TaskDone(true) case core.Test: test.Test(tid, state, label) state.TaskDone(true) } } } // parseForVisibleTargets adds parse tasks for any targets that the given label is visible to. func parseForVisibleTargets(state *core.BuildState, label core.BuildLabel) { if target := state.Graph.Target(label); target != nil { for _, vis := range target.Visibility { findOriginalTask(state, vis, false) } } } // prettyOutputs determines from input flags whether we should show 'pretty' output (ie. interactive). func prettyOutput(interactiveOutput bool, plainOutput bool, verbosity int) bool { if interactiveOutput && plainOutput { log.Fatal("Can't pass both --interactive_output and --plain_output") } return interactiveOutput || (!plainOutput && cli.StdErrIsATerminal && verbosity < 4) } // newCache constructs a new cache based on the current config / flags. 
func newCache(config *core.Configuration) core.Cache { if opts.FeatureFlags.NoCache { return nil } return cache.NewCache(config) } // Please starts & runs the main build process through to its completion. func Please(targets []core.BuildLabel, config *core.Configuration, prettyOutput, shouldBuild, shouldTest bool) (bool, *core.BuildState) { if opts.BuildFlags.NumThreads > 0 { config.Please.NumThreads = opts.BuildFlags.NumThreads } else if config.Please.NumThreads <= 0 { config.Please.NumThreads = runtime.NumCPU() + 2 } debugTests := opts.Test.Debug || opts.Cover.Debug if opts.BuildFlags.Config != "" { config.Build.Config = opts.BuildFlags.Config } else if debugTests { config.Build.Config = "dbg" } c := newCache(config) state := core.NewBuildState(config.Please.NumThreads, c, opts.OutputFlags.Verbosity, config) state.VerifyHashes = !opts.FeatureFlags.NoHashVerification state.NumTestRuns = opts.Test.NumRuns + opts.Cover.NumRuns // Only one of these can be passed. state.TestArgs = append(opts.Test.Args.Args, opts.Cover.Args.Args...) // Similarly here. state.NeedCoverage = !opts.Cover.Args.Target.IsEmpty() state.NeedBuild = shouldBuild state.NeedTests = shouldTest state.NeedHashesOnly = len(opts.Hash.Args.Targets) > 0 state.PrepareOnly = opts.Build.Prepare || opts.Build.Shell state.PrepareShell = opts.Build.Shell state.CleanWorkdirs = !opts.FeatureFlags.KeepWorkdirs state.ForceRebuild = len(opts.Rebuild.Args.Targets) > 0 state.ShowTestOutput = opts.Test.ShowOutput || opts.Cover.ShowOutput state.DebugTests = debugTests state.ShowAllOutput = opts.OutputFlags.ShowAllOutput state.SetIncludeAndExclude(opts.BuildFlags.Include, opts.BuildFlags.Exclude) parse.InitParser(state) build.Init(state) if config.Events.Port != 0 && shouldBuild { shutdown := follow.InitialiseServer(state, config.Events.Port) defer shutdown() } if config.Events.Port != 0 || config.Display.SystemStats { go follow.UpdateResources(state) } metrics.InitFromConfig(config) // Acquire the lock before we start building if (shouldBuild || shouldTest) && !opts.FeatureFlags.NoLock { core.AcquireRepoLock() defer core.ReleaseRepoLock() } if state.DebugTests && len(targets) != 1 { log.Fatalf("-d/--debug flag can only be used with a single test target") } detailedTests := shouldTest && (opts.Test.Detailed || opts.Cover.Detailed || (len(targets) == 1 && !targets[0].IsAllTargets() && !targets[0].IsAllSubpackages() && targets[0] != core.BuildLabelStdin)) // Start looking for the initial targets to kick the build off go findOriginalTasks(state, targets) // Start up all the build workers var wg sync.WaitGroup wg.Add(config.Please.NumThreads) for i := 0; i < config.Please.NumThreads; i++ { go func(tid int) { please(tid, state, opts.ParsePackageOnly, opts.BuildFlags.Include, opts.BuildFlags.Exclude) wg.Done() }(i) } // Wait until they've all exited, which they'll do once they have no tasks left. go func() { wg.Wait() close(state.Results) // This will signal MonitorState (below) to stop. }() // Draw stuff to the screen while there are still results coming through. shouldRun := !opts.Run.Args.Target.IsEmpty() success := output.MonitorState(state, config.Please.NumThreads, !prettyOutput, opts.BuildFlags.KeepGoing, shouldBuild, shouldTest, shouldRun, opts.Build.ShowStatus, detailedTests, string(opts.OutputFlags.TraceFile)) metrics.Stop() build.StopWorkers() if c != nil { c.Shutdown() } return success, state } // findOriginalTasks finds the original parse tasks for the original set of targets. 
func findOriginalTasks(state *core.BuildState, targets []core.BuildLabel) { if state.Config.Bazel.Compatibility && fs.FileExists("WORKSPACE") { // We have to parse the WORKSPACE file before anything else to understand subrepos. // This is a bit crap really since it inhibits parallelism for the first step. parse.Parse(0, state, core.NewBuildLabel("workspace", "all"), core.OriginalTarget, false, state.Include, state.Exclude, false) } if opts.BuildFlags.Arch.Arch != "" { // Set up a new subrepo for this architecture. state.Graph.AddSubrepo(core.SubrepoForArch(state, opts.BuildFlags.Arch)) } for _, target := range targets { if target == core.BuildLabelStdin { for label := range cli.ReadStdin() { findOriginalTask(state, core.ParseBuildLabels([]string{label})[0], true) } } else { findOriginalTask(state, target, true) } } state.TaskDone(true) // initial target adding counts as one. } func findOriginalTask(state *core.BuildState, target core.BuildLabel, addToList bool) { if opts.BuildFlags.Arch.Arch != "" { target.PackageName = path.Join(opts.BuildFlags.Arch.String(), target.PackageName) } if target.IsAllSubpackages() { for pkg := range utils.FindAllSubpackages(state.Config, target.PackageName, "") { state.AddOriginalTarget(core.NewBuildLabel(pkg, "all"), addToList) } } else { state.AddOriginalTarget(target, addToList) } } // testTargets handles test targets which can be given in two formats; a list of targets or a single // target with a list of trailing arguments. // Alternatively they can be completely omitted in which case we test everything under the working dir. // One can also pass a 'failed' flag which runs the failed tests from last time. func testTargets(target core.BuildLabel, args []string, failed bool, resultsFile cli.Filepath) []core.BuildLabel { if failed { targets, args := test.LoadPreviousFailures(string(resultsFile)) // Have to reset these - it doesn't matter which gets which. opts.Test.Args.Args = args opts.Cover.Args.Args = nil return targets } else if target.Name == "" { return core.InitialPackage() } else if len(args) > 0 && core.LooksLikeABuildLabel(args[0]) { opts.Cover.Args.Args = []string{} opts.Test.Args.Args = []string{} return append(core.ParseBuildLabels(args), target) } return []core.BuildLabel{target} } // readConfig sets various things up and reads the initial configuration. func readConfig(forceUpdate bool) *core.Configuration { if opts.FeatureFlags.NoHashVerification { log.Warning("You've disabled hash verification; this is intended to help temporarily while modifying build targets. You shouldn't use this regularly.") } config, err := core.ReadConfigFiles([]string{ core.MachineConfigFileName, core.ExpandHomePath(core.UserConfigFileName), path.Join(core.RepoRoot, core.ConfigFileName), path.Join(core.RepoRoot, core.ArchConfigFileName), path.Join(core.RepoRoot, core.LocalConfigFileName), }, opts.BuildFlags.Profile) if err != nil { log.Fatalf("Error reading config file: %s", err) } else if err := config.ApplyOverrides(opts.BuildFlags.Option); err != nil { log.Fatalf("Can't override requested config setting: %s", err) } // Now apply any flags that override this if opts.Update.Latest { config.Please.Version.Unset() } else if opts.Update.Version.IsSet { config.Please.Version = opts.Update.Version } update.CheckAndUpdate(config, !opts.FeatureFlags.NoUpdate, forceUpdate, opts.Update.Force, !opts.Update.NoVerify) return config } // Runs the actual build // Which phases get run are controlled by shouldBuild and shouldTest. 
func runBuild(targets []core.BuildLabel, shouldBuild, shouldTest bool) (bool, *core.BuildState) { if len(targets) == 0 { targets = core.InitialPackage() } pretty := prettyOutput(opts.OutputFlags.InteractiveOutput, opts.OutputFlags.PlainOutput, opts.OutputFlags.Verbosity) return Please(targets, config, pretty, shouldBuild, shouldTest) } // readConfigAndSetRoot reads the .plzconfig files and moves to the repo root. func readConfigAndSetRoot(forceUpdate bool) *core.Configuration { if opts.BuildFlags.RepoRoot == "" { log.Debug("Found repo root at %s", core.MustFindRepoRoot()) } else { core.RepoRoot = string(opts.BuildFlags.RepoRoot) } // Please always runs from the repo root, so move there now. if err := os.Chdir(core.RepoRoot); err != nil { log.Fatalf("%s", err) } // Reset this now we're at the repo root. if opts.OutputFlags.LogFile != "" { if !path.IsAbs(string(opts.OutputFlags.LogFile)) { opts.OutputFlags.LogFile = cli.Filepath(path.Join(core.RepoRoot, string(opts.OutputFlags.LogFile))) } cli.InitFileLogging(string(opts.OutputFlags.LogFile), opts.OutputFlags.LogFileLevel) } return readConfig(forceUpdate) } // handleCompletions handles shell completion. Typically it just prints to stdout but // may do a little more if we think we need to handle aliases. func handleCompletions(parser *flags.Parser, items []flags.Completion) { if len(items) > 0 { cli.PrintCompletions(items) } else { cli.InitLogging(0) // Ensure this is quiet opts.FeatureFlags.NoUpdate = true // Ensure we don't try to update config := readConfigAndSetRoot(false) if len(config.Aliases) > 0 { for k, v := range config.Aliases { parser.AddCommand(k, v, v, &struct{}{}) } // Run again without this registered as a completion handler parser.CompletionHandler = nil parser.ParseArgs(os.Args[1:]) } } // Regardless of what happened, always exit with 0 at this point. os.Exit(0) } func main() { parser, extraArgs, flagsErr := cli.ParseFlags("Please", &opts, os.Args, handleCompletions) // Note that we must leave flagsErr for later, because it may be affected by aliases. if opts.OutputFlags.Version { fmt.Printf("Please version %s\n", core.PleaseVersion) os.Exit(0) // Ignore other flags if --version was passed. } if opts.OutputFlags.Colour { output.SetColouredOutput(true) } else if opts.OutputFlags.NoColour { output.SetColouredOutput(false) } if opts.OutputFlags.ShowAllOutput { opts.OutputFlags.PlainOutput = true } // Init logging, but don't do file output until we've chdir'd. cli.InitLogging(opts.OutputFlags.Verbosity) command := cli.ActiveCommand(parser.Command) if opts.Complete != "" { // Completion via PLZ_COMPLETE env var sidesteps other commands opts.Query.Completions.Cmd = command opts.Query.Completions.Args.Fragments = []string{opts.Complete} command = "completions" } else if command == "init" { if flagsErr != nil { // This error otherwise doesn't get checked until later. cli.ParseFlagsFromArgsOrDie("Please", core.PleaseVersion.String(), &opts, os.Args) } // If we're running plz init then we obviously don't expect to read a config file. 
utils.InitConfig(string(opts.Init.Dir), opts.Init.BazelCompatibility) os.Exit(0) } else if command == "help" || command == "follow" { config = core.DefaultConfiguration() if !buildFunctions[command]() { os.Exit(1) } os.Exit(0) } else if opts.OutputFlags.CompletionScript { utils.PrintCompletionScript() os.Exit(0) } // Read the config now config = readConfigAndSetRoot(command == "update") if parser.Command.Active != nil && parser.Command.Active.Name == "query" { // Query commands don't need either of these set. opts.OutputFlags.PlainOutput = true config.Cache.DirClean = false } // Now we've read the config file, we may need to re-run the parser; the aliases in the config // can affect how we parse otherwise illegal flag combinations. if flagsErr != nil || len(extraArgs) > 0 { for idx, arg := range os.Args[1:] { // Please should not touch anything that comes after `--` if arg == "--" { break } for k, v := range config.Aliases { if arg == k { // We could insert every token in v into os.Args at this point and then we could have // aliases defined in terms of other aliases but that seems rather like overkill so just // stick the replacement in wholesale instead. os.Args[idx+1] = v } } } argv := strings.Join(os.Args[1:], " ") command = cli.ParseFlagsFromArgsOrDie("Please", core.PleaseVersion.String(), &opts, strings.Fields(os.Args[0]+" "+argv)) } if opts.ProfilePort != 0 { go func() { log.Warning("%s", http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", opts.ProfilePort), nil)) }() } if opts.Profile != "" { f, err := os.Create(opts.Profile) if err != nil { log.Fatalf("Failed to open profile file: %s", err) } if err := pprof.StartCPUProfile(f); err != nil { log.Fatalf("could not start profiler: %s", err) } defer pprof.StopCPUProfile() } if opts.MemProfile != "" { f, err := os.Create(opts.MemProfile) if err != nil { log.Fatalf("Failed to open memory profile file: %s", err) } defer f.Close() defer pprof.WriteHeapProfile(f) } if !buildFunctions[command]() { os.Exit(7) // Something distinctive, is sometimes useful to identify this externally. } }
1
8,178
you should add this to the cover command too (but let's have a more general convo about whether it's a flag or a config option); see the sketch after this record
thought-machine-please
go
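A minimal sketch of what the review comment in this record asks for, assuming only what is visible in the oldf above: Please parses each command's flags from its own struct via github.com/jessevdk/go-flags, so a flag added to the test command is invisible to plz cover until it is mirrored on the Cover struct (or turned into a .plzconfig option, the alternative the comment raises). The flag name stream_results, the trimmed descriptions, and the standalone main are illustrative assumptions; the actual patch for this record is not shown here.

// Sketch only: a hypothetical test flag mirrored onto the cover command.
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

var opts struct {
	Test struct {
		// Hypothetical flag standing in for whatever the real patch added.
		StreamResults bool `long:"stream_results" description:"Stream test results as they arrive."`
	} `command:"test" description:"Builds and tests one or more targets"`
	Cover struct {
		// The same flag mirrored here, so `plz cover --stream_results` also parses.
		StreamResults bool `long:"stream_results" description:"Stream test results as they arrive."`
	} `command:"cover" description:"Builds and tests one or more targets, and calculates coverage."`
}

func main() {
	if _, err := flags.NewParser(&opts, flags.Default).Parse(); err != nil {
		os.Exit(1)
	}
	fmt.Println("test:", opts.Test.StreamResults, "cover:", opts.Cover.StreamResults)
}

The config-option alternative raised in the comment would instead be read through core.ReadConfigFiles (as in readConfig above), which avoids duplicating the tag on both structs but makes the behaviour harder to toggle per invocation.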
@@ -104,7 +104,7 @@ func (s *historyArchiverSuite) SetupTest() { s.Assertions = require.New(s.T()) s.container = &archiver.HistoryBootstrapContainer{ Logger: log.NewNoopLogger(), - MetricsClient: metrics.NewClient(scope, metrics.HistoryArchiverScope), + MetricsClient: metrics.NewClient(&metrics.ClientConfig{}, scope, metrics.HistoryArchiverScope), } s.controller = gomock.NewController(s.T())
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package s3store import ( "bytes" "context" "errors" "fmt" "io" "sort" "strconv" "strings" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/s3" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/uber-go/tally/v4" historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/s3store/mocks" "go.temporal.io/server/common/codec" "go.temporal.io/server/common/convert" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/primitives/timestamp" ) const ( testNamespaceID = "test-namespace-id" testNamespace = "test-namespace" testWorkflowID = "test-workflow-id" testRunID = "test-run-id" testNextEventID = 1800 testCloseFailoverVersion = int64(100) testPageSize = 100 testBucket = "test-bucket" testBucketURI = "s3://test-bucket" ) var ( testBranchToken = []byte{1, 2, 3} ) type historyArchiverSuite struct { *require.Assertions suite.Suite s3cli *mocks.MockS3API container *archiver.HistoryBootstrapContainer logger log.Logger testArchivalURI archiver.URI historyBatchesV1 []*archiverspb.HistoryBlob historyBatchesV100 []*archiverspb.HistoryBlob controller *gomock.Controller } func TestHistoryArchiverSuite(t *testing.T) { suite.Run(t, new(historyArchiverSuite)) } func (s *historyArchiverSuite) SetupSuite() { var err error s.testArchivalURI, err = archiver.NewURI(testBucketURI) s.Require().NoError(err) } func (s *historyArchiverSuite) TearDownSuite() { } func (s *historyArchiverSuite) SetupTest() { scope := tally.NewTestScope("test", nil) s.Assertions = require.New(s.T()) s.container = &archiver.HistoryBootstrapContainer{ Logger: log.NewNoopLogger(), MetricsClient: metrics.NewClient(scope, metrics.HistoryArchiverScope), } s.controller = gomock.NewController(s.T()) s.s3cli = mocks.NewMockS3API(s.controller) setupFsEmulation(s.s3cli) s.setupHistoryDirectory() } func setupFsEmulation(s3cli *mocks.MockS3API) { fs := make(map[string][]byte) putObjectFn := func(_ aws.Context, input *s3.PutObjectInput, _ ...request.Option) 
(*s3.PutObjectOutput, error) { buf := new(bytes.Buffer) buf.ReadFrom(input.Body) fs[*input.Bucket+*input.Key] = buf.Bytes() return &s3.PutObjectOutput{}, nil } s3cli.EXPECT().ListObjectsV2WithContext(gomock.Any(), gomock.Any()).DoAndReturn( func(_ context.Context, input *s3.ListObjectsV2Input, opts ...request.Option) (*s3.ListObjectsV2Output, error) { objects := make([]*s3.Object, 0) commonPrefixMap := map[string]bool{} for k := range fs { if strings.HasPrefix(k, *input.Bucket+*input.Prefix) { key := k[len(*input.Bucket):] keyWithoutPrefix := key[len(*input.Prefix):] index := strings.Index(keyWithoutPrefix, "/") if index == -1 || input.Delimiter == nil { objects = append(objects, &s3.Object{ Key: aws.String(key), }) } else { commonPrefixMap[key[:len(*input.Prefix)+index]] = true } } } commonPrefixes := make([]*s3.CommonPrefix, 0) for k := range commonPrefixMap { commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{ Prefix: aws.String(k), }) } sort.SliceStable(objects, func(i, j int) bool { return *objects[i].Key < *objects[j].Key }) maxKeys := 1000 if input.MaxKeys != nil { maxKeys = int(*input.MaxKeys) } start := 0 if input.ContinuationToken != nil { start, _ = strconv.Atoi(*input.ContinuationToken) } if input.StartAfter != nil { for k, v := range objects { if *input.StartAfter == *v.Key { start = k + 1 } } } isTruncated := false var nextContinuationToken *string if len(objects) > start+maxKeys { isTruncated = true nextContinuationToken = convert.StringPtr(fmt.Sprintf("%d", start+maxKeys)) objects = objects[start : start+maxKeys] } else { objects = objects[start:] } if input.StartAfter != nil { for k, v := range commonPrefixes { if *input.StartAfter == *v.Prefix { start = k + 1 } } } if len(commonPrefixes) > start+maxKeys { isTruncated = true nextContinuationToken = convert.StringPtr(fmt.Sprintf("%d", start+maxKeys)) commonPrefixes = commonPrefixes[start : start+maxKeys] } else if len(commonPrefixes) > 0 { commonPrefixes = commonPrefixes[start:] } return &s3.ListObjectsV2Output{ CommonPrefixes: commonPrefixes, Contents: objects, IsTruncated: &isTruncated, NextContinuationToken: nextContinuationToken, }, nil }).AnyTimes() s3cli.EXPECT().PutObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn(putObjectFn).AnyTimes() s3cli.EXPECT().HeadObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn( func(ctx aws.Context, input *s3.HeadObjectInput, options ...request.Option) (*s3.HeadObjectOutput, error) { _, ok := fs[*input.Bucket+*input.Key] if !ok { return nil, awserr.New("NotFound", "", nil) } return &s3.HeadObjectOutput{}, nil }).AnyTimes() s3cli.EXPECT().GetObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn( func(ctx aws.Context, input *s3.GetObjectInput, options ...request.Option) (*s3.GetObjectOutput, error) { _, ok := fs[*input.Bucket+*input.Key] if !ok { return nil, awserr.New(s3.ErrCodeNoSuchKey, "", nil) } return &s3.GetObjectOutput{ Body: io.NopCloser(bytes.NewReader(fs[*input.Bucket+*input.Key])), }, nil }).AnyTimes() } func (s *historyArchiverSuite) TestValidateURI() { testCases := []struct { URI string expectedErr error }{ { URI: "wrongscheme:///a/b/c", expectedErr: archiver.ErrURISchemeMismatch, }, { URI: "s3://", expectedErr: errNoBucketSpecified, }, { URI: "s3://bucket/a/b/c", expectedErr: errBucketNotExists, }, { URI: testBucketURI, expectedErr: nil, }, } s.s3cli.EXPECT().HeadBucketWithContext(gomock.Any(), gomock.Any()).DoAndReturn( func(ctx aws.Context, input *s3.HeadBucketInput, options ...request.Option) (*s3.HeadBucketOutput, error) { if *input.Bucket != 
s.testArchivalURI.Hostname() { return nil, awserr.New("NotFound", "", nil) } return &s3.HeadBucketOutput{}, nil }).AnyTimes() historyArchiver := s.newTestHistoryArchiver(nil) for _, tc := range testCases { URI, err := archiver.NewURI(tc.URI) s.NoError(err) s.Equal(tc.expectedErr, historyArchiver.ValidateURI(URI)) } } func (s *historyArchiverSuite) TestArchive_Fail_InvalidURI() { historyArchiver := s.newTestHistoryArchiver(nil) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, WorkflowID: testWorkflowID, RunID: testRunID, BranchToken: testBranchToken, NextEventID: testNextEventID, CloseFailoverVersion: testCloseFailoverVersion, } URI, err := archiver.NewURI("wrongscheme://") s.NoError(err) err = historyArchiver.Archive(context.Background(), URI, request) s.Error(err) } func (s *historyArchiverSuite) TestArchive_Fail_InvalidRequest() { historyArchiver := s.newTestHistoryArchiver(nil) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, WorkflowID: "", // an invalid request RunID: testRunID, BranchToken: testBranchToken, NextEventID: testNextEventID, CloseFailoverVersion: testCloseFailoverVersion, } err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) s.Error(err) } func (s *historyArchiverSuite) TestArchive_Fail_ErrorOnReadHistory() { mockCtrl := gomock.NewController(s.T()) defer mockCtrl.Finish() historyIterator := archiver.NewMockHistoryIterator(mockCtrl) gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), historyIterator.EXPECT().Next().Return(nil, errors.New("some random error")), ) historyArchiver := s.newTestHistoryArchiver(historyIterator) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, WorkflowID: testWorkflowID, RunID: testRunID, BranchToken: testBranchToken, NextEventID: testNextEventID, CloseFailoverVersion: testCloseFailoverVersion, } err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) s.Error(err) } func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { mockCtrl := gomock.NewController(s.T()) defer mockCtrl.Finish() historyIterator := archiver.NewMockHistoryIterator(mockCtrl) gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), historyIterator.EXPECT().Next().Return(nil, serviceerror.NewResourceExhausted("")), ) historyArchiver := s.newTestHistoryArchiver(historyIterator) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, WorkflowID: testWorkflowID, RunID: testRunID, BranchToken: testBranchToken, NextEventID: testNextEventID, CloseFailoverVersion: testCloseFailoverVersion, } err := historyArchiver.Archive(getCanceledContext(), s.testArchivalURI, request) s.Error(err) } func (s *historyArchiverSuite) TestArchive_Fail_HistoryMutated() { mockCtrl := gomock.NewController(s.T()) defer mockCtrl.Finish() historyIterator := archiver.NewMockHistoryIterator(mockCtrl) historyBatches := []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: common.FirstEventID + 1, EventTime: timestamp.TimePtr(time.Now().UTC()), Version: testCloseFailoverVersion + 1, }, }, }, } historyBlob := &archiverspb.HistoryBlob{ Header: &archiverspb.HistoryBlobHeader{ IsLast: true, }, Body: historyBatches, } gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), historyIterator.EXPECT().Next().Return(historyBlob, nil), ) historyArchiver := s.newTestHistoryArchiver(historyIterator) request := 
&archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, WorkflowID: testWorkflowID, RunID: testRunID, BranchToken: testBranchToken, NextEventID: testNextEventID, CloseFailoverVersion: testCloseFailoverVersion, } err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request) s.Error(err) } func (s *historyArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { mockCtrl := gomock.NewController(s.T()) defer mockCtrl.Finish() historyIterator := archiver.NewMockHistoryIterator(mockCtrl) gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), historyIterator.EXPECT().Next().Return(nil, errors.New("some random error")), ) historyArchiver := s.newTestHistoryArchiver(historyIterator) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, WorkflowID: testWorkflowID, RunID: testRunID, BranchToken: testBranchToken, NextEventID: testNextEventID, CloseFailoverVersion: testCloseFailoverVersion, } nonRetryableErr := errors.New("some non-retryable error") err := historyArchiver.Archive(context.Background(), s.testArchivalURI, request, archiver.GetNonRetryableErrorOption(nonRetryableErr)) s.Equal(nonRetryableErr, err) } func (s *historyArchiverSuite) TestArchive_Success() { mockCtrl := gomock.NewController(s.T()) defer mockCtrl.Finish() historyIterator := archiver.NewMockHistoryIterator(mockCtrl) historyBatches := []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: common.FirstEventID + 1, EventTime: timestamp.TimePtr(time.Now().UTC()), Version: testCloseFailoverVersion, }, { EventId: common.FirstEventID + 2, EventTime: timestamp.TimePtr(time.Now().UTC()), Version: testCloseFailoverVersion, }, }, }, { Events: []*historypb.HistoryEvent{ { EventId: testNextEventID - 1, EventTime: timestamp.TimePtr(time.Now().UTC()), Version: testCloseFailoverVersion, }, }, }, } historyBlob := &archiverspb.HistoryBlob{ Header: &archiverspb.HistoryBlobHeader{ IsLast: true, }, Body: historyBatches, } gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), historyIterator.EXPECT().Next().Return(historyBlob, nil), historyIterator.EXPECT().HasNext().Return(false), ) historyArchiver := s.newTestHistoryArchiver(historyIterator) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, WorkflowID: testWorkflowID, RunID: testRunID, BranchToken: testBranchToken, NextEventID: testNextEventID, CloseFailoverVersion: testCloseFailoverVersion, } URI, err := archiver.NewURI(testBucketURI + "/TestArchive_Success") s.NoError(err) err = historyArchiver.Archive(context.Background(), URI, request) s.NoError(err) expectedkey := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, testCloseFailoverVersion, 0) s.assertKeyExists(expectedkey) } func (s *historyArchiverSuite) TestGet_Fail_InvalidURI() { historyArchiver := s.newTestHistoryArchiver(nil) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, RunID: testRunID, PageSize: 100, } URI, err := archiver.NewURI("wrongscheme://") s.NoError(err) response, err := historyArchiver.Get(context.Background(), URI, request) s.Nil(response) s.Error(err) } func (s *historyArchiverSuite) TestGet_Fail_InvalidRequest() { historyArchiver := s.newTestHistoryArchiver(nil) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, RunID: testRunID, PageSize: 0, // pageSize should be greater than 0 } response, err := 
historyArchiver.Get(context.Background(), s.testArchivalURI, request) s.Nil(response) s.Error(err) s.IsType(&serviceerror.InvalidArgument{}, err) } func (s *historyArchiverSuite) TestGet_Fail_InvalidToken() { historyArchiver := s.newTestHistoryArchiver(nil) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, RunID: testRunID, PageSize: testPageSize, NextPageToken: []byte{'r', 'a', 'n', 'd', 'o', 'm'}, } URI, err := archiver.NewURI(testBucketURI) s.NoError(err) response, err := historyArchiver.Get(context.Background(), URI, request) s.Nil(response) s.Error(err) s.IsType(&serviceerror.InvalidArgument{}, err) } func (s *historyArchiverSuite) TestGet_Fail_KeyNotExist() { historyArchiver := s.newTestHistoryArchiver(nil) testCloseFailoverVersion := testCloseFailoverVersion request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, RunID: testRunID, PageSize: testPageSize, CloseFailoverVersion: &testCloseFailoverVersion, } URI, err := archiver.NewURI("s3://test-bucket/non-existent") s.NoError(err) response, err := historyArchiver.Get(context.Background(), URI, request) s.Nil(response) s.Error(err) s.IsType(&serviceerror.NotFound{}, err) } func (s *historyArchiverSuite) TestGet_Success_PickHighestVersion() { historyArchiver := s.newTestHistoryArchiver(nil) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, RunID: testRunID, PageSize: testPageSize, } URI, err := archiver.NewURI(testBucketURI) s.NoError(err) response, err := historyArchiver.Get(context.Background(), URI, request) s.NoError(err) s.Nil(response.NextPageToken) s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), response.HistoryBatches) } func (s *historyArchiverSuite) TestGet_Success_UseProvidedVersion() { historyArchiver := s.newTestHistoryArchiver(nil) testCloseFailoverVersion := int64(1) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, RunID: testRunID, PageSize: testPageSize, CloseFailoverVersion: &testCloseFailoverVersion, } URI, err := archiver.NewURI(testBucketURI) s.NoError(err) response, err := historyArchiver.Get(context.Background(), URI, request) s.NoError(err) s.Nil(response.NextPageToken) s.Equal(s.historyBatchesV1[0].Body, response.HistoryBatches) } func (s *historyArchiverSuite) TestGet_Success_SmallPageSize() { historyArchiver := s.newTestHistoryArchiver(nil) testCloseFailoverVersion := testCloseFailoverVersion request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, RunID: testRunID, PageSize: 1, CloseFailoverVersion: &testCloseFailoverVersion, } var combinedHistory []*historypb.History URI, err := archiver.NewURI(testBucketURI) s.NoError(err) response, err := historyArchiver.Get(context.Background(), URI, request) s.NoError(err) s.NotNil(response) s.NotNil(response.NextPageToken) s.NotNil(response.HistoryBatches) s.Len(response.HistoryBatches, 1) combinedHistory = append(combinedHistory, response.HistoryBatches...) request.NextPageToken = response.NextPageToken response, err = historyArchiver.Get(context.Background(), URI, request) s.NoError(err) s.NotNil(response) s.Nil(response.NextPageToken) s.NotNil(response.HistoryBatches) s.Len(response.HistoryBatches, 1) combinedHistory = append(combinedHistory, response.HistoryBatches...) 
s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), combinedHistory) } func (s *historyArchiverSuite) TestArchiveAndGet() { mockCtrl := gomock.NewController(s.T()) defer mockCtrl.Finish() historyIterator := archiver.NewMockHistoryIterator(mockCtrl) gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), historyIterator.EXPECT().Next().Return(s.historyBatchesV100[0], nil), historyIterator.EXPECT().HasNext().Return(true), historyIterator.EXPECT().Next().Return(s.historyBatchesV100[1], nil), historyIterator.EXPECT().HasNext().Return(false), ) historyArchiver := s.newTestHistoryArchiver(historyIterator) archiveRequest := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, WorkflowID: testWorkflowID, RunID: testRunID, BranchToken: testBranchToken, NextEventID: testNextEventID, CloseFailoverVersion: testCloseFailoverVersion, } URI, err := archiver.NewURI(testBucketURI + "/TestArchiveAndGet") s.NoError(err) err = historyArchiver.Archive(context.Background(), URI, archiveRequest) s.NoError(err) getRequest := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, RunID: testRunID, PageSize: testPageSize, } response, err := historyArchiver.Get(context.Background(), URI, getRequest) s.NoError(err) s.NotNil(response) s.Nil(response.NextPageToken) s.Equal(append(s.historyBatchesV100[0].Body, s.historyBatchesV100[1].Body...), response.HistoryBatches) } func (s *historyArchiverSuite) newTestHistoryArchiver(historyIterator archiver.HistoryIterator) *historyArchiver { // config := &config.S3Archiver{} // archiver, err := newHistoryArchiver(s.container, config, historyIterator) archiver := &historyArchiver{ container: s.container, s3cli: s.s3cli, historyIterator: historyIterator, } return archiver } func (s *historyArchiverSuite) setupHistoryDirectory() { now := time.Date(2020, 8, 22, 1, 2, 3, 4, time.UTC) s.historyBatchesV1 = []*archiverspb.HistoryBlob{ { Header: &archiverspb.HistoryBlobHeader{ IsLast: true, }, Body: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: testNextEventID - 1, EventTime: &now, Version: 1, }, }, }, }, }, } s.historyBatchesV100 = []*archiverspb.HistoryBlob{ { Header: &archiverspb.HistoryBlobHeader{ IsLast: false, }, Body: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: common.FirstEventID + 1, EventTime: &now, Version: testCloseFailoverVersion, }, { EventId: common.FirstEventID + 1, EventTime: &now, Version: testCloseFailoverVersion, }, }, }, }, }, { Header: &archiverspb.HistoryBlobHeader{ IsLast: true, }, Body: []*historypb.History{ { Events: []*historypb.HistoryEvent{ { EventId: testNextEventID - 1, EventTime: &now, Version: testCloseFailoverVersion, }, }, }, }, }, } s.writeHistoryBatchesForGetTest(s.historyBatchesV1, int64(1)) s.writeHistoryBatchesForGetTest(s.historyBatchesV100, testCloseFailoverVersion) } func (s *historyArchiverSuite) writeHistoryBatchesForGetTest(historyBatches []*archiverspb.HistoryBlob, version int64) { for i, batch := range historyBatches { encoder := codec.NewJSONPBEncoder() data, err := encoder.Encode(batch) s.Require().NoError(err) key := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, version, i) _, err = s.s3cli.PutObjectWithContext(context.Background(), &s3.PutObjectInput{ Bucket: aws.String(testBucket), Key: aws.String(key), Body: bytes.NewReader(data), }) s.Require().NoError(err) } } func (s *historyArchiverSuite) assertKeyExists(key string) { _, err := 
s.s3cli.GetObjectWithContext(context.Background(), &s3.GetObjectInput{ Bucket: aws.String(testBucket), Key: aws.String(key), }) s.NoError(err) } func getCanceledContext() context.Context { ctx, cancel := context.WithCancel(context.Background()) cancel() return ctx }
1
13,146
Instead of passing in a pointer to an empty struct, we should make the config optional: support passing nil and use the default config if it is nil. (A sketch of that pattern follows this record.)
temporalio-temporal
go
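The review comment above asks for the archiver constructor to treat its config as optional rather than requiring a pointer to an empty struct. Below is a minimal Go sketch of that pattern; the names (ArchiverConfig, NewArchiver, defaultArchiverConfig and their fields) are hypothetical stand-ins, not Temporal's actual API, and the real newHistoryArchiver would also take the container and history iterator.

package main

import "fmt"

// ArchiverConfig stands in for the real config struct (e.g. config.S3Archiver);
// the fields and defaults here are illustrative only.
type ArchiverConfig struct {
	Region     string
	MaxRetries int
}

// defaultArchiverConfig is the configuration used when the caller passes nil.
func defaultArchiverConfig() *ArchiverConfig {
	return &ArchiverConfig{Region: "us-east-1", MaxRetries: 3}
}

// Archiver is a placeholder for the component being constructed.
type Archiver struct {
	cfg *ArchiverConfig
}

// NewArchiver treats the config as optional: a nil pointer selects the defaults,
// so callers no longer need to pass a pointer to an empty struct.
func NewArchiver(cfg *ArchiverConfig) *Archiver {
	if cfg == nil {
		cfg = defaultArchiverConfig()
	}
	return &Archiver{cfg: cfg}
}

func main() {
	a := NewArchiver(nil) // nil falls back to the default config
	fmt.Println(a.cfg.Region, a.cfg.MaxRetries)
}

The nil check keeps the default wiring in one place, so test helpers and production call sites no longer have to construct an empty struct just to satisfy the signature.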
@@ -101,7 +101,7 @@ class HashableJSON(json.JSONEncoder): elif isinstance(obj, np.ndarray): return obj.tolist() if pd and isinstance(obj, (pd.Series, pd.DataFrame)): - return repr(sorted(list(obj.to_dict().items()))) + return obj.to_csv().encode('utf-8') elif isinstance(obj, self.string_hashable): return str(obj) elif isinstance(obj, self.repr_hashable):
1
import os, sys, warnings, operator import time import types import numbers import inspect import itertools import string, fnmatch import unicodedata import datetime as dt from collections import defaultdict from functools import partial from contextlib import contextmanager from distutils.version import LooseVersion from threading import Thread, Event import numpy as np import param import json try: from cyordereddict import OrderedDict except: from collections import OrderedDict try: import __builtin__ as builtins # noqa (compatibility) except: import builtins as builtins # noqa (compatibility) datetime_types = (np.datetime64, dt.datetime) try: import pandas as pd if LooseVersion(pd.__version__) > '0.20.0': from pandas.core.dtypes.dtypes import DatetimeTZDtypeType else: from pandas.types.dtypes import DatetimeTZDtypeType datetime_types = datetime_types + (pd.Timestamp, DatetimeTZDtypeType) except ImportError: pd = None try: import dask.dataframe as dd except ImportError: dd = None class Config(param.ParameterizedFunction): """ Set of boolean configuration values to change HoloViews' global behavior. Typically used to control warnings relating to deprecations or set global parameter such as style 'themes'. """ style_17 = param.Boolean(default=False, doc=""" Switch to the default style options used up to (and including) the HoloViews 1.7 release.""") warn_options_call = param.Boolean(default=False, doc=""" Whether to warn when the deprecated __call__ options syntax is used (the opts method should now be used instead). It is recommended that users switch this on to update any uses of __call__ as it will be deprecated in future.""") def __call__(self, **params): self.set_param(**params) return self config = Config() class HashableJSON(json.JSONEncoder): """ Extends JSONEncoder to generate a hashable string for as many types of object as possible including nested objects and objects that are not normally hashable. The purpose of this class is to generate unique strings that once hashed are suitable for use in memoization and other cases where deep equality must be tested without storing the entire object. By default JSONEncoder supports booleans, numbers, strings, lists, tuples and dictionaries. In order to support other types such as sets, datetime objects and mutable objects such as pandas Dataframes or numpy arrays, HashableJSON has to convert these types to datastructures that can normally be represented as JSON. Support for other object types may need to be introduced in future. By default, unrecognized object types are represented by their id. One limitation of this approach is that dictionaries with composite keys (e.g tuples) are not supported due to the JSON spec. """ string_hashable = (dt.datetime,) repr_hashable = () def default(self, obj): if isinstance(obj, set): return hash(frozenset(obj)) elif isinstance(obj, np.ndarray): return obj.tolist() if pd and isinstance(obj, (pd.Series, pd.DataFrame)): return repr(sorted(list(obj.to_dict().items()))) elif isinstance(obj, self.string_hashable): return str(obj) elif isinstance(obj, self.repr_hashable): return repr(obj) try: return hash(obj) except: return id(obj) class periodic(Thread): """ Run a callback count times with a given period without blocking. If count is None, will run till timeout (which may be forever if None). 
""" def __init__(self, period, count, callback, timeout=None, block=False): if isinstance(count, int): if count < 0: raise ValueError('Count value must be positive') elif not type(count) is type(None): raise ValueError('Count value must be a positive integer or None') if block is False and count is None and timeout is None: raise ValueError('When using a non-blocking thread, please specify ' 'either a count or a timeout') super(periodic, self).__init__() self.period = period self.callback = callback self.count = count self.counter = 0 self.block = block self.timeout = timeout self._completed = Event() self._start_time = None @property def completed(self): return self._completed.is_set() def start(self): self._start_time = time.time() if self.block is False: super(periodic,self).start() else: self.run() def stop(self): self.timeout = None self._completed.set() def __repr__(self): return 'periodic(%s, %s, %s)' % (self.period, self.count, callable_name(self.callback)) def __str__(self): return repr(self) def run(self): while not self.completed: if self.block: time.sleep(self.period) else: self._completed.wait(self.period) self.counter += 1 try: self.callback(self.counter) except Exception as e: self.stop() if self.timeout is not None: dt = (time.time() - self._start_time) if dt > self.timeout: self.stop() if self.counter == self.count: self.stop() def deephash(obj): """ Given an object, return a hash using HashableJSON. This hash is not architecture, Python version or platform independent. """ try: return hash(json.dumps(obj, cls=HashableJSON, sort_keys=True)) except: return None # Python3 compatibility if sys.version_info.major == 3: basestring = str unicode = str generator_types = (zip, range, types.GeneratorType) else: basestring = basestring unicode = unicode from itertools import izip generator_types = (izip, xrange, types.GeneratorType) def argspec(callable_obj): """ Returns an ArgSpec object for functions, staticmethods, instance methods, classmethods and partials. Note that the args list for instance and class methods are those as seen by the user. In other words, the first argument which is conventionally called 'self' or 'cls' is omitted in these cases. """ if (isinstance(callable_obj, type) and issubclass(callable_obj, param.ParameterizedFunction)): # Parameterized function.__call__ considered function in py3 but not py2 spec = inspect.getargspec(callable_obj.__call__) args=spec.args[1:] elif inspect.isfunction(callable_obj): # functions and staticmethods return inspect.getargspec(callable_obj) elif isinstance(callable_obj, partial): # partials arglen = len(callable_obj.args) spec = inspect.getargspec(callable_obj.func) args = [arg for arg in spec.args[arglen:] if arg not in callable_obj.keywords] elif inspect.ismethod(callable_obj): # instance and class methods spec = inspect.getargspec(callable_obj) args = spec.args[1:] else: # callable objects return argspec(callable_obj.__call__) return inspect.ArgSpec(args = args, varargs = spec.varargs, keywords = spec.keywords, defaults = spec.defaults) def validate_dynamic_argspec(callback, kdims, streams): """ Utility used by DynamicMap to ensure the supplied callback has an appropriate signature. If validation succeeds, returns a list of strings to be zipped with the positional arguments i.e kdim values. The zipped values can then be merged with the stream values to pass everything to the Callable as keywords. If the callbacks use *args, None is returned to indicate that kdim values must be passed to the Callable by position. 
In this situation, Callable passes *args and **kwargs directly to the callback. If the callback doesn't use **kwargs, the accepted keywords are validated against the stream parameter names. """ argspec = callback.argspec name = callback.name kdims = [kdim.name for kdim in kdims] stream_params = stream_parameters(streams) defaults = argspec.defaults if argspec.defaults else [] all_posargs = argspec.args[:-len(defaults)] if defaults else argspec.args # Filter out any posargs for streams posargs = [arg for arg in all_posargs if arg not in stream_params] kwargs = argspec.args[-len(defaults):] if argspec.keywords is None: unassigned_streams = set(stream_params) - set(argspec.args) if unassigned_streams: unassigned = ','.join(unassigned_streams) raise KeyError('Callable {name!r} missing keywords to ' 'accept stream parameters: {unassigned}'.format(name=name, unassigned=unassigned)) if len(posargs) > len(kdims) + len(stream_params): raise KeyError('Callable {name!r} accepts more positional arguments than ' 'there are kdims and stream parameters'.format(name=name)) if kdims == []: # Can be no posargs, stream kwargs already validated return [] if set(kdims) == set(posargs): # Posargs match exactly, can all be passed as kwargs return kdims elif len(posargs) == len(kdims): # Posargs match kdims length, supplying names if argspec.args[:len(kdims)] != posargs: raise KeyError('Unmatched positional kdim arguments only allowed at ' 'the start of the signature of {name!r}'.format(name=name)) return posargs elif argspec.varargs: # Posargs missing, passed to Callable directly return None elif set(posargs) - set(kdims): raise KeyError('Callable {name!r} accepts more positional arguments {posargs} ' 'than there are key dimensions {kdims}'.format(name=name, posargs=posargs, kdims=kdims)) elif set(kdims).issubset(set(kwargs)): # Key dims can be supplied by keyword return kdims elif set(kdims).issubset(set(posargs+kwargs)): return kdims else: raise KeyError('Callback {name!r} signature over {names} does not accommodate ' 'required kdims {kdims}'.format(name=name, names=list(set(posargs+kwargs)), kdims=kdims)) def callable_name(callable_obj): """ Attempt to return a meaningful name identifying a callable or generator """ try: if (isinstance(callable_obj, type) and issubclass(callable_obj, param.ParameterizedFunction)): return callable_obj.__name__ elif (isinstance(callable_obj, param.Parameterized) and 'operation' in callable_obj.params()): return callable_obj.operation.__name__ elif isinstance(callable_obj, partial): return str(callable_obj) elif inspect.isfunction(callable_obj): # functions and staticmethods return callable_obj.__name__ elif inspect.ismethod(callable_obj): # instance and class methods meth = callable_obj if sys.version_info < (3,0): owner = meth.im_class if meth.im_self is None else meth.im_self else: owner = meth.__self__ if meth.__name__ == '__call__': return type(owner).__name__ return '.'.join([owner.__name__, meth.__name__]) elif isinstance(callable_obj, types.GeneratorType): return callable_obj.__name__ else: return type(callable_obj).__name__ except: return str(callable_obj) def process_ellipses(obj, key, vdim_selection=False): """ Helper function to pad a __getitem__ key with the right number of empty slices (i.e :) when the key contains an Ellipsis (...). If the vdim_selection flag is true, check if the end of the key contains strings or Dimension objects in obj. 
If so, extra padding will not be applied for the value dimensions (i.e the resulting key will be exactly one longer than the number of kdims). Note: this flag should not be used for composite types. """ if isinstance(key, np.ndarray) and key.dtype.kind == 'b': return key wrapped_key = wrap_tuple(key) if wrapped_key.count(Ellipsis)== 0: return key if wrapped_key.count(Ellipsis)!=1: raise Exception("Only one ellipsis allowed at a time.") dim_count = len(obj.dimensions()) index = wrapped_key.index(Ellipsis) head = wrapped_key[:index] tail = wrapped_key[index+1:] padlen = dim_count - (len(head) + len(tail)) if vdim_selection: # If the end of the key (i.e the tail) is in vdims, pad to len(kdims)+1 if wrapped_key[-1] in obj.vdims: padlen = (len(obj.kdims) +1 ) - len(head+tail) return head + ((slice(None),) * padlen) + tail def bytes_to_unicode(value): """ Safely casts bytestring to unicode """ if isinstance(value, bytes): return unicode(value.decode('utf-8')) return value def capitalize_unicode_name(s): """ Turns a string such as 'capital delta' into the shortened, capitalized version, in this case simply 'Delta'. Used as a transform in sanitize_identifier. """ index = s.find('capital') if index == -1: return s tail = s[index:].replace('capital', '').strip() tail = tail[0].upper() + tail[1:] return s[:index] + tail class Aliases(object): """ Helper class useful for defining a set of alias tuples on a single object. For instance, when defining a group or label with an alias, instead of setting tuples in the constructor, you could use ``aliases.water`` if you first define: >>> aliases = Aliases(water='H_2O', glucose='C_6H_{12}O_6') >>> aliases.water ('water', 'H_2O') This may be used to conveniently define aliases for groups, labels or dimension names. """ def __init__(self, **kwargs): for k,v in kwargs.items(): setattr(self, k, (k,v)) class sanitize_identifier_fn(param.ParameterizedFunction): """ Sanitizes group/label values for use in AttrTree attribute access. Depending on the version parameter, either sanitization appropriate for Python 2 (no unicode gn identifiers allowed) or Python 3 (some unicode allowed) is used. Note that if you are using Python 3, you can switch to version 2 for compatibility but you cannot enable relaxed sanitization if you are using Python 2. Special characters are sanitized using their (lowercase) unicode name using the unicodedata module. For instance: >>> unicodedata.name(u'$').lower() 'dollar sign' As these names are often very long, this parameterized function allows filtered, substitutions and transforms to help shorten these names appropriately. """ version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc=""" The sanitization version. If set to 2, more aggressive sanitization appropriate for Python 2 is applied. Otherwise, if set to 3, more relaxed, Python 3 sanitization is used.""") capitalize = param.Boolean(default=True, doc=""" Whether the first letter should be converted to uppercase. Note, this will only be applied to ASCII characters in order to make sure paths aren't confused with method names.""") eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit', 'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc=""" Lowercase strings to be eliminated from the unicode names in order to shorten the sanitized name ( lowercase). 
Redundant strings should be removed but too much elimination could cause two unique strings to map to the same sanitized output.""") substitutions = param.Dict(default={'circumflex':'power', 'asterisk':'times', 'solidus':'over'}, doc=""" Lowercase substitutions of substrings in unicode names. For instance the ^ character has the name 'circumflex accent' even though it is more typically used for exponentiation. Note that substitutions occur after filtering and that there should be no ordering dependence between substitutions.""") transforms = param.List(default=[capitalize_unicode_name], doc=""" List of string transformation functions to apply after filtering and substitution in order to further compress the unicode name. For instance, the default capitalize_unicode_name function will turn the string "capital delta" into "Delta".""") disallowed = param.List(default=['trait_names', '_ipython_display_', '_getAttributeNames'], doc=""" An explicit list of name that should not be allowed as attribute names on Tree objects. By default, prevents IPython from creating an entry called Trait_names due to an inconvenient getattr check (during tab-completion).""") disable_leading_underscore = param.Boolean(default=False, doc=""" Whether leading underscores should be allowed to be sanitized with the leading prefix.""") aliases = param.Dict(default={}, doc=""" A dictionary of aliases mapping long strings to their short, sanitized equivalents""") prefix = 'A_' _lookup_table = param.Dict(default={}, doc=""" Cache of previously computed sanitizations""") @param.parameterized.bothmethod def add_aliases(self_or_cls, **kwargs): """ Conveniently add new aliases as keyword arguments. For instance you can add a new alias with add_aliases(short='Longer string') """ self_or_cls.aliases.update({v:k for k,v in kwargs.items()}) @param.parameterized.bothmethod def remove_aliases(self_or_cls, aliases): """ Remove a list of aliases. """ for k,v in self_or_cls.aliases.items(): if v in aliases: self_or_cls.aliases.pop(k) @param.parameterized.bothmethod def allowable(self_or_cls, name, disable_leading_underscore=None): disabled_reprs = ['javascript', 'jpeg', 'json', 'latex', 'latex', 'pdf', 'png', 'svg', 'markdown'] disabled_ = (self_or_cls.disable_leading_underscore if disable_leading_underscore is None else disable_leading_underscore) if disabled_ and name.startswith('_'): return False isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs) return (name not in self_or_cls.disallowed) and not isrepr @param.parameterized.bothmethod def prefixed(self, identifier, version): """ Whether or not the identifier will be prefixed. Strings that require the prefix are generally not recommended. 
""" invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc'] if identifier.startswith('_'): return True return((identifier[0] in string.digits) if version==2 else (unicodedata.category(identifier[0]) in invalid_starting)) @param.parameterized.bothmethod def remove_diacritics(self_or_cls, identifier): """ Remove diacritics and accents from the input leaving other unicode characters alone.""" chars = '' for c in identifier: replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore') if replacement != '': chars += bytes_to_unicode(replacement) else: chars += c return chars @param.parameterized.bothmethod def shortened_character_name(self_or_cls, c, eliminations=[], substitutions={}, transforms=[]): """ Given a unicode character c, return the shortened unicode name (as a list of tokens) by applying the eliminations, substitutions and transforms. """ name = unicodedata.name(c).lower() # Filtering for elim in eliminations: name = name.replace(elim, '') # Substitution for i,o in substitutions.items(): name = name.replace(i, o) for transform in transforms: name = transform(name) return ' '.join(name.strip().split()).replace(' ','_').replace('-','_') def __call__(self, name, escape=True, version=None): if name in [None, '']: return name elif name in self.aliases: return self.aliases[name] elif name in self._lookup_table: return self._lookup_table[name] name = bytes_to_unicode(name) version = self.version if version is None else version if not self.allowable(name): raise AttributeError("String %r is in the disallowed list of attribute names: %r" % self.disallowed) if version == 2: name = self.remove_diacritics(name) if self.capitalize and name and name[0] in string.ascii_lowercase: name = name[0].upper()+name[1:] sanitized = (self.sanitize_py2(name) if version==2 else self.sanitize_py3(name)) if self.prefixed(name, version): sanitized = self.prefix + sanitized self._lookup_table[name] = sanitized return sanitized def _process_underscores(self, tokens): "Strip underscores to make sure the number is correct after join" groups = [[str(''.join(el))] if b else list(el) for (b,el) in itertools.groupby(tokens, lambda k: k=='_')] flattened = [el for group in groups for el in group] processed = [] for token in flattened: if token == '_': continue if token.startswith('_'): token = str(token[1:]) if token.endswith('_'): token = str(token[:-1]) processed.append(token) return processed def sanitize_py2(self, name): # This fix works but masks an issue in self.sanitize (py2) prefix = '_' if name.startswith('_') else '' valid_chars = string.ascii_letters+string.digits+'_' return prefix + str('_'.join(self.sanitize(name, lambda c: c in valid_chars))) def sanitize_py3(self, name): if not name.isidentifier(): return '_'.join(self.sanitize(name, lambda c: ('_'+c).isidentifier())) else: return name def sanitize(self, name, valid_fn): "Accumulate blocks of hex and separate blocks by underscores" invalid = {'\a':'a','\b':'b', '\v':'v','\f':'f','\r':'r'} for cc in filter(lambda el: el in name, invalid.keys()): raise Exception("Please use a raw string or escape control code '\%s'" % invalid[cc]) sanitized, chars = [], '' for split in name.split(): for c in split: if valid_fn(c): chars += str(c) if c=='_' else c else: short = self.shortened_character_name(c, self.eliminations, self.substitutions, self.transforms) sanitized.extend([chars] if chars else []) if short != '': sanitized.append(short) chars = '' if chars: sanitized.extend([chars]) chars='' return self._process_underscores(sanitized + ([chars] if chars else [])) 
sanitize_identifier = sanitize_identifier_fn.instance() group_sanitizer = sanitize_identifier_fn.instance() label_sanitizer = sanitize_identifier_fn.instance() dimension_sanitizer = sanitize_identifier_fn.instance(capitalize=False) def isnumeric(val): if isinstance(val, (basestring, bool, np.bool_)): return False try: float(val) return True except: return False def find_minmax(lims, olims): """ Takes (a1, a2) and (b1, b2) as input and returns (np.nanmin(a1, b1), np.nanmax(a2, b2)). Used to calculate min and max values of a number of items. """ try: limzip = zip(list(lims), list(olims), [np.nanmin, np.nanmax]) limits = tuple([float(fn([l, ol])) for l, ol, fn in limzip]) except: limits = (np.NaN, np.NaN) return limits def find_range(values, soft_range=[]): """ Safely finds either the numerical min and max of a set of values, falling back to the first and the last value in the sorted list of values. """ try: values = np.array(values) values = np.squeeze(values) if len(values.shape) > 1 else values if len(soft_range): values = np.concatenate([values, soft_range]) if values.dtype.kind == 'M': return values.min(), values.max() return np.nanmin(values), np.nanmax(values) except: try: values = sorted(values) return (values[0], values[-1]) except: return (None, None) def max_range(ranges): """ Computes the maximal lower and upper bounds from a list bounds. """ try: with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered') values = [r for r in ranges for v in r if v is not None] if pd and all(isinstance(v, pd.Timestamp) for r in values for v in r): values = [(v1.to_datetime64(), v2.to_datetime64()) for v1, v2 in values] arr = np.array(values) if arr.dtype.kind in 'OSU': arr = np.sort([v for v in arr.flat if not is_nan(v)]) return arr[0], arr[-1] if arr.dtype.kind in 'M': return arr[:, 0].min(), arr[:, 1].max() return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1])) except: return (np.NaN, np.NaN) def dimension_range(lower, upper, dimension): """ Computes the range along a dimension by combining the data range with the Dimension soft_range and range. """ lower, upper = max_range([(lower, upper), dimension.soft_range]) dmin, dmax = dimension.range lower = lower if dmin is None or not np.isfinite(dmin) else dmin upper = upper if dmax is None or not np.isfinite(dmax) else dmax return lower, upper def max_extents(extents, zrange=False): """ Computes the maximal extent in 2D and 3D space from list of 4-tuples or 6-tuples. If zrange is enabled all extents are converted to 6-tuples to compute x-, y- and z-limits. 
""" if zrange: num = 6 inds = [(0, 3), (1, 4), (2, 5)] extents = [e if len(e) == 6 else (e[0], e[1], None, e[2], e[3], None) for e in extents] else: num = 4 inds = [(0, 2), (1, 3)] arr = list(zip(*extents)) if extents else [] extents = [np.NaN] * num if len(arr) == 0: return extents with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered') for lidx, uidx in inds: lower = [v for v in arr[lidx] if v is not None and not is_nan(v)] upper = [v for v in arr[uidx] if v is not None and not is_nan(v)] if lower and isinstance(lower[0], datetime_types): extents[lidx] = np.min(lower) elif any(isinstance(l, basestring) for l in lower): extents[lidx] = np.sort(lower)[0] elif lower: extents[lidx] = np.nanmin(lower) if upper and isinstance(upper[0], datetime_types): extents[uidx] = np.max(upper) elif any(isinstance(u, basestring) for u in upper): extents[uidx] = np.sort(upper)[-1] elif upper: extents[uidx] = np.nanmax(upper) return tuple(extents) def int_to_alpha(n, upper=True): "Generates alphanumeric labels of form A-Z, AA-ZZ etc." casenum = 65 if upper else 97 label = '' count= 0 if n == 0: return str(chr(n + casenum)) while n >= 0: mod, div = n % 26, n for _ in range(count): div //= 26 div %= 26 if count == 0: val = mod else: val = div label += str(chr(val + casenum)) count += 1 n -= 26**count return label[::-1] def int_to_roman(input): if type(input) != type(1): raise TypeError("expected integer, got %s" % type(input)) if not 0 < input < 4000: raise ValueError("Argument must be between 1 and 3999") ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1) nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I') result = "" for i in range(len(ints)): count = int(input / ints[i]) result += nums[i] * count input -= ints[i] * count return result def unique_iterator(seq): """ Returns an iterator containing all non-duplicate elements in the input sequence. """ seen = set() for item in seq: if item not in seen: seen.add(item) yield item def unique_array(arr): """ Returns an array of unique values in the input order """ if not len(arr): return arr elif pd: return pd.unique(arr) else: arr = np.asarray(arr) _, uniq_inds = np.unique(arr, return_index=True) return arr[np.sort(uniq_inds)] def match_spec(element, specification): """ Matches the group.label specification of the supplied element against the supplied specification dictionary returning the value of the best match. """ match_tuple = () match = specification.get((), {}) for spec in [type(element).__name__, group_sanitizer(element.group, escape=False), label_sanitizer(element.label, escape=False)]: match_tuple += (spec,) if match_tuple in specification: match = specification[match_tuple] return match def python2sort(x,key=None): if len(x) == 0: return x it = iter(x) groups = [[next(it)]] for item in it: for group in groups: try: item_precedence = item if key is None else key(item) group_precedence = group[0] if key is None else key(group[0]) item_precedence < group_precedence # exception if not comparable group.append(item) break except TypeError: continue else: # did not break, make new group groups.append([item]) return itertools.chain.from_iterable(sorted(group, key=key) for group in groups) def merge_dimensions(dimensions_list): """ Merges lists of fully or partially overlapping dimensions by merging their values. >>> from holoviews import Dimension >>> dim_list = [[Dimension('A', values=[1, 2, 3]), Dimension('B')], ... 
[Dimension('A', values=[2, 3, 4])]] >>> dimensions = merge_dimensions(dim_list) >>> dimensions [Dimension('A'), Dimension('B')] >>> dimensions[0].values [1, 2, 3, 4] """ dvalues = defaultdict(list) dimensions = [] for dims in dimensions_list: for d in dims: dvalues[d.name].append(d.values) if d not in dimensions: dimensions.append(d) dvalues = {k: list(unique_iterator(itertools.chain(*vals))) for k, vals in dvalues.items()} return [d(values=dvalues.get(d.name, [])) for d in dimensions] def dimension_sort(odict, kdims, vdims, categorical, key_index, cached_values): """ Sorts data by key using usual Python tuple sorting semantics or sorts in categorical order for any categorical Dimensions. """ sortkws = {} ndims = len(kdims) dimensions = kdims+vdims indexes = [(dimensions[i], int(i not in range(ndims)), i if i in range(ndims) else i-ndims) for i in key_index] cached_values = {d: [None]+vals for d, vals in cached_values.items()} if len(set(key_index)) != len(key_index): raise ValueError("Cannot sort on duplicated dimensions") elif categorical: sortkws['key'] = lambda x: tuple(cached_values[dim.name].index(x[t][d]) if dim.values else x[t][d] for i, (dim, t, d) in enumerate(indexes)) elif key_index != list(range(len(kdims+vdims))): sortkws['key'] = lambda x: tuple(x[t][d] for _, t, d in indexes) if sys.version_info.major == 3: return python2sort(odict.items(), **sortkws) else: return sorted(odict.items(), **sortkws) # Copied from param should make param version public def is_number(obj): if isinstance(obj, numbers.Number): return True # The extra check is for classes that behave like numbers, such as those # found in numpy, gmpy, etc. elif (hasattr(obj, '__int__') and hasattr(obj, '__add__')): return True # This is for older versions of gmpy elif hasattr(obj, 'qdiv'): return True else: return False class ProgressIndicator(param.Parameterized): """ Baseclass for any ProgressIndicator that indicates progress as a completion percentage. """ percent_range = param.NumericTuple(default=(0.0, 100.0), doc=""" The total percentage spanned by the progress bar when called with a value between 0% and 100%. This allows an overall completion in percent to be broken down into smaller sub-tasks that individually complete to 100 percent.""") label = param.String(default='Progress', allow_None=True, doc=""" The label of the current progress bar.""") def __call__(self, completion): raise NotImplementedError def sort_topologically(graph): """ Stackless topological sorting. graph = { 3: [1], 5: [3], 4: [2], 6: [4], } sort_topologically(graph) [[1, 2], [3, 4], [5, 6]] """ levels_by_name = {} names_by_level = defaultdict(list) def add_level_to_name(name, level): levels_by_name[name] = level names_by_level[level].append(name) def walk_depth_first(name): stack = [name] while(stack): name = stack.pop() if name in levels_by_name: continue if name not in graph or not graph[name]: level = 0 add_level_to_name(name, level) continue children = graph[name] children_not_calculated = [child for child in children if child not in levels_by_name] if children_not_calculated: stack.append(name) stack.extend(children_not_calculated) continue level = 1 + max(levels_by_name[lname] for lname in children) add_level_to_name(name, level) for name in graph: walk_depth_first(name) return list(itertools.takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in itertools.count()))) def is_cyclic(graph): """ Return True if the directed graph g has a cycle. 
The directed graph should be represented as a dictionary mapping of edges for each node. """ path = set() def visit(vertex): path.add(vertex) for neighbour in graph.get(vertex, ()): if neighbour in path or visit(neighbour): return True path.remove(vertex) return False return any(visit(v) for v in graph) def one_to_one(graph, nodes): """ Return True if graph contains only one to one mappings. The directed graph should be represented as a dictionary mapping of edges for each node. Nodes should be passed a simple list. """ edges = itertools.chain.from_iterable(graph.values()) return len(graph) == len(nodes) and len(set(edges)) == len(nodes) def get_overlay_spec(o, k, v): """ Gets the type.group.label + key spec from an Element in an Overlay. """ k = wrap_tuple(k) return ((type(v).__name__, v.group, v.label) + k if len(o.kdims) else (type(v).__name__,) + k) def layer_sort(hmap): """ Find a global ordering for layers in a HoloMap of CompositeOverlay types. """ orderings = {} for o in hmap: okeys = [get_overlay_spec(o, k, v) for k, v in o.data.items()] if len(okeys) == 1 and not okeys[0] in orderings: orderings[okeys[0]] = [] else: orderings.update({k: [] if k == v else [v] for k, v in zip(okeys[1:], okeys)}) return [i for g in sort_topologically(orderings) for i in sorted(g)] def layer_groups(ordering, length=2): """ Splits a global ordering of Layers into groups based on a slice of the spec. The grouping behavior can be modified by changing the length of spec the entries are grouped by. """ group_orderings = defaultdict(list) for el in ordering: group_orderings[el[:length]].append(el) return group_orderings def group_select(selects, length=None, depth=None): """ Given a list of key tuples to select, groups them into sensible chunks to avoid duplicating indexing operations. """ if length == None and depth == None: length = depth = len(selects[0]) getter = operator.itemgetter(depth-length) if length > 1: selects = sorted(selects, key=getter) grouped_selects = defaultdict(dict) for k, v in itertools.groupby(selects, getter): grouped_selects[k] = group_select(list(v), length-1, depth) return grouped_selects else: return list(selects) def iterative_select(obj, dimensions, selects, depth=None): """ Takes the output of group_select selecting subgroups iteratively, avoiding duplicating select operations. """ ndims = len(dimensions) depth = depth if depth is not None else ndims items = [] if isinstance(selects, dict): for k, v in selects.items(): items += iterative_select(obj.select(**{dimensions[ndims-depth]: k}), dimensions, v, depth-1) else: for s in selects: items.append((s, obj.select(**{dimensions[-1]: s[-1]}))) return items def get_spec(obj): """ Gets the spec from any labeled data object. """ return (obj.__class__.__name__, obj.group, obj.label) def find_file(folder, filename): """ Find a file given folder and filename. If the filename can be resolved directly returns otherwise walks the supplied folder. """ matches = [] if os.path.isabs(filename) and os.path.isfile(filename): return filename for root, _, filenames in os.walk(folder): for fn in fnmatch.filter(filenames, filename): matches.append(os.path.join(root, fn)) if not matches: raise IOError('File %s could not be found' % filename) return matches[-1] def is_dataframe(data): """ Checks whether the supplied data is DataFrame type. 
""" return((pd is not None and isinstance(data, pd.DataFrame)) or (dd is not None and isinstance(data, dd.DataFrame))) def get_param_values(data): params = dict(kdims=data.kdims, vdims=data.vdims, label=data.label) if (data.group != data.params()['group'].default and not isinstance(type(data).group, property)): params['group'] = data.group return params @contextmanager def disable_constant(parameterized): """ Temporarily set parameters on Parameterized object to constant=False. """ params = parameterized.params().values() constants = [p.constant for p in params] for p in params: p.constant = False try: yield except: raise finally: for (p, const) in zip(params, constants): p.constant = const def get_ndmapping_label(ndmapping, attr): """ Function to get the first non-auxiliary object label attribute from an NdMapping. """ label = None els = itervalues(ndmapping.data) while label is None: try: el = next(els) except StopIteration: return None if not el._auxiliary_component: label = getattr(el, attr) if attr == 'group': tp = type(el).__name__ if tp == label: return None return label def wrap_tuple(unwrapped): """ Wraps any non-tuple types in a tuple """ return (unwrapped if isinstance(unwrapped, tuple) else (unwrapped,)) def stream_name_mapping(stream, exclude_params=['name'], reverse=False): """ Return a complete dictionary mapping between stream parameter names to their applicable renames, excluding parameters listed in exclude_params. If reverse is True, the mapping is from the renamed strings to the original stream parameter names. """ filtered = [k for k in stream.params().keys() if k not in exclude_params] mapping = {k:stream._rename.get(k,k) for k in filtered} if reverse: return {v:k for k,v in mapping.items()} else: return mapping def rename_stream_kwargs(stream, kwargs, reverse=False): """ Given a stream and a kwargs dictionary of parameter values, map to the corresponding dictionary where the keys are substituted with the appropriately renamed string. If reverse, the output will be a dictionary using the original parameter names given a dictionary using the renamed equivalents. """ mapped_kwargs = {} mapping = stream_name_mapping(stream, reverse=reverse) for k,v in kwargs.items(): if k not in mapping: msg = 'Could not map key {key} {direction} renamed equivalent' direction = 'from' if reverse else 'to' raise KeyError(msg.format(key=repr(k), direction=direction)) mapped_kwargs[mapping[k]] = v return mapped_kwargs def stream_parameters(streams, no_duplicates=True, exclude=['name']): """ Given a list of streams, return a flat list of parameter name, excluding those listed in the exclude list. If no_duplicates is enabled, a KeyError will be raised if there are parameter name clashes across the streams. """ param_groups = [s.contents.keys() for s in streams] names = [name for group in param_groups for name in group] if no_duplicates: clashes = sorted(set([n for n in names if names.count(n) > 1])) clash_streams = [s for s in streams for c in clashes if c in s.contents] if clashes: clashing = ', '.join([repr(c) for c in clash_streams[:-1]]) raise Exception('The supplied stream objects %s and %s ' 'clash on the following parameters: %r' % (clashing, clash_streams[-1], clashes)) return [name for name in names if name not in exclude] def dimensionless_contents(streams, kdims, no_duplicates=True): """ Return a list of stream parameters that have not been associated with any of the key dimensions. 
""" names = stream_parameters(streams, no_duplicates) return [name for name in names if name not in kdims] def unbound_dimensions(streams, kdims, no_duplicates=True): """ Return a list of dimensions that have not been associated with any streams. """ params = stream_parameters(streams, no_duplicates) return [d for d in kdims if d not in params] def wrap_tuple_streams(unwrapped, kdims, streams): """ Fills in tuple keys with dimensioned stream values as appropriate. """ param_groups = [(s.contents.keys(), s) for s in streams] pairs = [(name,s) for (group, s) in param_groups for name in group] substituted = [] for pos,el in enumerate(wrap_tuple(unwrapped)): if el is None and pos < len(kdims): matches = [(name,s) for (name,s) in pairs if name==kdims[pos].name] if len(matches) == 1: (name, stream) = matches[0] el = stream.contents[name] substituted.append(el) return tuple(substituted) def drop_streams(streams, kdims, keys): """ Drop any dimensioned streams from the keys and kdims. """ stream_params = stream_parameters(streams) inds, dims = zip(*[(ind, kdim) for ind, kdim in enumerate(kdims) if kdim not in stream_params]) return dims, [tuple(wrap_tuple(key)[ind] for ind in inds) for key in keys] def itervalues(obj): "Get value iterator from dictionary for Python 2 and 3" return iter(obj.values()) if sys.version_info.major == 3 else obj.itervalues() def iterkeys(obj): "Get key iterator from dictionary for Python 2 and 3" return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys() def get_unique_keys(ndmapping, dimensions): inds = [ndmapping.get_dimension_index(dim) for dim in dimensions] getter = operator.itemgetter(*inds) return unique_iterator(getter(key) if len(inds) > 1 else (key[inds[0]],) for key in ndmapping.data.keys()) def unpack_group(group, getter): for k, v in group.iterrows(): obj = v.values[0] key = getter(k) if hasattr(obj, 'kdims'): yield (key, obj) else: obj = tuple(v) yield (wrap_tuple(key), obj) def capitalize(string): """ Capitalizes the first letter of a string. """ return string[0].upper() + string[1:] def get_path(item): """ Gets a path from an Labelled object or from a tuple of an existing path and a labelled object. The path strings are sanitized and capitalized. """ sanitizers = [group_sanitizer, label_sanitizer] if isinstance(item, tuple): path, item = item if item.label: if len(path) > 1 and item.label == path[1]: path = path[:2] else: path = path[:1] + (item.label,) else: path = path[:1] else: path = (item.group, item.label) if item.label else (item.group,) return tuple(capitalize(fn(p)) for (p, fn) in zip(path, sanitizers)) def make_path_unique(path, counts, new): """ Given a path, a list of existing paths and counts for each of the existing paths. """ added = False while any(path == c[:i] for c in counts for i in range(1, len(c)+1)): count = counts[path] counts[path] += 1 if (not new and len(path) > 1) or added: path = path[:-1] else: added = True path = path + (int_to_roman(count),) if len(path) == 1: path = path + (int_to_roman(counts.get(path, 1)),) if path not in counts: counts[path] = 1 return path class ndmapping_groupby(param.ParameterizedFunction): """ Apply a groupby operation to an NdMapping, using pandas to improve performance (if available). 
""" def __call__(self, ndmapping, dimensions, container_type, group_type, sort=False, **kwargs): try: import pandas # noqa (optional import) groupby = self.groupby_pandas except: groupby = self.groupby_python return groupby(ndmapping, dimensions, container_type, group_type, sort=sort, **kwargs) @param.parameterized.bothmethod def groupby_pandas(self_or_cls, ndmapping, dimensions, container_type, group_type, sort=False, **kwargs): if 'kdims' in kwargs: idims = [ndmapping.get_dimension(d) for d in kwargs['kdims']] else: idims = [dim for dim in ndmapping.kdims if dim not in dimensions] all_dims = [d.name for d in ndmapping.kdims] inds = [ndmapping.get_dimension_index(dim) for dim in idims] getter = operator.itemgetter(*inds) if inds else lambda x: tuple() multi_index = pd.MultiIndex.from_tuples(ndmapping.keys(), names=all_dims) df = pd.DataFrame(list(map(wrap_tuple, ndmapping.values())), index=multi_index) kwargs = dict(dict(get_param_values(ndmapping), kdims=idims), **kwargs) groups = ((wrap_tuple(k), group_type(OrderedDict(unpack_group(group, getter)), **kwargs)) for k, group in df.groupby(level=[d.name for d in dimensions])) if sort: selects = list(get_unique_keys(ndmapping, dimensions)) groups = sorted(groups, key=lambda x: selects.index(x[0])) return container_type(groups, kdims=dimensions) @param.parameterized.bothmethod def groupby_python(self_or_cls, ndmapping, dimensions, container_type, group_type, sort=False, **kwargs): idims = [dim for dim in ndmapping.kdims if dim not in dimensions] dim_names = [dim.name for dim in dimensions] selects = get_unique_keys(ndmapping, dimensions) selects = group_select(list(selects)) groups = [(k, group_type((v.reindex(idims) if hasattr(v, 'kdims') else [((), (v,))]), **kwargs)) for k, v in iterative_select(ndmapping, dim_names, selects)] return container_type(groups, kdims=dimensions) def cartesian_product(arrays, flat=True, copy=False): """ Efficient cartesian product of a list of 1D arrays returning the expanded array views for each dimensions. By default arrays are flattened, which may be controlled with the flat flag. The array views can be turned into regular arrays with the copy flag. """ arrays = np.broadcast_arrays(*np.ix_(*arrays)) if flat: return tuple(arr.flatten() if copy else arr.flat for arr in arrays) return tuple(arr.copy() if copy else arr for arr in arrays) def arglexsort(arrays): """ Returns the indices of the lexicographical sorting order of the supplied arrays. """ dtypes = ','.join(array.dtype.str for array in arrays) recarray = np.empty(len(arrays[0]), dtype=dtypes) for i, array in enumerate(arrays): recarray['f%s' % i] = array return recarray.argsort() def dimensioned_streams(dmap): """ Given a DynamicMap return all streams that have any dimensioned parameters i.e parameters also listed in the key dimensions. """ dimensioned = [] for stream in dmap.streams: stream_params = stream_parameters([stream]) if set([str(k) for k in dmap.kdims]) & set(stream_params): dimensioned.append(stream) return dimensioned def expand_grid_coords(dataset, dim): """ Expand the coordinates along a dimension of the gridded dataset into an ND-array matching the dimensionality of the dataset. """ arrays = [dataset.interface.coords(dataset, d.name, True) for d in dataset.kdims] idx = dataset.get_dimension_index(dim) return cartesian_product(arrays, flat=False)[idx] def dt64_to_dt(dt64): """ Safely converts NumPy datetime64 to a datetime object. 
""" ts = (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's') return dt.datetime.utcfromtimestamp(ts) def is_nan(x): """ Checks whether value is NaN on arbitrary types """ try: return np.isnan(x) except: return False def bound_range(vals, density): """ Computes a bounding range and density from a number of samples assumed to be evenly spaced. Density is rounded to machine precision using significant digits reported by sys.float_info.dig. """ low, high = vals.min(), vals.max() invert = False if len(vals) > 1 and vals[0] > vals[1]: invert = True if not density: with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered in double_scalars') full_precision_density = 1./((high-low)/(len(vals)-1)) density = round(full_precision_density, sys.float_info.dig) if density == 0: density = full_precision_density if density == 0: raise ValueError('Could not determine Image density, ensure it has a non-zero range.') halfd = 0.5/density return low-halfd, high+halfd, density, invert
1
19,028
This is a fair bit faster, although still not great; hence also adding a hashkey.
holoviz-holoviews
py
@@ -0,0 +1,17 @@ +<div class="started"> + <section class="trail"> + <header> + <h1><%= trail.name %></h1> + <span class="numerical-progress"> + <%= pluralize( + trail.steps_remaining_for(current_user), + "step" + ) %> remaining + </span> + </header> + + <section class="progress"> + <%= render partial: "trails/exercise", collection: trail.exercises %> + </section> + </section> +</div>
1
1
11,815
If we're going to leave this in here, how about we at least pull it into a partial?
thoughtbot-upcase
rb
@@ -75,7 +75,7 @@ func TestCtlInstall(t *testing.T) { testApiServer, cleanup := install_framework.NewTestInstallApiServer(t) defer cleanup() - ctx, cancel := context.WithTimeout(context.TODO(), time.Second*20) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*40) defer cancel() if test.prerun {
1
/* Copyright 2021 The cert-manager Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ctl import ( "bytes" "context" "fmt" "regexp" "strings" "testing" "time" "github.com/sergi/go-diff/diffmatchpatch" "github.com/jetstack/cert-manager/cmd/ctl/cmd" "github.com/jetstack/cert-manager/test/integration/ctl/install_framework" "github.com/jetstack/cert-manager/test/internal/util" ) func TestCtlInstall(t *testing.T) { tests := map[string]struct { prerun bool preInputArgs []string preExpErr bool preExpOutput string inputArgs []string expErr bool expOutput string }{ "install cert-manager": { inputArgs: []string{}, expErr: false, expOutput: `STATUS: deployed`, }, "install cert-manager (already installed)": { prerun: true, preInputArgs: []string{}, preExpErr: false, preExpOutput: `STATUS: deployed`, inputArgs: []string{}, expErr: true, expOutput: `^Found existing installed cert-manager CRDs! Cannot continue with installation.$`, }, "install cert-manager (already installed, in other namespace)": { prerun: true, preInputArgs: []string{"--namespace=test"}, preExpErr: false, preExpOutput: `STATUS: deployed`, inputArgs: []string{}, expErr: true, expOutput: `^Found existing installed cert-manager CRDs! Cannot continue with installation.$`, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { testApiServer, cleanup := install_framework.NewTestInstallApiServer(t) defer cleanup() ctx, cancel := context.WithTimeout(context.TODO(), time.Second*20) defer cancel() if test.prerun { executeCommandAndCheckOutput(t, ctx, testApiServer.KubeConfig(), test.preInputArgs, test.preExpErr, test.preExpOutput) } executeCommandAndCheckOutput(t, ctx, testApiServer.KubeConfig(), test.inputArgs, test.expErr, test.expOutput) }) } } func executeCommandAndCheckOutput( t *testing.T, ctx context.Context, kubeConfig string, inputArgs []string, expErr bool, expOutput string, ) { // Options to run status command stdin := bytes.NewBufferString("") stdout := bytes.NewBufferString("") chartPath := util.GetTestPath("deploy", "charts", "cert-manager", "cert-manager.tgz") cmd := cmd.NewCertManagerCtlCommand(ctx, stdin, stdout, stdout) cmd.SetArgs(append([]string{ fmt.Sprintf("--kubeconfig=%s", kubeConfig), "--wait=false", fmt.Sprintf("--chart-name=%s", chartPath), "x", "install", }, inputArgs...)) err := cmd.Execute() if err != nil { fmt.Fprintf(stdout, "%s\n", err) if !expErr { t.Errorf("got unexpected error: %v", err) } else { t.Logf("got an error, which was expected, details: %v", err) } } else if expErr { // expected error but error is nil t.Errorf("expected but got no error") } match, err := regexp.MatchString(strings.TrimSpace(expOutput), strings.TrimSpace(stdout.String())) if err != nil { t.Error(err) } dmp := diffmatchpatch.New() if !match { diffs := dmp.DiffMain(strings.TrimSpace(expOutput), strings.TrimSpace(stdout.String()), false) t.Errorf( "got unexpected output, diff (ignoring line anchors ^ and $ and regex for creation time):\n"+ "diff: %s\n\n"+ " exp: %s\n\n"+ " got: %s", dmp.DiffPrettyText(diffs), expOutput, stdout.String(), ) } }
1
29,097
This `context` is used correctly; there is nothing left TO DO. (See the sketch after this record for the Background/TODO convention.)
jetstack-cert-manager
go
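The diff in this record swaps context.TODO() for context.Background() in the test setup (and raises the timeout), and the comment above treats that choice as settled. Here is a small self-contained Go sketch of the convention, assuming nothing about the cert-manager code beyond what the diff shows; the timeout value is copied from the diff for illustration.

package main

import (
	"context"
	"fmt"
	"time"
)

// context.Background is the deliberate root context for top-level work such as
// a test case, while context.TODO is a placeholder for code that has not yet
// decided which context it should receive. Both return an empty, never-cancelled
// context; the difference is documented intent.
func main() {
	// Deliberate root context with a timeout, as in the updated test.
	ctx, cancel := context.WithTimeout(context.Background(), 40*time.Second)
	defer cancel()

	// Placeholder context, conventionally replaced once the real one is threaded through.
	todo := context.TODO()

	fmt.Println(ctx.Err(), todo.Err()) // both nil until the deadline passes
}

Because the two constructors behave identically, the change is purely about signalling intent rather than altering how the test runs.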
@@ -230,12 +230,9 @@ function countDocuments(coll, query, options, callback) { delete options.limit; delete options.skip; - coll.aggregate(pipeline, options, (err, result) => { + coll.aggregate(pipeline, options).toArray((err, docs) => { if (err) return handleCallback(callback, err); - result.toArray((err, docs) => { - if (err) return handleCallback(err); - handleCallback(callback, null, docs.length ? docs[0].n : 0); - }); + handleCallback(callback, null, docs.length ? docs[0].n : 0); }); }
1
'use strict'; const applyWriteConcern = require('../utils').applyWriteConcern; const checkCollectionName = require('../utils').checkCollectionName; const Code = require('mongodb-core').BSON.Code; const createIndexDb = require('./db_ops').createIndex; const decorateCommand = require('../utils').decorateCommand; const decorateWithCollation = require('../utils').decorateWithCollation; const decorateWithReadConcern = require('../utils').decorateWithReadConcern; const ensureIndexDb = require('./db_ops').ensureIndex; const evaluate = require('./db_ops').evaluate; const executeCommand = require('./db_ops').executeCommand; const executeDbAdminCommand = require('./db_ops').executeDbAdminCommand; const formattedOrderClause = require('../utils').formattedOrderClause; const resolveReadPreference = require('../utils').resolveReadPreference; const handleCallback = require('../utils').handleCallback; const indexInformationDb = require('./db_ops').indexInformation; const isObject = require('../utils').isObject; const Long = require('mongodb-core').BSON.Long; const MongoError = require('mongodb-core').MongoError; const ReadPreference = require('mongodb-core').ReadPreference; const toError = require('../utils').toError; /** * Group function helper * @ignore */ // var groupFunction = function () { // var c = db[ns].find(condition); // var map = new Map(); // var reduce_function = reduce; // // while (c.hasNext()) { // var obj = c.next(); // var key = {}; // // for (var i = 0, len = keys.length; i < len; ++i) { // var k = keys[i]; // key[k] = obj[k]; // } // // var aggObj = map.get(key); // // if (aggObj == null) { // var newObj = Object.extend({}, key); // aggObj = Object.extend(newObj, initial); // map.put(key, aggObj); // } // // reduce_function(obj, aggObj); // } // // return { "result": map.values() }; // }.toString(); const groupFunction = 'function () {\nvar c = db[ns].find(condition);\nvar map = new Map();\nvar reduce_function = reduce;\n\nwhile (c.hasNext()) {\nvar obj = c.next();\nvar key = {};\n\nfor (var i = 0, len = keys.length; i < len; ++i) {\nvar k = keys[i];\nkey[k] = obj[k];\n}\n\nvar aggObj = map.get(key);\n\nif (aggObj == null) {\nvar newObj = Object.extend({}, key);\naggObj = Object.extend(newObj, initial);\nmap.put(key, aggObj);\n}\n\nreduce_function(obj, aggObj);\n}\n\nreturn { "result": map.values() };\n}'; /** * Perform a bulkWrite operation. See Collection.prototype.bulkWrite for more information. * * @method * @param {Collection} a Collection instance. * @param {object[]} operations Bulk operations to perform. * @param {object} [options] Optional settings. See Collection.prototype.bulkWrite for a list of options. * @param {Collection~bulkWriteOpCallback} [callback] The command result callback */ function bulkWrite(coll, operations, options, callback) { // Add ignoreUndfined if (coll.s.options.ignoreUndefined) { options = Object.assign({}, options); options.ignoreUndefined = coll.s.options.ignoreUndefined; } // Create the bulk operation const bulk = options.ordered === true || options.ordered == null ? 
coll.initializeOrderedBulkOp(options) : coll.initializeUnorderedBulkOp(options); // Do we have a collation let collation = false; // for each op go through and add to the bulk try { for (let i = 0; i < operations.length; i++) { // Get the operation type const key = Object.keys(operations[i])[0]; // Check if we have a collation if (operations[i][key].collation) { collation = true; } // Pass to the raw bulk bulk.raw(operations[i]); } } catch (err) { return callback(err, null); } // Final options for write concern const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); const writeCon = finalOptions.writeConcern ? finalOptions.writeConcern : {}; const capabilities = coll.s.topology.capabilities(); // Did the user pass in a collation, check if our write server supports it if (collation && capabilities && !capabilities.commandsTakeCollation) { return callback(new MongoError('server/primary/mongos does not support collation')); } // Execute the bulk bulk.execute(writeCon, finalOptions, (err, r) => { // We have connection level error if (!r && err) { return callback(err, null); } r.insertedCount = r.nInserted; r.matchedCount = r.nMatched; r.modifiedCount = r.nModified || 0; r.deletedCount = r.nRemoved; r.upsertedCount = r.getUpsertedIds().length; r.upsertedIds = {}; r.insertedIds = {}; // Update the n r.n = r.insertedCount; // Inserted documents const inserted = r.getInsertedIds(); // Map inserted ids for (let i = 0; i < inserted.length; i++) { r.insertedIds[inserted[i].index] = inserted[i]._id; } // Upserted documents const upserted = r.getUpsertedIds(); // Map upserted ids for (let i = 0; i < upserted.length; i++) { r.upsertedIds[upserted[i].index] = upserted[i]._id; } // Return the results callback(null, r); }); } // Check the update operation to ensure it has atomic operators. function checkForAtomicOperators(update) { const keys = Object.keys(update); // same errors as the server would give for update doc lacking atomic operators if (keys.length === 0) { return toError('The update operation document must contain at least one atomic operator.'); } if (keys[0][0] !== '$') { return toError('the update operation document must contain atomic operators.'); } } /** * Count the number of documents in the collection that match the query. * * @method * @param {Collection} a Collection instance. * @param {object} query The query for the count. * @param {object} [options] Optional settings. See Collection.prototype.count for a list of options. 
* @param {Collection~countCallback} [callback] The command result callback */ function count(coll, query, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = Object.assign({}, options); const skip = options.skip; const limit = options.limit; const hint = options.hint; const maxTimeMS = options.maxTimeMS; query = query || {}; // Final query const cmd = { count: coll.s.name, query: query }; // Add limit, skip and maxTimeMS if defined if (typeof skip === 'number') cmd.skip = skip; if (typeof limit === 'number') cmd.limit = limit; if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS; if (hint) cmd.hint = hint; // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Do we have a readConcern specified decorateWithReadConcern(cmd, coll, options); // Have we specified collation decorateWithCollation(cmd, coll, options); executeCommand(coll.s.db, cmd, options, (err, result) => { if (err) return handleCallback(callback, err); handleCallback(callback, null, result.n); }); } function countDocuments(coll, query, options, callback) { const skip = options.skip; const limit = options.limit; options = Object.assign({}, options); const pipeline = [{ $match: query }]; // Add skip and limit if defined if (typeof skip === 'number') { pipeline.push({ $skip: skip }); } if (typeof limit === 'number') { pipeline.push({ $limit: limit }); } pipeline.push({ $group: { _id: null, n: { $sum: 1 } } }); delete options.limit; delete options.skip; coll.aggregate(pipeline, options, (err, result) => { if (err) return handleCallback(callback, err); result.toArray((err, docs) => { if (err) return handleCallback(err); handleCallback(callback, null, docs.length ? docs[0].n : 0); }); }); } /** * Create an index on the db and collection. * * @method * @param {Collection} a Collection instance. * @param {(string|object)} fieldOrSpec Defines the index. * @param {object} [options] Optional settings. See Collection.prototype.createIndex for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function createIndex(coll, fieldOrSpec, options, callback) { createIndexDb(coll.s.db, coll.s.name, fieldOrSpec, options, callback); } /** * Create multiple indexes in the collection. This method is only supported for * MongoDB 2.6 or higher. Earlier version of MongoDB will throw a command not supported * error. Index specifications are defined at http://docs.mongodb.org/manual/reference/command/createIndexes/. * * @method * @param {Collection} a Collection instance. * @param {array} indexSpecs An array of index specifications to be created * @param {Object} [options] Optional settings. See Collection.prototype.createIndexes for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function createIndexes(coll, indexSpecs, options, callback) { const capabilities = coll.s.topology.capabilities(); // Ensure we generate the correct name if the parameter is not set for (let i = 0; i < indexSpecs.length; i++) { if (indexSpecs[i].name == null) { const keys = []; // Did the user pass in a collation, check if our write server supports it if (indexSpecs[i].collation && capabilities && !capabilities.commandsTakeCollation) { return callback(new MongoError('server/primary/mongos does not support collation')); } for (let name in indexSpecs[i].key) { keys.push(`${name}_${indexSpecs[i].key[name]}`); } // Set the name indexSpecs[i].name = keys.join('_'); } } options = Object.assign({}, options, { readPreference: ReadPreference.PRIMARY }); // Execute the index executeCommand( coll.s.db, { createIndexes: coll.s.name, indexes: indexSpecs }, options, callback ); } function deleteCallback(err, r, callback) { if (callback == null) return; if (err && callback) return callback(err); if (r == null) return callback(null, { result: { ok: 1 } }); r.deletedCount = r.result.n; if (callback) callback(null, r); } /** * Delete multiple documents from the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the documents to remove * @param {object} [options] Optional settings. See Collection.prototype.deleteMany for a list of options. * @param {Collection~deleteWriteOpCallback} [callback] The command result callback */ function deleteMany(coll, filter, options, callback) { options.single = false; removeDocuments(coll, filter, options, (err, r) => deleteCallback(err, r, callback)); } /** * Delete a single document from the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the document to remove * @param {object} [options] Optional settings. See Collection.prototype.deleteOne for a list of options. * @param {Collection~deleteWriteOpCallback} [callback] The command result callback */ function deleteOne(coll, filter, options, callback) { options.single = true; removeDocuments(coll, filter, options, (err, r) => deleteCallback(err, r, callback)); } /** * Return a list of distinct values for the given key across a collection. * * @method * @param {Collection} a Collection instance. * @param {string} key Field of the document to find distinct values for. * @param {object} query The query for filtering the set of documents to which we apply the distinct filter. * @param {object} [options] Optional settings. See Collection.prototype.distinct for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function distinct(coll, key, query, options, callback) { // maxTimeMS option const maxTimeMS = options.maxTimeMS; // Distinct command const cmd = { distinct: coll.s.name, key: key, query: query }; options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Add maxTimeMS if defined if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS; // Do we have a readConcern specified decorateWithReadConcern(cmd, coll, options); // Have we specified collation decorateWithCollation(cmd, coll, options); // Execute the command executeCommand(coll.s.db, cmd, options, (err, result) => { if (err) return handleCallback(callback, err); handleCallback(callback, null, result.values); }); } /** * Drop an index from this collection. * * @method * @param {Collection} a Collection instance. * @param {string} indexName Name of the index to drop. * @param {object} [options] Optional settings. See Collection.prototype.dropIndex for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function dropIndex(coll, indexName, options, callback) { // Delete index command const cmd = { dropIndexes: coll.s.name, index: indexName }; // Decorate command with writeConcern if supported applyWriteConcern(cmd, { db: coll.s.db, collection: coll }, options); // Execute command executeCommand(coll.s.db, cmd, options, (err, result) => { if (typeof callback !== 'function') return; if (err) return handleCallback(callback, err, null); handleCallback(callback, null, result); }); } /** * Drop all indexes from this collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.dropIndexes for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function dropIndexes(coll, options, callback) { dropIndex(coll, '*', options, err => { if (err) return handleCallback(callback, err, false); handleCallback(callback, null, true); }); } /** * Ensure that an index exists. If the index does not exist, this function creates it. * * @method * @param {Collection} a Collection instance. * @param {(string|object)} fieldOrSpec Defines the index. * @param {object} [options] Optional settings. See Collection.prototype.ensureIndex for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function ensureIndex(coll, fieldOrSpec, options, callback) { ensureIndexDb(coll.s.db, coll.s.name, fieldOrSpec, options, callback); } /** * Find and update a document. * * @method * @param {Collection} a Collection instance. * @param {object} query Query object to locate the object to modify. * @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate. * @param {object} doc The fields/vals to be updated. * @param {object} [options] Optional settings. See Collection.prototype.findAndModify for a list of options. 
* @param {Collection~findAndModifyCallback} [callback] The command result callback * @deprecated use findOneAndUpdate, findOneAndReplace or findOneAndDelete instead */ function findAndModify(coll, query, sort, doc, options, callback) { // Create findAndModify command object const queryObject = { findAndModify: coll.s.name, query: query }; sort = formattedOrderClause(sort); if (sort) { queryObject.sort = sort; } queryObject.new = options.new ? true : false; queryObject.remove = options.remove ? true : false; queryObject.upsert = options.upsert ? true : false; const projection = options.projection || options.fields; if (projection) { queryObject.fields = projection; } if (options.arrayFilters) { queryObject.arrayFilters = options.arrayFilters; delete options.arrayFilters; } if (doc && !options.remove) { queryObject.update = doc; } if (options.maxTimeMS) queryObject.maxTimeMS = options.maxTimeMS; // Either use override on the function, or go back to default on either the collection // level or db options.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions; // No check on the documents options.checkKeys = false; // Get the write concern settings const finalOptions = applyWriteConcern(options, { db: coll.s.db, collection: coll }, options); // Decorate the findAndModify command with the write Concern if (finalOptions.writeConcern) { queryObject.writeConcern = finalOptions.writeConcern; } // Have we specified bypassDocumentValidation if (finalOptions.bypassDocumentValidation === true) { queryObject.bypassDocumentValidation = finalOptions.bypassDocumentValidation; } // Have we specified collation decorateWithCollation(queryObject, coll, finalOptions); // Execute the command executeCommand(coll.s.db, queryObject, finalOptions, (err, result) => { if (err) return handleCallback(callback, err, null); return handleCallback(callback, null, result); }); } /** * Find and remove a document. * * @method * @param {Collection} a Collection instance. * @param {object} query Query object to locate the object to modify. * @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate. * @param {object} [options] Optional settings. See Collection.prototype.findAndRemove for a list of options. * @param {Collection~resultCallback} [callback] The command result callback * @deprecated use findOneAndDelete instead */ function findAndRemove(coll, query, sort, options, callback) { // Add the remove option options.remove = true; // Execute the callback findAndModify(coll, query, sort, null, options, callback); } /** * Fetch the first document that matches the query. * * @method * @param {Collection} a Collection instance. * @param {object} query Query for find Operation * @param {object} [options] Optional settings. See Collection.prototype.findOne for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function findOne(coll, query, options, callback) { const cursor = coll .find(query, options) .limit(-1) .batchSize(1); // Return the item cursor.next((err, item) => { if (err != null) return handleCallback(callback, toError(err), null); handleCallback(callback, null, item); }); } /** * Find a document and delete it in one atomic operation. This requires a write lock for the duration of the operation. * * @method * @param {Collection} a Collection instance. * @param {object} filter Document selection filter. * @param {object} [options] Optional settings. 
See Collection.prototype.findOneAndDelete for a list of options. * @param {Collection~findAndModifyCallback} [callback] The collection result callback */ function findOneAndDelete(coll, filter, options, callback) { // Final options const finalOptions = Object.assign({}, options); finalOptions.fields = options.projection; finalOptions.remove = true; // Execute find and Modify findAndModify(coll, filter, options.sort, null, finalOptions, callback); } /** * Find a document and replace it in one atomic operation. This requires a write lock for the duration of the operation. * * @method * @param {Collection} a Collection instance. * @param {object} filter Document selection filter. * @param {object} replacement Document replacing the matching document. * @param {object} [options] Optional settings. See Collection.prototype.findOneAndReplace for a list of options. * @param {Collection~findAndModifyCallback} [callback] The collection result callback */ function findOneAndReplace(coll, filter, replacement, options, callback) { // Final options const finalOptions = Object.assign({}, options); finalOptions.fields = options.projection; finalOptions.update = true; finalOptions.new = options.returnOriginal !== void 0 ? !options.returnOriginal : false; finalOptions.upsert = options.upsert !== void 0 ? !!options.upsert : false; // Execute findAndModify findAndModify(coll, filter, options.sort, replacement, finalOptions, callback); } /** * Find a document and update it in one atomic operation. This requires a write lock for the duration of the operation. * * @method * @param {Collection} a Collection instance. * @param {object} filter Document selection filter. * @param {object} update Update operations to be performed on the document * @param {object} [options] Optional settings. See Collection.prototype.findOneAndUpdate for a list of options. * @param {Collection~findAndModifyCallback} [callback] The collection result callback */ function findOneAndUpdate(coll, filter, update, options, callback) { // Final options const finalOptions = Object.assign({}, options); finalOptions.fields = options.projection; finalOptions.update = true; finalOptions.new = typeof options.returnOriginal === 'boolean' ? !options.returnOriginal : false; finalOptions.upsert = typeof options.upsert === 'boolean' ? options.upsert : false; // Execute findAndModify findAndModify(coll, filter, options.sort, update, finalOptions, callback); } /** * Execute a geo search using a geo haystack index on a collection. * * @method * @param {Collection} a Collection instance. * @param {number} x Point to search on the x axis, ensure the indexes are ordered in the same order. * @param {number} y Point to search on the y axis, ensure the indexes are ordered in the same order. * @param {object} [options] Optional settings. See Collection.prototype.geoHaystackSearch for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function geoHaystackSearch(coll, x, y, options, callback) { // Build command object let commandObject = { geoSearch: coll.s.name, near: [x, y] }; // Remove read preference from hash if it exists commandObject = decorateCommand(commandObject, options, { readPreference: true, session: true }); options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Do we have a readConcern specified decorateWithReadConcern(commandObject, coll, options); // Execute the command executeCommand(coll.s.db, commandObject, options, (err, res) => { if (err) return handleCallback(callback, err); if (res.err || res.errmsg) handleCallback(callback, toError(res)); // should we only be returning res.results here? Not sure if the user // should see the other return information handleCallback(callback, null, res); }); } /** * Run a group command across a collection. * * @method * @param {Collection} a Collection instance. * @param {(object|array|function|code)} keys An object, array or function expressing the keys to group by. * @param {object} condition An optional condition that must be true for a row to be considered. * @param {object} initial Initial value of the aggregation counter object. * @param {(function|Code)} reduce The reduce function aggregates (reduces) the objects iterated * @param {(function|Code)} finalize An optional function to be run on each item in the result set just before the item is returned. * @param {boolean} command Specify if you wish to run using the internal group command or using eval, default is true. * @param {object} [options] Optional settings. See Collection.prototype.group for a list of options. * @param {Collection~resultCallback} [callback] The command result callback * @deprecated MongoDB 3.6 or higher will no longer support the group command. We recommend rewriting using the aggregation framework. */ function group(coll, keys, condition, initial, reduce, finalize, command, options, callback) { // Execute using the command if (command) { const reduceFunction = reduce && reduce._bsontype === 'Code' ? reduce : new Code(reduce); const selector = { group: { ns: coll.s.name, $reduce: reduceFunction, cond: condition, initial: initial, out: 'inline' } }; // if finalize is defined if (finalize != null) selector.group['finalize'] = finalize; // Set up group selector if ('function' === typeof keys || (keys && keys._bsontype === 'Code')) { selector.group.$keyf = keys && keys._bsontype === 'Code' ? keys : new Code(keys); } else { const hash = {}; keys.forEach(key => { hash[key] = 1; }); selector.group.key = hash; } options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Do we have a readConcern specified decorateWithReadConcern(selector, coll, options); // Have we specified collation decorateWithCollation(selector, coll, options); // Execute command executeCommand(coll.s.db, selector, options, (err, result) => { if (err) return handleCallback(callback, err, null); handleCallback(callback, null, result.retval); }); } else { // Create execution scope const scope = reduce != null && reduce._bsontype === 'Code' ? 
reduce.scope : {}; scope.ns = coll.s.name; scope.keys = keys; scope.condition = condition; scope.initial = initial; // Pass in the function text to execute within mongodb. const groupfn = groupFunction.replace(/ reduce;/, reduce.toString() + ';'); evaluate(coll.s.db, new Code(groupfn, scope), null, options, (err, results) => { if (err) return handleCallback(callback, err, null); handleCallback(callback, null, results.result || results); }); } } /** * Retrieve all the indexes on the collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.indexes for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function indexes(coll, options, callback) { options = Object.assign({}, { full: true }, options); indexInformationDb(coll.s.db, coll.s.name, options, callback); } /** * Check if one or more indexes exist on the collection. This fails on the first index that doesn't exist. * * @method * @param {Collection} a Collection instance. * @param {(string|array)} indexes One or more index names to check. * @param {Object} [options] Optional settings. See Collection.prototype.indexExists for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function indexExists(coll, indexes, options, callback) { indexInformation(coll, options, (err, indexInformation) => { // If we have an error return if (err != null) return handleCallback(callback, err, null); // Let's check for the index names if (!Array.isArray(indexes)) return handleCallback(callback, null, indexInformation[indexes] != null); // Check in list of indexes for (let i = 0; i < indexes.length; i++) { if (indexInformation[indexes[i]] == null) { return handleCallback(callback, null, false); } } // All keys found return true return handleCallback(callback, null, true); }); } /** * Retrieve this collection's index info. * * @method * @param {Collection} a Collection instance. * @param {object} [options] Optional settings. See Collection.prototype.indexInformation for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function indexInformation(coll, options, callback) { indexInformationDb(coll.s.db, coll.s.name, options, callback); } function insertDocuments(coll, docs, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; // Ensure we are operating on an array op docs docs = Array.isArray(docs) ? docs : [docs]; // Get the write concern options const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); // If keep going set unordered if (finalOptions.keepGoing === true) finalOptions.ordered = false; finalOptions.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions; docs = prepareDocs(coll, docs, options); // File inserts coll.s.topology.insert(coll.s.namespace, docs, finalOptions, (err, result) => { if (callback == null) return; if (err) return handleCallback(callback, err); if (result == null) return handleCallback(callback, null, null); if (result.result.code) return handleCallback(callback, toError(result.result)); if (result.result.writeErrors) return handleCallback(callback, toError(result.result.writeErrors[0])); // Add docs to the list result.ops = docs; // Return the results handleCallback(callback, null, result); }); } /** * Insert a single document into the collection. 
See Collection.prototype.insertOne for more information. * * @method * @param {Collection} a Collection instance. * @param {object} doc Document to insert. * @param {object} [options] Optional settings. See Collection.prototype.insertOne for a list of options. * @param {Collection~insertOneWriteOpCallback} [callback] The command result callback */ function insertOne(coll, doc, options, callback) { if (Array.isArray(doc)) { return callback( MongoError.create({ message: 'doc parameter must be an object', driver: true }) ); } insertDocuments(coll, [doc], options, (err, r) => { if (callback == null) return; if (err && callback) return callback(err); // Workaround for pre 2.6 servers if (r == null) return callback(null, { result: { ok: 1 } }); // Add values to top level to ensure crud spec compatibility r.insertedCount = r.result.n; r.insertedId = doc._id; if (callback) callback(null, r); }); } /** * Determine whether the collection is a capped collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.isCapped for a list of options. * @param {Collection~resultCallback} [callback] The results callback */ function isCapped(coll, options, callback) { optionsOp(coll, options, (err, document) => { if (err) return handleCallback(callback, err); handleCallback(callback, null, document && document.capped); }); } /** * Run Map Reduce across a collection. Be aware that the inline option for out will return an array of results not a collection. * * @method * @param {Collection} a Collection instance. * @param {(function|string)} map The mapping function. * @param {(function|string)} reduce The reduce function. * @param {object} [options] Optional settings. See Collection.prototype.mapReduce for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function mapReduce(coll, map, reduce, options, callback) { const mapCommandHash = { mapreduce: coll.s.name, map: map, reduce: reduce }; // Exclusion list const exclusionList = ['readPreference', 'session', 'bypassDocumentValidation']; // Add any other options passed in for (let n in options) { if ('scope' === n) { mapCommandHash[n] = processScope(options[n]); } else { // Only include if not in exclusion list if (exclusionList.indexOf(n) === -1) { mapCommandHash[n] = options[n]; } } } options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // If we have a read preference and inline is not set as output fail hard if ( options.readPreference !== false && options.readPreference !== 'primary' && options['out'] && (options['out'].inline !== 1 && options['out'] !== 'inline') ) { // Force readPreference to primary options.readPreference = 'primary'; // Decorate command with writeConcern if supported applyWriteConcern(mapCommandHash, { db: coll.s.db, collection: coll }, options); } else { decorateWithReadConcern(mapCommandHash, coll, options); } // Is bypassDocumentValidation specified if (options.bypassDocumentValidation === true) { mapCommandHash.bypassDocumentValidation = options.bypassDocumentValidation; } // Have we specified collation decorateWithCollation(mapCommandHash, coll, options); // Execute command executeCommand(coll.s.db, mapCommandHash, options, (err, result) => { if (err) return handleCallback(callback, err); // Check if we have an error if (1 !== result.ok || result.err || result.errmsg) { return handleCallback(callback, toError(result)); } // Create statistics value const stats = {}; if (result.timeMillis) stats['processtime'] = result.timeMillis; if (result.counts) stats['counts'] = result.counts; if (result.timing) stats['timing'] = result.timing; // invoked with inline? if (result.results) { // If we wish for no verbosity if (options['verbose'] == null || !options['verbose']) { return handleCallback(callback, null, result.results); } return handleCallback(callback, null, { results: result.results, stats: stats }); } // The returned collection let collection = null; // If we have an object it's a different db if (result.result != null && typeof result.result === 'object') { const doc = result.result; // Return a collection from another db const Db = require('../db'); collection = new Db(doc.db, coll.s.db.s.topology, coll.s.db.s.options).collection( doc.collection ); } else { // Create a collection object that wraps the result collection collection = coll.s.db.collection(result.result); } // If we wish for no verbosity if (options['verbose'] == null || !options['verbose']) { return handleCallback(callback, err, collection); } // Return stats as third set of values handleCallback(callback, err, { collection: collection, stats: stats }); }); } /** * Return the options of the collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.options for a list of options. 
* @param {Collection~resultCallback} [callback] The results callback */ function optionsOp(coll, opts, callback) { coll.s.db.listCollections({ name: coll.s.name }, opts).toArray((err, collections) => { if (err) return handleCallback(callback, err); if (collections.length === 0) { return handleCallback( callback, MongoError.create({ message: `collection ${coll.s.namespace} not found`, driver: true }) ); } handleCallback(callback, err, collections[0].options || null); }); } /** * Return N parallel cursors for a collection to allow parallel reading of the entire collection. There are * no ordering guarantees for returned results. * * @method * @param {Collection} a Collection instance. * @param {object} [options] Optional settings. See Collection.prototype.parallelCollectionScan for a list of options. * @param {Collection~parallelCollectionScanCallback} [callback] The command result callback */ function parallelCollectionScan(coll, options, callback) { // Create command object const commandObject = { parallelCollectionScan: coll.s.name, numCursors: options.numCursors }; // Do we have a readConcern specified decorateWithReadConcern(commandObject, coll, options); // Store the raw value const raw = options.raw; delete options['raw']; // Execute the command executeCommand(coll.s.db, commandObject, options, (err, result) => { if (err) return handleCallback(callback, err, null); if (result == null) return handleCallback( callback, new Error('no result returned for parallelCollectionScan'), null ); options = Object.assign({ explicitlyIgnoreSession: true }, options); const cursors = []; // Add the raw back to the option if (raw) options.raw = raw; // Create command cursors for each item for (let i = 0; i < result.cursors.length; i++) { const rawId = result.cursors[i].cursor.id; // Convert cursorId to Long if needed const cursorId = typeof rawId === 'number' ? Long.fromNumber(rawId) : rawId; // Add a command cursor cursors.push(coll.s.topology.cursor(coll.s.namespace, cursorId, options)); } handleCallback(callback, null, cursors); }); } // modifies documents before being inserted or updated function prepareDocs(coll, docs, options) { const forceServerObjectId = typeof options.forceServerObjectId === 'boolean' ? options.forceServerObjectId : coll.s.db.options.forceServerObjectId; // no need to modify the docs if server sets the ObjectId if (forceServerObjectId === true) { return docs; } return docs.map(doc => { if (forceServerObjectId !== true && doc._id == null) { doc._id = coll.s.pkFactory.createPk(); } return doc; }); } /** * Functions that are passed as scope args must * be converted to Code instances. * @ignore */ function processScope(scope) { if (!isObject(scope) || scope._bsontype === 'ObjectID') { return scope; } const keys = Object.keys(scope); let key; const new_scope = {}; for (let i = keys.length - 1; i >= 0; i--) { key = keys[i]; if ('function' === typeof scope[key]) { new_scope[key] = new Code(String(scope[key])); } else { new_scope[key] = processScope(scope[key]); } } return new_scope; } /** * Reindex all indexes on the collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.reIndex for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function reIndex(coll, options, callback) { // Reindex const cmd = { reIndex: coll.s.name }; // Execute the command executeCommand(coll.s.db, cmd, options, (err, result) => { if (callback == null) return; if (err) return handleCallback(callback, err, null); handleCallback(callback, null, result.ok ? true : false); }); } function removeDocuments(coll, selector, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}); } else if (typeof selector === 'function') { callback = selector; options = {}; selector = {}; } // Create an empty options object if the provided one is null options = options || {}; // Get the write concern options const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); // If selector is null set empty if (selector == null) selector = {}; // Build the op const op = { q: selector, limit: 0 }; if (options.single) { op.limit = 1; } else if (finalOptions.retryWrites) { finalOptions.retryWrites = false; } // Have we specified collation decorateWithCollation(finalOptions, coll, options); // Execute the remove coll.s.topology.remove(coll.s.namespace, [op], finalOptions, (err, result) => { if (callback == null) return; if (err) return handleCallback(callback, err, null); if (result == null) return handleCallback(callback, null, null); if (result.result.code) return handleCallback(callback, toError(result.result)); if (result.result.writeErrors) return handleCallback(callback, toError(result.result.writeErrors[0])); // Return the results handleCallback(callback, null, result); }); } /** * Rename the collection. * * @method * @param {Collection} a Collection instance. * @param {string} newName New name of of the collection. * @param {object} [options] Optional settings. See Collection.prototype.rename for a list of options. * @param {Collection~collectionResultCallback} [callback] The results callback */ function rename(coll, newName, options, callback) { const Collection = require('../collection'); // Check the collection name checkCollectionName(newName); // Build the command const renameCollection = `${coll.s.dbName}.${coll.s.name}`; const toCollection = `${coll.s.dbName}.${newName}`; const dropTarget = typeof options.dropTarget === 'boolean' ? options.dropTarget : false; const cmd = { renameCollection: renameCollection, to: toCollection, dropTarget: dropTarget }; // Decorate command with writeConcern if supported applyWriteConcern(cmd, { db: coll.s.db, collection: coll }, options); // Execute against admin executeDbAdminCommand(coll.s.db.admin().s.db, cmd, options, (err, doc) => { if (err) return handleCallback(callback, err, null); // We have an error if (doc.errmsg) return handleCallback(callback, toError(doc), null); try { return handleCallback( callback, null, new Collection( coll.s.db, coll.s.topology, coll.s.dbName, newName, coll.s.pkFactory, coll.s.options ) ); } catch (err) { return handleCallback(callback, toError(err), null); } }); } /** * Replace a document in the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the document to update * @param {object} doc The Document that replaces the matching document * @param {object} [options] Optional settings. See Collection.prototype.replaceOne for a list of options. 
* @param {Collection~updateWriteOpCallback} [callback] The command result callback */ function replaceOne(coll, filter, doc, options, callback) { // Set single document update options.multi = false; // Execute update updateDocuments(coll, filter, doc, options, (err, r) => { if (callback == null) return; if (err && callback) return callback(err); if (r == null) return callback(null, { result: { ok: 1 } }); r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n; r.upsertedId = Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id` : null; r.upsertedCount = Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0; r.matchedCount = Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n; r.ops = [doc]; if (callback) callback(null, r); }); } /** * Save a document. * * @method * @param {Collection} a Collection instance. * @param {object} doc Document to save * @param {object} [options] Optional settings. See Collection.prototype.save for a list of options. * @param {Collection~writeOpCallback} [callback] The command result callback * @deprecated use insertOne, insertMany, updateOne or updateMany */ function save(coll, doc, options, callback) { // Get the write concern options const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); // Establish if we need to perform an insert or update if (doc._id != null) { finalOptions.upsert = true; return updateDocuments(coll, { _id: doc._id }, doc, finalOptions, callback); } // Insert the document insertDocuments(coll, [doc], finalOptions, (err, result) => { if (callback == null) return; if (doc == null) return handleCallback(callback, null, null); if (err) return handleCallback(callback, err, null); handleCallback(callback, null, result); }); } /** * Get all the collection statistics. * * @method * @param {Collection} a Collection instance. * @param {object} [options] Optional settings. See Collection.prototype.stats for a list of options. * @param {Collection~resultCallback} [callback] The collection result callback */ function stats(coll, options, callback) { // Build command object const commandObject = { collStats: coll.s.name }; // Check if we have the scale value if (options['scale'] != null) commandObject['scale'] = options['scale']; options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Execute the command executeCommand(coll.s.db, commandObject, options, callback); } function updateCallback(err, r, callback) { if (callback == null) return; if (err) return callback(err); if (r == null) return callback(null, { result: { ok: 1 } }); r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n; r.upsertedId = Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id` : null; r.upsertedCount = Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0; r.matchedCount = Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 
0 : r.result.n; callback(null, r); } function updateDocuments(coll, selector, document, options, callback) { if ('function' === typeof options) (callback = options), (options = null); if (options == null) options = {}; if (!('function' === typeof callback)) callback = null; // If we are not providing a selector or document throw if (selector == null || typeof selector !== 'object') return callback(toError('selector must be a valid JavaScript object')); if (document == null || typeof document !== 'object') return callback(toError('document must be a valid JavaScript object')); // Get the write concern options const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); // Do we return the actual result document // Either use override on the function, or go back to default on either the collection // level or db finalOptions.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions; // Execute the operation const op = { q: selector, u: document }; op.upsert = options.upsert !== void 0 ? !!options.upsert : false; op.multi = options.multi !== void 0 ? !!options.multi : false; if (finalOptions.arrayFilters) { op.arrayFilters = finalOptions.arrayFilters; delete finalOptions.arrayFilters; } if (finalOptions.retryWrites && op.multi) { finalOptions.retryWrites = false; } // Have we specified collation decorateWithCollation(finalOptions, coll, options); // Update options coll.s.topology.update(coll.s.namespace, [op], finalOptions, (err, result) => { if (callback == null) return; if (err) return handleCallback(callback, err, null); if (result == null) return handleCallback(callback, null, null); if (result.result.code) return handleCallback(callback, toError(result.result)); if (result.result.writeErrors) return handleCallback(callback, toError(result.result.writeErrors[0])); // Return the results handleCallback(callback, null, result); }); } /** * Update multiple documents in the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the documents to update * @param {object} update The update operations to be applied to the document * @param {object} [options] Optional settings. See Collection.prototype.updateMany for a list of options. * @param {Collection~updateWriteOpCallback} [callback] The command result callback */ function updateMany(coll, filter, update, options, callback) { // Set single document update options.multi = true; // Execute update updateDocuments(coll, filter, update, options, (err, r) => updateCallback(err, r, callback)); } /** * Update a single document in the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the document to update * @param {object} update The update operations to be applied to the document * @param {object} [options] Optional settings. See Collection.prototype.updateOne for a list of options. 
* @param {Collection~updateWriteOpCallback} [callback] The command result callback */ function updateOne(coll, filter, update, options, callback) { // Set single document update options.multi = false; // Execute update updateDocuments(coll, filter, update, options, (err, r) => updateCallback(err, r, callback)); } module.exports = { bulkWrite, checkForAtomicOperators, count, countDocuments, createIndex, createIndexes, deleteMany, deleteOne, distinct, dropIndex, dropIndexes, ensureIndex, findAndModify, findAndRemove, findOne, findOneAndDelete, findOneAndReplace, findOneAndUpdate, geoHaystackSearch, group, indexes, indexExists, indexInformation, insertOne, isCapped, mapReduce, optionsOp, parallelCollectionScan, prepareDocs, reIndex, removeDocuments, rename, replaceOne, save, stats, updateDocuments, updateMany, updateOne };
1
14,584
If `docs` is an empty array, there will be an error.
mongodb-node-mongodb-native
js
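The review comment for this row notes that insertDocuments surfaces a driver error when `docs` is an empty array. The Go sketch below is only an analogous illustration of the fail-fast guard the reviewer implies; the function name, signature, and error text are hypothetical and are not part of the Node.js driver.

package main

import (
	"errors"
	"fmt"
)

// errEmptyDocs mirrors the condition the reviewer flags: an empty input
// slice should be rejected up front instead of failing inside the bulk API.
var errEmptyDocs = errors.New("insertDocuments: docs must contain at least one document")

// insertDocuments is a hypothetical stand-in for the helper above; it only
// demonstrates the guard, not the actual write path.
func insertDocuments(docs []map[string]interface{}) error {
	if len(docs) == 0 {
		return errEmptyDocs // fail fast with a descriptive error
	}
	// ... hand docs off to the topology / bulk write here ...
	return nil
}

func main() {
	if err := insertDocuments(nil); err != nil {
		fmt.Println(err)
	}
}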
@@ -150,7 +150,8 @@ func (u *staticUpstream) From() string { func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) { if !strings.HasPrefix(host, "http") && - !strings.HasPrefix(host, "unix:") { + !strings.HasPrefix(host, "unix:") && + !strings.HasPrefix(host, "quic:") { host = "http://" + host } uh := &UpstreamHost{
1
package proxy import ( "bytes" "fmt" "io" "io/ioutil" "net" "net/http" "net/url" "path" "strconv" "strings" "sync" "sync/atomic" "time" "crypto/tls" "github.com/mholt/caddy/caddyfile" "github.com/mholt/caddy/caddyhttp/httpserver" ) var ( supportedPolicies = make(map[string]func(string) Policy) ) type staticUpstream struct { from string upstreamHeaders http.Header downstreamHeaders http.Header stop chan struct{} // Signals running goroutines to stop. wg sync.WaitGroup // Used to wait for running goroutines to stop. Hosts HostPool Policy Policy KeepAlive int FailTimeout time.Duration TryDuration time.Duration TryInterval time.Duration MaxConns int64 HealthCheck struct { Client http.Client Path string Interval time.Duration Timeout time.Duration Host string Port string ContentString string } WithoutPathPrefix string IgnoredSubPaths []string insecureSkipVerify bool MaxFails int32 } // NewStaticUpstreams parses the configuration input and sets up // static upstreams for the proxy middleware. The host string parameter, // if not empty, is used for setting the upstream Host header for the // health checks if the upstream header config requires it. func NewStaticUpstreams(c caddyfile.Dispenser, host string) ([]Upstream, error) { var upstreams []Upstream for c.Next() { upstream := &staticUpstream{ from: "", stop: make(chan struct{}), upstreamHeaders: make(http.Header), downstreamHeaders: make(http.Header), Hosts: nil, Policy: &Random{}, MaxFails: 1, TryInterval: 250 * time.Millisecond, MaxConns: 0, KeepAlive: http.DefaultMaxIdleConnsPerHost, } if !c.Args(&upstream.from) { return upstreams, c.ArgErr() } var to []string for _, t := range c.RemainingArgs() { parsed, err := parseUpstream(t) if err != nil { return upstreams, err } to = append(to, parsed...) } for c.NextBlock() { switch c.Val() { case "upstream": if !c.NextArg() { return upstreams, c.ArgErr() } parsed, err := parseUpstream(c.Val()) if err != nil { return upstreams, err } to = append(to, parsed...) 
default: if err := parseBlock(&c, upstream); err != nil { return upstreams, err } } } if len(to) == 0 { return upstreams, c.ArgErr() } upstream.Hosts = make([]*UpstreamHost, len(to)) for i, host := range to { uh, err := upstream.NewHost(host) if err != nil { return upstreams, err } upstream.Hosts[i] = uh } if upstream.HealthCheck.Path != "" { upstream.HealthCheck.Client = http.Client{ Timeout: upstream.HealthCheck.Timeout, Transport: &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: upstream.insecureSkipVerify}, }, } // set up health check upstream host if we have one if host != "" { hostHeader := upstream.upstreamHeaders.Get("Host") if strings.Contains(hostHeader, "{host}") { upstream.HealthCheck.Host = strings.Replace(hostHeader, "{host}", host, -1) } } upstream.wg.Add(1) go func() { defer upstream.wg.Done() upstream.HealthCheckWorker(upstream.stop) }() } upstreams = append(upstreams, upstream) } return upstreams, nil } func (u *staticUpstream) From() string { return u.from } func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) { if !strings.HasPrefix(host, "http") && !strings.HasPrefix(host, "unix:") { host = "http://" + host } uh := &UpstreamHost{ Name: host, Conns: 0, Fails: 0, FailTimeout: u.FailTimeout, Unhealthy: 0, UpstreamHeaders: u.upstreamHeaders, DownstreamHeaders: u.downstreamHeaders, CheckDown: func(u *staticUpstream) UpstreamHostDownFunc { return func(uh *UpstreamHost) bool { if atomic.LoadInt32(&uh.Unhealthy) != 0 { return true } if atomic.LoadInt32(&uh.Fails) >= u.MaxFails { return true } return false } }(u), WithoutPathPrefix: u.WithoutPathPrefix, MaxConns: u.MaxConns, } baseURL, err := url.Parse(uh.Name) if err != nil { return nil, err } uh.ReverseProxy = NewSingleHostReverseProxy(baseURL, uh.WithoutPathPrefix, u.KeepAlive) if u.insecureSkipVerify { uh.ReverseProxy.UseInsecureTransport() } return uh, nil } func parseUpstream(u string) ([]string, error) { if !strings.HasPrefix(u, "unix:") { colonIdx := strings.LastIndex(u, ":") protoIdx := strings.Index(u, "://") if colonIdx != -1 && colonIdx != protoIdx { us := u[:colonIdx] ue := "" portsEnd := len(u) if nextSlash := strings.Index(u[colonIdx:], "/"); nextSlash != -1 { portsEnd = colonIdx + nextSlash ue = u[portsEnd:] } ports := u[len(us)+1 : portsEnd] if separators := strings.Count(ports, "-"); separators == 1 { portsStr := strings.Split(ports, "-") pIni, err := strconv.Atoi(portsStr[0]) if err != nil { return nil, err } pEnd, err := strconv.Atoi(portsStr[1]) if err != nil { return nil, err } if pEnd <= pIni { return nil, fmt.Errorf("port range [%s] is invalid", ports) } hosts := []string{} for p := pIni; p <= pEnd; p++ { hosts = append(hosts, fmt.Sprintf("%s:%d%s", us, p, ue)) } return hosts, nil } } } return []string{u}, nil } func parseBlock(c *caddyfile.Dispenser, u *staticUpstream) error { switch c.Val() { case "policy": if !c.NextArg() { return c.ArgErr() } policyCreateFunc, ok := supportedPolicies[c.Val()] if !ok { return c.ArgErr() } arg := "" if c.NextArg() { arg = c.Val() } u.Policy = policyCreateFunc(arg) case "fail_timeout": if !c.NextArg() { return c.ArgErr() } dur, err := time.ParseDuration(c.Val()) if err != nil { return err } u.FailTimeout = dur case "max_fails": if !c.NextArg() { return c.ArgErr() } n, err := strconv.Atoi(c.Val()) if err != nil { return err } if n < 1 { return c.Err("max_fails must be at least 1") } u.MaxFails = int32(n) case "try_duration": if !c.NextArg() { return c.ArgErr() } dur, err := time.ParseDuration(c.Val()) if err != nil { return err } 
u.TryDuration = dur case "try_interval": if !c.NextArg() { return c.ArgErr() } interval, err := time.ParseDuration(c.Val()) if err != nil { return err } u.TryInterval = interval case "max_conns": if !c.NextArg() { return c.ArgErr() } n, err := strconv.ParseInt(c.Val(), 10, 64) if err != nil { return err } u.MaxConns = n case "health_check": if !c.NextArg() { return c.ArgErr() } u.HealthCheck.Path = c.Val() // Set defaults if u.HealthCheck.Interval == 0 { u.HealthCheck.Interval = 30 * time.Second } if u.HealthCheck.Timeout == 0 { u.HealthCheck.Timeout = 60 * time.Second } case "health_check_interval": var interval string if !c.Args(&interval) { return c.ArgErr() } dur, err := time.ParseDuration(interval) if err != nil { return err } u.HealthCheck.Interval = dur case "health_check_timeout": var interval string if !c.Args(&interval) { return c.ArgErr() } dur, err := time.ParseDuration(interval) if err != nil { return err } u.HealthCheck.Timeout = dur case "health_check_port": if !c.NextArg() { return c.ArgErr() } port := c.Val() n, err := strconv.Atoi(port) if err != nil { return err } if n < 0 { return c.Errf("invalid health_check_port '%s'", port) } u.HealthCheck.Port = port case "health_check_contains": if !c.NextArg() { return c.ArgErr() } u.HealthCheck.ContentString = c.Val() case "header_upstream": var header, value string if !c.Args(&header, &value) { // When removing a header, the value can be optional. if !strings.HasPrefix(header, "-") { return c.ArgErr() } } u.upstreamHeaders.Add(header, value) case "header_downstream": var header, value string if !c.Args(&header, &value) { // When removing a header, the value can be optional. if !strings.HasPrefix(header, "-") { return c.ArgErr() } } u.downstreamHeaders.Add(header, value) case "transparent": u.upstreamHeaders.Add("Host", "{host}") u.upstreamHeaders.Add("X-Real-IP", "{remote}") u.upstreamHeaders.Add("X-Forwarded-For", "{remote}") u.upstreamHeaders.Add("X-Forwarded-Proto", "{scheme}") case "websocket": u.upstreamHeaders.Add("Connection", "{>Connection}") u.upstreamHeaders.Add("Upgrade", "{>Upgrade}") case "without": if !c.NextArg() { return c.ArgErr() } u.WithoutPathPrefix = c.Val() case "except": ignoredPaths := c.RemainingArgs() if len(ignoredPaths) == 0 { return c.ArgErr() } u.IgnoredSubPaths = ignoredPaths case "insecure_skip_verify": u.insecureSkipVerify = true case "keepalive": if !c.NextArg() { return c.ArgErr() } n, err := strconv.Atoi(c.Val()) if err != nil { return err } if n < 0 { return c.ArgErr() } u.KeepAlive = n default: return c.Errf("unknown property '%s'", c.Val()) } return nil } func (u *staticUpstream) healthCheck() { for _, host := range u.Hosts { hostURL := host.Name if u.HealthCheck.Port != "" { hostURL = replacePort(host.Name, u.HealthCheck.Port) } hostURL += u.HealthCheck.Path unhealthy := func() bool { // set up request, needed to be able to modify headers // possible errors are bad HTTP methods or un-parsable urls req, err := http.NewRequest("GET", hostURL, nil) if err != nil { return true } // set host for request going upstream if u.HealthCheck.Host != "" { req.Host = u.HealthCheck.Host } r, err := u.HealthCheck.Client.Do(req) if err != nil { return true } defer func() { io.Copy(ioutil.Discard, r.Body) r.Body.Close() }() if r.StatusCode < 200 || r.StatusCode >= 400 { return true } if u.HealthCheck.ContentString == "" { // don't check for content string return false } // TODO ReadAll will be replaced if deemed necessary // See https://github.com/mholt/caddy/pull/1691 buf, err := ioutil.ReadAll(r.Body) if 
err != nil { return true } if bytes.Contains(buf, []byte(u.HealthCheck.ContentString)) { return false } return true }() if unhealthy { atomic.StoreInt32(&host.Unhealthy, 1) } else { atomic.StoreInt32(&host.Unhealthy, 0) } } } func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) { ticker := time.NewTicker(u.HealthCheck.Interval) u.healthCheck() for { select { case <-ticker.C: u.healthCheck() case <-stop: ticker.Stop() return } } } func (u *staticUpstream) Select(r *http.Request) *UpstreamHost { pool := u.Hosts if len(pool) == 1 { if !pool[0].Available() { return nil } return pool[0] } allUnavailable := true for _, host := range pool { if host.Available() { allUnavailable = false break } } if allUnavailable { return nil } if u.Policy == nil { return (&Random{}).Select(pool, r) } return u.Policy.Select(pool, r) } func (u *staticUpstream) AllowedPath(requestPath string) bool { for _, ignoredSubPath := range u.IgnoredSubPaths { if httpserver.Path(path.Clean(requestPath)).Matches(path.Join(u.From(), ignoredSubPath)) { return false } } return true } // GetTryDuration returns u.TryDuration. func (u *staticUpstream) GetTryDuration() time.Duration { return u.TryDuration } // GetTryInterval returns u.TryInterval. func (u *staticUpstream) GetTryInterval() time.Duration { return u.TryInterval } func (u *staticUpstream) GetHostCount() int { return len(u.Hosts) } // Stop sends a signal to all goroutines started by this staticUpstream to exit // and waits for them to finish before returning. func (u *staticUpstream) Stop() error { close(u.stop) u.wg.Wait() return nil } // RegisterPolicy adds a custom policy to the proxy. func RegisterPolicy(name string, policy func(string) Policy) { supportedPolicies[name] = policy } func replacePort(originalURL string, newPort string) string { parsedURL, err := url.Parse(originalURL) if err != nil { return originalURL } // handles 'localhost' and 'localhost:8080' parsedHost, _, err := net.SplitHostPort(parsedURL.Host) if err != nil { parsedHost = parsedURL.Host } parsedURL.Host = net.JoinHostPort(parsedHost, newPort) return parsedURL.String() }
1
11,210
Is it really necessary to have the user specify this, or can the reverse proxy infer QUIC from the upstream's Alt-Svc headers?
caddyserver-caddy
go
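The patch in this row adds a "quic:" prefix check to NewHost, and the review comment asks whether the proxy could instead infer QUIC support from the upstream's Alt-Svc response header. The sketch below is a minimal, hypothetical illustration of that idea only; it is not Caddy code, and the probe strategy (a single HEAD request against the upstream) is an assumption.

package main

import (
	"net/http"
	"strings"
	"time"
)

// upstreamSupportsQUIC is a hypothetical helper: it issues one HEAD request
// and reports whether the upstream advertises QUIC/HTTP-3 via Alt-Svc,
// e.g. `Alt-Svc: h3=":443"; ma=86400` or the legacy `quic=":443"` form.
func upstreamSupportsQUIC(upstreamURL string) bool {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Head(upstreamURL)
	if err != nil {
		return false // unreachable or non-HTTP upstreams fall back to TCP
	}
	defer resp.Body.Close()

	altSvc := strings.ToLower(resp.Header.Get("Alt-Svc"))
	return strings.Contains(altSvc, "h3") || strings.Contains(altSvc, "quic=")
}

func main() {
	_ = upstreamSupportsQUIC("https://example.com")
}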
@@ -55,7 +55,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests.Http2 get { var dataset = new TheoryData<H2SpecTestCase>(); - var toSkip = new[] { "http2/5.1/8" }; + var toSkip = new string[] { /*"http2/5.1/8"*/ }; foreach (var testcase in H2SpecCommands.EnumerateTestCases()) {
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. #if NETCOREAPP2_2 using System.IO; using System.Linq; using System.Net; using System.Threading.Tasks; using Microsoft.AspNetCore.Builder; using Microsoft.AspNetCore.Hosting; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Server.Kestrel.Core; using Microsoft.AspNetCore.Testing; using Microsoft.AspNetCore.Testing.xunit; using Xunit; using Xunit.Abstractions; namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests.Http2 { [OSSkipCondition(OperatingSystems.MacOSX, SkipReason = "Missing SslStream ALPN support: https://github.com/dotnet/corefx/issues/30492")] [MinimumOSVersion(OperatingSystems.Windows, WindowsVersions.Win81, SkipReason = "Missing Windows ALPN support: https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation#Support")] public class H2SpecTests : TestApplicationErrorLoggerLoggedTest { [ConditionalTheory] [MemberData(nameof(H2SpecTestCases))] public async Task RunIndividualTestCase(H2SpecTestCase testCase) { var hostBuilder = TransportSelector.GetWebHostBuilder() .UseKestrel(options => { options.Listen(IPAddress.Loopback, 0, listenOptions => { listenOptions.Protocols = HttpProtocols.Http2; if (testCase.Https) { listenOptions.UseHttps(TestResources.TestCertificatePath, "testPassword"); } }); }) .ConfigureServices(AddTestLogging) .Configure(ConfigureHelloWorld); using (var host = hostBuilder.Build()) { await host.StartAsync(); H2SpecCommands.RunTest(testCase.Id, host.GetPort(), testCase.Https, Logger); } } public static TheoryData<H2SpecTestCase> H2SpecTestCases { get { var dataset = new TheoryData<H2SpecTestCase>(); var toSkip = new[] { "http2/5.1/8" }; foreach (var testcase in H2SpecCommands.EnumerateTestCases()) { string skip = null; if (toSkip.Contains(testcase.Item1)) { skip = "https://github.com/aspnet/KestrelHttpServer/issues/2154"; } dataset.Add(new H2SpecTestCase { Id = testcase.Item1, Description = testcase.Item2, Https = false, Skip = skip, }); dataset.Add(new H2SpecTestCase { Id = testcase.Item1, Description = testcase.Item2, Https = true, Skip = skip, }); } return dataset; } } public class H2SpecTestCase : IXunitSerializable { // For the serializer public H2SpecTestCase() { } public string Id { get; set; } public string Description { get; set; } public bool Https { get; set; } public string Skip { get; set; } public void Deserialize(IXunitSerializationInfo info) { Id = info.GetValue<string>(nameof(Id)); Description = info.GetValue<string>(nameof(Description)); Https = info.GetValue<bool>(nameof(Https)); Skip = info.GetValue<string>(nameof(Skip)); } public void Serialize(IXunitSerializationInfo info) { info.AddValue(nameof(Id), Id, typeof(string)); info.AddValue(nameof(Description), Description, typeof(string)); info.AddValue(nameof(Https), Https, typeof(bool)); info.AddValue(nameof(Skip), Skip, typeof(string)); } public override string ToString() { return $"{Id}, HTTPS:{Https}, {Description}"; } } private void ConfigureHelloWorld(IApplicationBuilder app) { app.Run(async context => { // Read the whole request body to check for errors. await context.Request.Body.CopyToAsync(Stream.Null); await context.Response.WriteAsync("Hello World"); }); } } } #elif NET461 // HTTP/2 is not supported #else #error TFMs need updating #endif
1
16,748
Just remove the entire variable.
aspnet-KestrelHttpServer
.cs
@@ -202,7 +202,10 @@ Available options are: end define('--modulepath MODULES', "List of directories containing modules, separated by '#{File::PATH_SEPARATOR}'") do |modulepath| - @options[:modulepath] = modulepath.split(File::PATH_SEPARATOR) + # When specified from the CLI, modulepath entries are relative to pwd + @options[:modulepath] = modulepath.split(File::PATH_SEPARATOR).map do |moduledir| + File.expand_path(moduledir) + end end define('--boltdir FILEPATH', 'Specify what Boltdir to load config from (default: autodiscovered from current working dir)') do |path|
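The diff above makes CLI-supplied --modulepath entries absolute relative to the current working directory via File.expand_path. A rough Go equivalent of the same transformation is sketched below, using filepath.Abs over a PATH-separated list; it is an illustration of the behaviour under that assumption, not Bolt code, and the function name is hypothetical.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// expandModulepath mirrors the patch: each entry in a PATH-separated
// modulepath is made absolute relative to the current working directory.
func expandModulepath(modulepath string) ([]string, error) {
	entries := strings.Split(modulepath, string(os.PathListSeparator))
	expanded := make([]string, 0, len(entries))
	for _, dir := range entries {
		abs, err := filepath.Abs(dir) // resolves relative to the process's cwd
		if err != nil {
			return nil, err
		}
		expanded = append(expanded, abs)
	}
	return expanded, nil
}

func main() {
	dirs, _ := expandModulepath("modules" + string(os.PathListSeparator) + "site-modules")
	fmt.Println(dirs) // e.g. [/current/dir/modules /current/dir/site-modules]
}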
1
# frozen_string_literal: true require 'optparse' module Bolt class BoltOptionParser < OptionParser def self.examples(cmd, desc) <<-EXAMP #{desc} a Windows host via WinRM, providing for the password bolt #{cmd} -n winrm://winhost -u Administrator -p #{desc} the local machine, a Linux host via SSH, and hosts from a group specified in an inventory file bolt #{cmd} -n localhost,nixhost,node_group #{desc} Windows hosts queried from PuppetDB via WinRM as a domain user, prompting for the password bolt #{cmd} -q 'inventory[certname] { facts.os.family = "windows" }' --transport winrm -u 'domain\\Administrator' -p EXAMP end BANNER = <<-HELP Usage: bolt <subcommand> <action> [options] Available subcommands: bolt command run <command> Run a command remotely bolt file upload <src> <dest> Upload a local file bolt script run <script> Upload a local script and run it remotely bolt task show Show list of available tasks bolt task show <task> Show documentation for task bolt task run <task> [params] Run a Puppet task bolt plan show Show list of available plans bolt plan show <plan> Show details for plan bolt plan run <plan> [params] Run a Puppet task plan bolt puppetfile install Install modules from a Puppetfile into a Boltdir Run `bolt <subcommand> --help` to view specific examples. where [options] are: HELP TASK_HELP = <<-HELP Usage: bolt task <action> <task> [options] [parameters] Available actions are: show Show list of available tasks show <task> Show documentation for task run Run a Puppet task Parameters are of the form <parameter>=<value>. #{examples('task run facts', 'run facter on')} Available options are: HELP COMMAND_HELP = <<-HELP Usage: bolt command <action> <command> [options] Available actions are: run Run a command remotely #{examples('command run hostname', 'run hostname on')} Available options are: HELP SCRIPT_HELP = <<-HELP Usage: bolt script <action> <script> [[arg1] ... [argN]] [options] Available actions are: run Upload a local script and run it remotely #{examples('script run my_script.ps1 some args', 'run a script on')} Available options are: HELP PLAN_HELP = <<-HELP Usage: bolt plan <action> <plan> [options] [parameters] Available actions are: show Show list of available plans show <plan> Show details for plan run Run a Puppet task plan Parameters are of the form <parameter>=<value>. #{examples('plan run canary command=hostname', 'run the canary plan on')} Available options are: HELP FILE_HELP = <<-HELP Usage: bolt file <action> [options] Available actions are: upload <src> <dest> Upload local file <src> to <dest> on each node #{examples('file upload /tmp/source /etc/profile.d/login.sh', 'upload a file to')} Available options are: HELP PUPPETFILE_HELP = <<-HELP Usage: bolt puppetfile <action> [options] Available actions are: install Install modules from a Puppetfile into a Boltdir Install modules into the local Boltdir bolt puppetfile install Available options are: HELP # A helper mixin for OptionParser::Switch instances which will allow # us to show/hide particular switch in the help message produced by # the OptionParser#help method on demand. 
module SwitchHider attr_accessor :hide def summarize(*args) return self if hide super end end def initialize(options) super() @options = options @nodes = define('-n', '--nodes NODES', 'Identifies the nodes to target.', 'Enter a comma-separated list of node URIs or group names.', "Or read a node list from an input file '@<file>' or stdin '-'.", 'Example: --nodes localhost,node_group,ssh://nix.com:23,winrm://windows.puppet.com', 'URI format is [protocol://]host[:port]', "SSH is the default protocol; may be #{TRANSPORTS.keys.join(', ')}", 'For Windows nodes, specify the winrm:// protocol if it has not be configured', 'For SSH, port defaults to `22`', 'For WinRM, port defaults to `5985` or `5986` based on the --[no-]ssl setting') do |nodes| @options[:nodes] << get_arg_input(nodes) end.extend(SwitchHider) @query = define('-q', '--query QUERY', 'Query PuppetDB to determine the targets') do |query| @options[:query] = query end.extend(SwitchHider) define('--noop', 'Execute a task that supports it in noop mode') do |_| @options[:noop] = true end define('--description DESCRIPTION', 'Description to use for the job') do |description| @options[:description] = description end define('--params PARAMETERS', "Parameters to a task or plan as json, a json file '@<file>', or on stdin '-'") do |params| @options[:task_options] = parse_params(params) end separator 'Authentication:' define('-u', '--user USER', 'User to authenticate as') do |user| @options[:user] = user end define('-p', '--password [PASSWORD]', 'Password to authenticate with. Omit the value to prompt for the password.') do |password| if password.nil? STDOUT.print "Please enter your password: " @options[:password] = STDIN.noecho(&:gets).chomp STDOUT.puts else @options[:password] = password end end define('--private-key KEY', 'Private ssh key to authenticate with') do |key| @options[:'private-key'] = key end define('--[no-]host-key-check', 'Check host keys with SSH') do |host_key_check| @options[:'host-key-check'] = host_key_check end define('--[no-]ssl', 'Use SSL with WinRM') do |ssl| @options[:ssl] = ssl end define('--[no-]ssl-verify', 'Verify remote host SSL certificate with WinRM') do |ssl_verify| @options[:'ssl-verify'] = ssl_verify end separator 'Escalation:' define('--run-as USER', 'User to run as using privilege escalation') do |user| @options[:'run-as'] = user end define('--sudo-password [PASSWORD]', 'Password for privilege escalation. Omit the value to prompt for the password.') do |password| if password.nil? 
STDOUT.print "Please enter your privilege escalation password: " @options[:'sudo-password'] = STDIN.noecho(&:gets).chomp STDOUT.puts else @options[:'sudo-password'] = password end end separator 'Run context:' define('-c', '--concurrency CONCURRENCY', Integer, 'Maximum number of simultaneous connections (default: 100)') do |concurrency| @options[:concurrency] = concurrency end define('--compile-concurrency CONCURRENCY', Integer, 'Maximum number of simultaneous manifest block compiles (default: number of cores)') do |concurrency| @options[:'compile-concurrency'] = concurrency end define('--modulepath MODULES', "List of directories containing modules, separated by '#{File::PATH_SEPARATOR}'") do |modulepath| @options[:modulepath] = modulepath.split(File::PATH_SEPARATOR) end define('--boltdir FILEPATH', 'Specify what Boltdir to load config from (default: autodiscovered from current working dir)') do |path| @options[:boltdir] = path end define('--configfile FILEPATH', 'Specify where to load config from (default: ~/.puppetlabs/bolt/bolt.yaml)') do |path| @options[:configfile] = path end define('--inventoryfile FILEPATH', 'Specify where to load inventory from (default: ~/.puppetlabs/bolt/inventory.yaml)') do |path| if ENV.include?(Bolt::Inventory::ENVIRONMENT_VAR) raise Bolt::CLIError, "Cannot pass inventory file when #{Bolt::Inventory::ENVIRONMENT_VAR} is set" end @options[:inventoryfile] = path end separator 'Transports:' define('--transport TRANSPORT', TRANSPORTS.keys.map(&:to_s), "Specify a default transport: #{TRANSPORTS.keys.join(', ')}") do |t| @options[:transport] = t end define('--connect-timeout TIMEOUT', Integer, 'Connection timeout (defaults vary)') do |timeout| @options[:'connect-timeout'] = timeout end define('--[no-]tty', 'Request a pseudo TTY on nodes that support it') do |tty| @options[:tty] = tty end define('--tmpdir DIR', 'The directory to upload and execute temporary files on the target') do |tmpdir| @options[:tmpdir] = tmpdir end separator 'Display:' define('--format FORMAT', 'Output format to use: human or json') do |format| @options[:format] = format end define('--[no-]color', 'Whether to show output in color') do |color| @options[:color] = color end define('-h', '--help', 'Display help') do |_| @options[:help] = true end define('--verbose', 'Display verbose logging') do |_| @options[:verbose] = true end define('--debug', 'Display debug logging') do |_| @options[:debug] = true end define('--trace', 'Display error stack traces') do |_| @options[:trace] = true end define('--version', 'Display the version') do |_| puts Bolt::VERSION raise Bolt::CLIExit end update end def update # show the --nodes and --query switches by default @nodes.hide = @query.hide = false # Update the banner according to the subcommand self.banner = case @options[:subcommand] when 'plan' # don't show the --nodes and --query switches in the plan help @nodes.hide = @query.hide = true PLAN_HELP when 'command' COMMAND_HELP when 'script' SCRIPT_HELP when 'task' TASK_HELP when 'file' FILE_HELP when 'puppetfile' PUPPETFILE_HELP else BANNER end end def parse_params(params) json = get_arg_input(params) JSON.parse(json) rescue JSON::ParserError => err raise Bolt::CLIError, "Unable to parse --params value as JSON: #{err}" end def get_arg_input(value) if value.start_with?('@') file = value.sub(/^@/, '') read_arg_file(file) elsif value == '-' STDIN.read else value end end def read_arg_file(file) File.read(File.expand_path(file)) rescue StandardError => err raise Bolt::FileError.new("Error attempting to read #{file}: 
#{err}", file) end end end
1
9,469
Maybe we should put that in the option description?
puppetlabs-bolt
rb
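The patch in the record above makes each CLI-supplied `--modulepath` entry absolute by expanding it against the current working directory. As a rough cross-language illustration of that behavior (a sketch only; the `ModulePathExpander` class and its names are hypothetical, not part of Bolt), the same idea in Java could look like this:

```java
import java.io.File;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class ModulePathExpander {

    // Split a path-separator-delimited module path and resolve each entry
    // against the current working directory, mirroring what File.expand_path
    // does in the Ruby patch above.
    static List<Path> expand(String modulepath) {
        return Arrays.stream(modulepath.split(File.pathSeparator))
                .map(entry -> Paths.get(entry).toAbsolutePath().normalize())
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // "./site-modules" becomes "<cwd>/site-modules"; absolute entries pass through unchanged.
        System.out.println(expand("./site-modules" + File.pathSeparator + "/opt/puppetlabs/modules"));
    }
}
```

Splitting on the platform path separator and normalizing each entry keeps later path handling independent of the directory the command was launched from, which is the behavior the review comment suggests documenting in the option description.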
@@ -211,8 +211,8 @@ public class BesuNodeConfigurationBuilder {
     return this;
   }

-  public BesuNodeConfigurationBuilder keyFilePath(final String keyFilePath) {
-    this.keyFilePath = Optional.of(keyFilePath);
+  public BesuNodeConfigurationBuilder keyFilePath(final Optional<String> keyFilePath) {
+    this.keyFilePath = keyFilePath;
     return this;
   }

1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.tests.acceptance.dsl.node.configuration; import static com.google.common.base.Preconditions.checkNotNull; import static java.util.Collections.singletonList; import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis; import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration; import org.hyperledger.besu.ethereum.core.MiningParameters; import org.hyperledger.besu.ethereum.core.MiningParametersTestBuilder; import org.hyperledger.besu.ethereum.core.PrivacyParameters; import org.hyperledger.besu.ethereum.p2p.config.NetworkingConfiguration; import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration; import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration; import org.hyperledger.besu.tests.acceptance.dsl.node.configuration.genesis.GenesisConfigurationProvider; import java.io.File; import java.net.URISyntaxException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Optional; public class BesuNodeConfigurationBuilder { private String name; private Optional<Path> dataPath = Optional.empty(); private MiningParameters miningParameters = new MiningParametersTestBuilder().enabled(false).build(); private JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault(); private WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault(); private MetricsConfiguration metricsConfiguration = MetricsConfiguration.builder().build(); private Optional<PermissioningConfiguration> permissioningConfiguration = Optional.empty(); private Optional<String> keyFilePath = Optional.empty(); private boolean devMode = true; private GenesisConfigurationProvider genesisConfigProvider = ignore -> Optional.empty(); private Boolean p2pEnabled = true; private final NetworkingConfiguration networkingConfiguration = NetworkingConfiguration.create(); private boolean discoveryEnabled = true; private boolean bootnodeEligible = true; private boolean revertReasonEnabled = false; private boolean secp256K1Native = false; private boolean altbn128Native = false; private final List<String> plugins = new ArrayList<>(); private final List<String> extraCLIOptions = new ArrayList<>(); private List<String> staticNodes = new ArrayList<>(); private boolean isDnsEnabled = false; private Optional<PrivacyParameters> privacyParameters = Optional.empty(); private List<String> runCommand = new ArrayList<>(); public BesuNodeConfigurationBuilder() { // Check connections more frequently during acceptance tests to cut down on // intermittent failures due to the fact that we're running over a real network networkingConfiguration.setInitiateConnectionsFrequency(5); } public BesuNodeConfigurationBuilder name(final String name) { this.name = name; return this; } public 
BesuNodeConfigurationBuilder dataPath(final Path dataPath) { checkNotNull(dataPath); this.dataPath = Optional.of(dataPath); return this; } public BesuNodeConfigurationBuilder miningEnabled() { this.miningParameters = new MiningParametersTestBuilder().enabled(true).build(); this.jsonRpcConfiguration.addRpcApi(RpcApis.MINER); return this; } public BesuNodeConfigurationBuilder miningConfiguration(final MiningParameters miningParameters) { this.miningParameters = miningParameters; this.jsonRpcConfiguration.addRpcApi(RpcApis.MINER); return this; } public BesuNodeConfigurationBuilder jsonRpcConfiguration( final JsonRpcConfiguration jsonRpcConfiguration) { this.jsonRpcConfiguration = jsonRpcConfiguration; return this; } public BesuNodeConfigurationBuilder jsonRpcEnabled() { this.jsonRpcConfiguration.setEnabled(true); this.jsonRpcConfiguration.setPort(0); this.jsonRpcConfiguration.setHostsAllowlist(singletonList("*")); return this; } public BesuNodeConfigurationBuilder metricsEnabled() { this.metricsConfiguration = MetricsConfiguration.builder() .enabled(true) .port(0) .hostsAllowlist(singletonList("*")) .build(); return this; } public BesuNodeConfigurationBuilder enablePrivateTransactions() { this.jsonRpcConfiguration.addRpcApi(RpcApis.EEA); this.jsonRpcConfiguration.addRpcApi(RpcApis.PRIV); return this; } public BesuNodeConfigurationBuilder jsonRpcTxPool() { this.jsonRpcConfiguration.addRpcApi(RpcApis.TX_POOL); return this; } public BesuNodeConfigurationBuilder jsonRpcAuthenticationConfiguration(final String authFile) throws URISyntaxException { final String authTomlPath = Paths.get(ClassLoader.getSystemResource(authFile).toURI()).toAbsolutePath().toString(); this.jsonRpcConfiguration.setAuthenticationEnabled(true); this.jsonRpcConfiguration.setAuthenticationCredentialsFile(authTomlPath); return this; } public BesuNodeConfigurationBuilder jsonRpcAuthenticationUsingPublicKeyEnabled() throws URISyntaxException { final File jwtPublicKey = Paths.get(ClassLoader.getSystemResource("authentication/jwt_public_key").toURI()) .toAbsolutePath() .toFile(); this.jsonRpcConfiguration.setAuthenticationEnabled(true); this.jsonRpcConfiguration.setAuthenticationPublicKeyFile(jwtPublicKey); return this; } public BesuNodeConfigurationBuilder webSocketConfiguration( final WebSocketConfiguration webSocketConfiguration) { this.webSocketConfiguration = webSocketConfiguration; return this; } public BesuNodeConfigurationBuilder metricsConfiguration( final MetricsConfiguration metricsConfiguration) { this.metricsConfiguration = metricsConfiguration; return this; } public BesuNodeConfigurationBuilder webSocketEnabled() { final WebSocketConfiguration config = WebSocketConfiguration.createDefault(); config.setEnabled(true); config.setPort(0); config.setHostsAllowlist(Collections.singletonList("*")); this.webSocketConfiguration = config; return this; } public BesuNodeConfigurationBuilder bootnodeEligible(final boolean bootnodeEligible) { this.bootnodeEligible = bootnodeEligible; return this; } public BesuNodeConfigurationBuilder webSocketAuthenticationEnabled() throws URISyntaxException { final String authTomlPath = Paths.get(ClassLoader.getSystemResource("authentication/auth.toml").toURI()) .toAbsolutePath() .toString(); this.webSocketConfiguration.setAuthenticationEnabled(true); this.webSocketConfiguration.setAuthenticationCredentialsFile(authTomlPath); return this; } public BesuNodeConfigurationBuilder webSocketAuthenticationUsingPublicKeyEnabled() throws URISyntaxException { final File jwtPublicKey = 
Paths.get(ClassLoader.getSystemResource("authentication/jwt_public_key").toURI()) .toAbsolutePath() .toFile(); this.webSocketConfiguration.setAuthenticationEnabled(true); this.webSocketConfiguration.setAuthenticationPublicKeyFile(jwtPublicKey); return this; } public BesuNodeConfigurationBuilder permissioningConfiguration( final PermissioningConfiguration permissioningConfiguration) { this.permissioningConfiguration = Optional.of(permissioningConfiguration); return this; } public BesuNodeConfigurationBuilder keyFilePath(final String keyFilePath) { this.keyFilePath = Optional.of(keyFilePath); return this; } public BesuNodeConfigurationBuilder devMode(final boolean devMode) { this.devMode = devMode; return this; } public BesuNodeConfigurationBuilder genesisConfigProvider( final GenesisConfigurationProvider genesisConfigProvider) { this.genesisConfigProvider = genesisConfigProvider; return this; } public BesuNodeConfigurationBuilder p2pEnabled(final Boolean p2pEnabled) { this.p2pEnabled = p2pEnabled; return this; } public BesuNodeConfigurationBuilder discoveryEnabled(final boolean discoveryEnabled) { this.discoveryEnabled = discoveryEnabled; return this; } public BesuNodeConfigurationBuilder plugins(final List<String> plugins) { this.plugins.clear(); this.plugins.addAll(plugins); return this; } public BesuNodeConfigurationBuilder extraCLIOptions(final List<String> extraCLIOptions) { this.extraCLIOptions.clear(); this.extraCLIOptions.addAll(extraCLIOptions); return this; } public BesuNodeConfigurationBuilder revertReasonEnabled() { this.revertReasonEnabled = true; return this; } public BesuNodeConfigurationBuilder secp256k1Native() { this.secp256K1Native = true; return this; } public BesuNodeConfigurationBuilder altbn128() { this.altbn128Native = true; return this; } public BesuNodeConfigurationBuilder staticNodes(final List<String> staticNodes) { this.staticNodes = staticNodes; return this; } public BesuNodeConfigurationBuilder dnsEnabled(final boolean isDnsEnabled) { this.isDnsEnabled = isDnsEnabled; return this; } public BesuNodeConfigurationBuilder privacyParameters(final PrivacyParameters privacyParameters) { this.privacyParameters = Optional.ofNullable(privacyParameters); return this; } public BesuNodeConfigurationBuilder run(final String... commands) { this.runCommand = List.of(commands); return this; } public BesuNodeConfiguration build() { return new BesuNodeConfiguration( name, dataPath, miningParameters, jsonRpcConfiguration, webSocketConfiguration, metricsConfiguration, permissioningConfiguration, keyFilePath, devMode, genesisConfigProvider, p2pEnabled, networkingConfiguration, discoveryEnabled, bootnodeEligible, revertReasonEnabled, secp256K1Native, altbn128Native, plugins, extraCLIOptions, staticNodes, isDnsEnabled, privacyParameters, runCommand); } }
1
23,551
Why do we need to make this optional? Isn't the idea of the builder that if you don't need this value you just don't call the `keyFilePath ` method?
hyperledger-besu
java
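The review above questions switching the builder method to an `Optional<String>` parameter. A minimal sketch of the two styles side by side, using a hypothetical `NodeConfigBuilder` rather than the real Besu class, shows the trade-off: the `String` overload keeps the builder surface simple, while the `Optional` overload lets callers forward a possibly-absent value without branching at every call site.

```java
import java.util.Optional;

public class BuilderStyles {

    // Stripped-down stand-in for the builder discussed in the review; not the actual Besu class.
    static class NodeConfigBuilder {
        private Optional<String> keyFilePath = Optional.empty();

        // Original style: callers that have no key file simply never call this.
        NodeConfigBuilder keyFilePath(String path) {
            this.keyFilePath = Optional.of(path);
            return this;
        }

        // Patched style: callers forward an Optional that may be empty.
        NodeConfigBuilder keyFilePath(Optional<String> maybePath) {
            this.keyFilePath = maybePath;
            return this;
        }
    }

    public static void main(String[] args) {
        Optional<String> maybeKey = Optional.empty();

        // With the String overload the caller has to branch...
        NodeConfigBuilder b1 = new NodeConfigBuilder();
        maybeKey.ifPresent(b1::keyFilePath);

        // ...with the Optional overload the value is passed straight through.
        NodeConfigBuilder b2 = new NodeConfigBuilder().keyFilePath(maybeKey);
    }
}
```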
@@ -125,7 +125,7 @@ public class Parquet {
     public WriteBuilder forTable(Table table) {
       schema(table.schema());
       setAll(table.properties());
-      metricsConfig(MetricsConfig.fromProperties(table.properties()));
+      metricsConfig(MetricsConfig.forTable(table));
       return this;
     }

1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.parquet; import java.io.File; import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; import org.apache.iceberg.FileFormat; import org.apache.iceberg.Files; import org.apache.iceberg.MetricsConfig; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SchemaParser; import org.apache.iceberg.SortOrder; import org.apache.iceberg.StructLike; import org.apache.iceberg.Table; import org.apache.iceberg.avro.AvroSchemaUtil; import org.apache.iceberg.data.parquet.GenericParquetWriter; import org.apache.iceberg.deletes.EqualityDeleteWriter; import org.apache.iceberg.deletes.PositionDeleteWriter; import org.apache.iceberg.encryption.EncryptionKeyMetadata; import org.apache.iceberg.exceptions.RuntimeIOException; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.hadoop.HadoopInputFile; import org.apache.iceberg.hadoop.HadoopOutputFile; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.io.DataWriter; import org.apache.iceberg.io.DeleteSchemaUtil; import org.apache.iceberg.io.FileAppender; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.mapping.NameMapping; import org.apache.iceberg.parquet.ParquetValueWriters.PositionDeleteStructWriter; import org.apache.iceberg.parquet.ParquetValueWriters.StructWriter; import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.util.ArrayUtil; import org.apache.iceberg.util.PropertyUtil; import org.apache.parquet.HadoopReadOptions; import org.apache.parquet.ParquetReadOptions; import org.apache.parquet.avro.AvroReadSupport; import org.apache.parquet.avro.AvroWriteSupport; import org.apache.parquet.column.ParquetProperties; import org.apache.parquet.column.ParquetProperties.WriterVersion; import org.apache.parquet.hadoop.ParquetFileReader; import org.apache.parquet.hadoop.ParquetFileWriter; import org.apache.parquet.hadoop.ParquetReader; import org.apache.parquet.hadoop.ParquetWriter; import org.apache.parquet.hadoop.api.ReadSupport; import org.apache.parquet.hadoop.api.WriteSupport; import org.apache.parquet.hadoop.metadata.CompressionCodecName; import org.apache.parquet.schema.MessageType; import static 
org.apache.iceberg.TableProperties.DELETE_PARQUET_COMPRESSION; import static org.apache.iceberg.TableProperties.DELETE_PARQUET_COMPRESSION_LEVEL; import static org.apache.iceberg.TableProperties.DELETE_PARQUET_DICT_SIZE_BYTES; import static org.apache.iceberg.TableProperties.DELETE_PARQUET_PAGE_SIZE_BYTES; import static org.apache.iceberg.TableProperties.DELETE_PARQUET_ROW_GROUP_SIZE_BYTES; import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION; import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_DEFAULT; import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL; import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL_DEFAULT; import static org.apache.iceberg.TableProperties.PARQUET_DICT_SIZE_BYTES; import static org.apache.iceberg.TableProperties.PARQUET_DICT_SIZE_BYTES_DEFAULT; import static org.apache.iceberg.TableProperties.PARQUET_PAGE_SIZE_BYTES; import static org.apache.iceberg.TableProperties.PARQUET_PAGE_SIZE_BYTES_DEFAULT; import static org.apache.iceberg.TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES; import static org.apache.iceberg.TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT; public class Parquet { private Parquet() { } private static final Collection<String> READ_PROPERTIES_TO_REMOVE = Sets.newHashSet( "parquet.read.filter", "parquet.private.read.filter.predicate", "parquet.read.support.class"); public static WriteBuilder write(OutputFile file) { return new WriteBuilder(file); } public static class WriteBuilder { private final OutputFile file; private final Map<String, String> metadata = Maps.newLinkedHashMap(); private final Map<String, String> config = Maps.newLinkedHashMap(); private Schema schema = null; private String name = "table"; private WriteSupport<?> writeSupport = null; private Function<MessageType, ParquetValueWriter<?>> createWriterFunc = null; private MetricsConfig metricsConfig = MetricsConfig.getDefault(); private ParquetFileWriter.Mode writeMode = ParquetFileWriter.Mode.CREATE; private WriterVersion writerVersion = WriterVersion.PARQUET_1_0; private Function<Map<String, String>, Context> createContextFunc = Context::dataContext; private WriteBuilder(OutputFile file) { this.file = file; } public WriteBuilder forTable(Table table) { schema(table.schema()); setAll(table.properties()); metricsConfig(MetricsConfig.fromProperties(table.properties())); return this; } public WriteBuilder schema(Schema newSchema) { this.schema = newSchema; return this; } public WriteBuilder named(String newName) { this.name = newName; return this; } public WriteBuilder writeSupport(WriteSupport<?> newWriteSupport) { this.writeSupport = newWriteSupport; return this; } public WriteBuilder set(String property, String value) { config.put(property, value); return this; } public WriteBuilder setAll(Map<String, String> properties) { config.putAll(properties); return this; } public WriteBuilder meta(String property, String value) { metadata.put(property, value); return this; } public WriteBuilder createWriterFunc(Function<MessageType, ParquetValueWriter<?>> newCreateWriterFunc) { this.createWriterFunc = newCreateWriterFunc; return this; } public WriteBuilder metricsConfig(MetricsConfig newMetricsConfig) { this.metricsConfig = newMetricsConfig; return this; } public WriteBuilder overwrite() { return overwrite(true); } public WriteBuilder overwrite(boolean enabled) { this.writeMode = enabled ? 
ParquetFileWriter.Mode.OVERWRITE : ParquetFileWriter.Mode.CREATE; return this; } public WriteBuilder writerVersion(WriterVersion version) { this.writerVersion = version; return this; } @SuppressWarnings("unchecked") private <T> WriteSupport<T> getWriteSupport(MessageType type) { if (writeSupport != null) { return (WriteSupport<T>) writeSupport; } else { return new AvroWriteSupport<>( type, ParquetAvro.parquetAvroSchema(AvroSchemaUtil.convert(schema, name)), ParquetAvro.DEFAULT_MODEL); } } /* * Sets the writer version. Default value is PARQUET_1_0 (v1). */ @VisibleForTesting WriteBuilder withWriterVersion(WriterVersion version) { this.writerVersion = version; return this; } // supposed to always be a private method used strictly by data and delete write builders private WriteBuilder createContextFunc(Function<Map<String, String>, Context> newCreateContextFunc) { this.createContextFunc = newCreateContextFunc; return this; } public <D> FileAppender<D> build() throws IOException { Preconditions.checkNotNull(schema, "Schema is required"); Preconditions.checkNotNull(name, "Table name is required and cannot be null"); // add the Iceberg schema to keyValueMetadata meta("iceberg.schema", SchemaParser.toJson(schema)); // Map Iceberg properties to pass down to the Parquet writer Context context = createContextFunc.apply(config); int rowGroupSize = context.rowGroupSize(); int pageSize = context.pageSize(); int dictionaryPageSize = context.dictionaryPageSize(); String compressionLevel = context.compressionLevel(); CompressionCodecName codec = context.codec(); if (compressionLevel != null) { switch (codec) { case GZIP: config.put("zlib.compress.level", compressionLevel); break; case BROTLI: config.put("compression.brotli.quality", compressionLevel); break; case ZSTD: config.put("io.compression.codec.zstd.level", compressionLevel); break; default: // compression level is not supported; ignore it } } set("parquet.avro.write-old-list-structure", "false"); MessageType type = ParquetSchemaUtil.convert(schema, name); if (createWriterFunc != null) { Preconditions.checkArgument(writeSupport == null, "Cannot write with both write support and Parquet value writer"); Configuration conf; if (file instanceof HadoopOutputFile) { conf = ((HadoopOutputFile) file).getConf(); } else { conf = new Configuration(); } for (Map.Entry<String, String> entry : config.entrySet()) { conf.set(entry.getKey(), entry.getValue()); } ParquetProperties parquetProperties = ParquetProperties.builder() .withWriterVersion(writerVersion) .withPageSize(pageSize) .withDictionaryPageSize(dictionaryPageSize) .build(); return new org.apache.iceberg.parquet.ParquetWriter<>( conf, file, schema, rowGroupSize, metadata, createWriterFunc, codec, parquetProperties, metricsConfig, writeMode); } else { return new ParquetWriteAdapter<>(new ParquetWriteBuilder<D>(ParquetIO.file(file)) .withWriterVersion(writerVersion) .setType(type) .setConfig(config) .setKeyValueMetadata(metadata) .setWriteSupport(getWriteSupport(type)) .withCompressionCodec(codec) .withWriteMode(writeMode) .withRowGroupSize(rowGroupSize) .withPageSize(pageSize) .withDictionaryPageSize(dictionaryPageSize) .build(), metricsConfig); } } private static class Context { private final int rowGroupSize; private final int pageSize; private final int dictionaryPageSize; private final CompressionCodecName codec; private final String compressionLevel; private Context(int rowGroupSize, int pageSize, int dictionaryPageSize, CompressionCodecName codec, String compressionLevel) { this.rowGroupSize = 
rowGroupSize; this.pageSize = pageSize; this.dictionaryPageSize = dictionaryPageSize; this.codec = codec; this.compressionLevel = compressionLevel; } static Context dataContext(Map<String, String> config) { int rowGroupSize = Integer.parseInt(config.getOrDefault( PARQUET_ROW_GROUP_SIZE_BYTES, PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT)); int pageSize = Integer.parseInt(config.getOrDefault( PARQUET_PAGE_SIZE_BYTES, PARQUET_PAGE_SIZE_BYTES_DEFAULT)); int dictionaryPageSize = Integer.parseInt(config.getOrDefault( PARQUET_DICT_SIZE_BYTES, PARQUET_DICT_SIZE_BYTES_DEFAULT)); String codecAsString = config.getOrDefault(PARQUET_COMPRESSION, PARQUET_COMPRESSION_DEFAULT); CompressionCodecName codec = toCodec(codecAsString); String compressionLevel = config.getOrDefault(PARQUET_COMPRESSION_LEVEL, PARQUET_COMPRESSION_LEVEL_DEFAULT); return new Context(rowGroupSize, pageSize, dictionaryPageSize, codec, compressionLevel); } static Context deleteContext(Map<String, String> config) { // default delete config using data config Context dataContext = dataContext(config); int rowGroupSize = PropertyUtil.propertyAsInt(config, DELETE_PARQUET_ROW_GROUP_SIZE_BYTES, dataContext.rowGroupSize()); int pageSize = PropertyUtil.propertyAsInt(config, DELETE_PARQUET_PAGE_SIZE_BYTES, dataContext.pageSize()); int dictionaryPageSize = PropertyUtil.propertyAsInt(config, DELETE_PARQUET_DICT_SIZE_BYTES, dataContext.dictionaryPageSize()); String codecAsString = config.get(DELETE_PARQUET_COMPRESSION); CompressionCodecName codec = codecAsString != null ? toCodec(codecAsString) : dataContext.codec(); String compressionLevel = config.getOrDefault(DELETE_PARQUET_COMPRESSION_LEVEL, dataContext.compressionLevel()); return new Context(rowGroupSize, pageSize, dictionaryPageSize, codec, compressionLevel); } private static CompressionCodecName toCodec(String codecAsString) { try { return CompressionCodecName.valueOf(codecAsString.toUpperCase(Locale.ENGLISH)); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Unsupported compression codec: " + codecAsString); } } int rowGroupSize() { return rowGroupSize; } int pageSize() { return pageSize; } int dictionaryPageSize() { return dictionaryPageSize; } CompressionCodecName codec() { return codec; } String compressionLevel() { return compressionLevel; } } } public static DataWriteBuilder writeData(OutputFile file) { return new DataWriteBuilder(file); } public static class DataWriteBuilder { private final WriteBuilder appenderBuilder; private final String location; private PartitionSpec spec = null; private StructLike partition = null; private EncryptionKeyMetadata keyMetadata = null; private SortOrder sortOrder = null; private DataWriteBuilder(OutputFile file) { this.appenderBuilder = write(file); this.location = file.location(); } public DataWriteBuilder forTable(Table table) { schema(table.schema()); withSpec(table.spec()); setAll(table.properties()); metricsConfig(MetricsConfig.fromProperties(table.properties())); return this; } public DataWriteBuilder schema(Schema newSchema) { appenderBuilder.schema(newSchema); return this; } public DataWriteBuilder set(String property, String value) { appenderBuilder.set(property, value); return this; } public DataWriteBuilder setAll(Map<String, String> properties) { appenderBuilder.setAll(properties); return this; } public DataWriteBuilder meta(String property, String value) { appenderBuilder.meta(property, value); return this; } public DataWriteBuilder overwrite() { return overwrite(true); } public DataWriteBuilder overwrite(boolean 
enabled) { appenderBuilder.overwrite(enabled); return this; } public DataWriteBuilder metricsConfig(MetricsConfig newMetricsConfig) { appenderBuilder.metricsConfig(newMetricsConfig); return this; } public DataWriteBuilder createWriterFunc(Function<MessageType, ParquetValueWriter<?>> newCreateWriterFunc) { appenderBuilder.createWriterFunc(newCreateWriterFunc); return this; } public DataWriteBuilder withSpec(PartitionSpec newSpec) { this.spec = newSpec; return this; } public DataWriteBuilder withPartition(StructLike newPartition) { this.partition = newPartition; return this; } public DataWriteBuilder withKeyMetadata(EncryptionKeyMetadata metadata) { this.keyMetadata = metadata; return this; } public DataWriteBuilder withSortOrder(SortOrder newSortOrder) { this.sortOrder = newSortOrder; return this; } public <T> DataWriter<T> build() throws IOException { Preconditions.checkArgument(spec != null, "Cannot create data writer without spec"); Preconditions.checkArgument(spec.isUnpartitioned() || partition != null, "Partition must not be null when creating data writer for partitioned spec"); FileAppender<T> fileAppender = appenderBuilder.build(); return new DataWriter<>(fileAppender, FileFormat.PARQUET, location, spec, partition, keyMetadata, sortOrder); } } public static DeleteWriteBuilder writeDeletes(OutputFile file) { return new DeleteWriteBuilder(file); } public static class DeleteWriteBuilder { private final WriteBuilder appenderBuilder; private final String location; private Function<MessageType, ParquetValueWriter<?>> createWriterFunc = null; private Schema rowSchema = null; private PartitionSpec spec = null; private StructLike partition = null; private EncryptionKeyMetadata keyMetadata = null; private int[] equalityFieldIds = null; private SortOrder sortOrder; private Function<CharSequence, ?> pathTransformFunc = Function.identity(); private DeleteWriteBuilder(OutputFile file) { this.appenderBuilder = write(file); this.location = file.location(); } public DeleteWriteBuilder forTable(Table table) { rowSchema(table.schema()); withSpec(table.spec()); setAll(table.properties()); metricsConfig(MetricsConfig.fromProperties(table.properties())); return this; } public DeleteWriteBuilder set(String property, String value) { appenderBuilder.set(property, value); return this; } public DeleteWriteBuilder setAll(Map<String, String> properties) { appenderBuilder.setAll(properties); return this; } public DeleteWriteBuilder meta(String property, String value) { appenderBuilder.meta(property, value); return this; } public DeleteWriteBuilder overwrite() { return overwrite(true); } public DeleteWriteBuilder overwrite(boolean enabled) { appenderBuilder.overwrite(enabled); return this; } public DeleteWriteBuilder metricsConfig(MetricsConfig newMetricsConfig) { // TODO: keep full metrics for position delete file columns appenderBuilder.metricsConfig(newMetricsConfig); return this; } public DeleteWriteBuilder createWriterFunc(Function<MessageType, ParquetValueWriter<?>> newCreateWriterFunc) { this.createWriterFunc = newCreateWriterFunc; return this; } public DeleteWriteBuilder rowSchema(Schema newSchema) { this.rowSchema = newSchema; return this; } public DeleteWriteBuilder withSpec(PartitionSpec newSpec) { this.spec = newSpec; return this; } public DeleteWriteBuilder withPartition(StructLike key) { this.partition = key; return this; } public DeleteWriteBuilder withKeyMetadata(EncryptionKeyMetadata metadata) { this.keyMetadata = metadata; return this; } public DeleteWriteBuilder equalityFieldIds(List<Integer> 
fieldIds) { this.equalityFieldIds = ArrayUtil.toIntArray(fieldIds); return this; } public DeleteWriteBuilder equalityFieldIds(int... fieldIds) { this.equalityFieldIds = fieldIds; return this; } public DeleteWriteBuilder transformPaths(Function<CharSequence, ?> newPathTransformFunc) { this.pathTransformFunc = newPathTransformFunc; return this; } public DeleteWriteBuilder withSortOrder(SortOrder newSortOrder) { this.sortOrder = newSortOrder; return this; } public <T> EqualityDeleteWriter<T> buildEqualityWriter() throws IOException { Preconditions.checkState(rowSchema != null, "Cannot create equality delete file without a schema`"); Preconditions.checkState(equalityFieldIds != null, "Cannot create equality delete file without delete field ids"); Preconditions.checkState(createWriterFunc != null, "Cannot create equality delete file unless createWriterFunc is set"); Preconditions.checkArgument(spec != null, "Spec must not be null when creating equality delete writer"); Preconditions.checkArgument(spec.isUnpartitioned() || partition != null, "Partition must not be null for partitioned writes"); meta("delete-type", "equality"); meta("delete-field-ids", IntStream.of(equalityFieldIds) .mapToObj(Objects::toString) .collect(Collectors.joining(", "))); // the appender uses the row schema without extra columns appenderBuilder.schema(rowSchema); appenderBuilder.createWriterFunc(createWriterFunc); appenderBuilder.createContextFunc(WriteBuilder.Context::deleteContext); return new EqualityDeleteWriter<>( appenderBuilder.build(), FileFormat.PARQUET, location, spec, partition, keyMetadata, sortOrder, equalityFieldIds); } public <T> PositionDeleteWriter<T> buildPositionWriter() throws IOException { Preconditions.checkState(equalityFieldIds == null, "Cannot create position delete file using delete field ids"); Preconditions.checkArgument(spec != null, "Spec must not be null when creating position delete writer"); Preconditions.checkArgument(spec.isUnpartitioned() || partition != null, "Partition must not be null for partitioned writes"); meta("delete-type", "position"); if (rowSchema != null && createWriterFunc != null) { // the appender uses the row schema wrapped with position fields appenderBuilder.schema(DeleteSchemaUtil.posDeleteSchema(rowSchema)); appenderBuilder.createWriterFunc(parquetSchema -> { ParquetValueWriter<?> writer = createWriterFunc.apply(parquetSchema); if (writer instanceof StructWriter) { return new PositionDeleteStructWriter<T>((StructWriter<?>) writer, pathTransformFunc); } else { throw new UnsupportedOperationException("Cannot wrap writer for position deletes: " + writer.getClass()); } }); } else { appenderBuilder.schema(DeleteSchemaUtil.pathPosSchema()); appenderBuilder.createWriterFunc(parquetSchema -> new PositionDeleteStructWriter<T>((StructWriter<?>) GenericParquetWriter.buildWriter(parquetSchema), Function.identity())); } appenderBuilder.createContextFunc(WriteBuilder.Context::deleteContext); return new PositionDeleteWriter<>( appenderBuilder.build(), FileFormat.PARQUET, location, spec, partition, keyMetadata); } } private static class ParquetWriteBuilder<T> extends ParquetWriter.Builder<T, ParquetWriteBuilder<T>> { private Map<String, String> keyValueMetadata = Maps.newHashMap(); private Map<String, String> config = Maps.newHashMap(); private MessageType type; private WriteSupport<T> writeSupport; private ParquetWriteBuilder(org.apache.parquet.io.OutputFile path) { super(path); } @Override protected ParquetWriteBuilder<T> self() { return this; } public ParquetWriteBuilder<T> 
setKeyValueMetadata(Map<String, String> keyValueMetadata) { this.keyValueMetadata = keyValueMetadata; return self(); } public ParquetWriteBuilder<T> setConfig(Map<String, String> config) { this.config = config; return self(); } public ParquetWriteBuilder<T> setType(MessageType type) { this.type = type; return self(); } public ParquetWriteBuilder<T> setWriteSupport(WriteSupport<T> writeSupport) { this.writeSupport = writeSupport; return self(); } @Override protected WriteSupport<T> getWriteSupport(Configuration configuration) { for (Map.Entry<String, String> entry : config.entrySet()) { configuration.set(entry.getKey(), entry.getValue()); } return new ParquetWriteSupport<>(type, keyValueMetadata, writeSupport); } } public static ReadBuilder read(InputFile file) { return new ReadBuilder(file); } public static class ReadBuilder { private final InputFile file; private final Map<String, String> properties = Maps.newHashMap(); private Long start = null; private Long length = null; private Schema schema = null; private Expression filter = null; private ReadSupport<?> readSupport = null; private Function<MessageType, VectorizedReader<?>> batchedReaderFunc = null; private Function<MessageType, ParquetValueReader<?>> readerFunc = null; private boolean filterRecords = true; private boolean caseSensitive = true; private boolean callInit = false; private boolean reuseContainers = false; private int maxRecordsPerBatch = 10000; private NameMapping nameMapping = null; private ReadBuilder(InputFile file) { this.file = file; } /** * Restricts the read to the given range: [start, start + length). * * @param newStart the start position for this read * @param newLength the length of the range this read should scan * @return this builder for method chaining */ public ReadBuilder split(long newStart, long newLength) { this.start = newStart; this.length = newLength; return this; } public ReadBuilder project(Schema newSchema) { this.schema = newSchema; return this; } public ReadBuilder caseInsensitive() { return caseSensitive(false); } public ReadBuilder caseSensitive(boolean newCaseSensitive) { this.caseSensitive = newCaseSensitive; return this; } public ReadBuilder filterRecords(boolean newFilterRecords) { this.filterRecords = newFilterRecords; return this; } public ReadBuilder filter(Expression newFilter) { this.filter = newFilter; return this; } public ReadBuilder readSupport(ReadSupport<?> newFilterSupport) { this.readSupport = newFilterSupport; return this; } public ReadBuilder createReaderFunc(Function<MessageType, ParquetValueReader<?>> newReaderFunction) { Preconditions.checkArgument(this.batchedReaderFunc == null, "Reader function cannot be set since the batched version is already set"); this.readerFunc = newReaderFunction; return this; } public ReadBuilder createBatchedReaderFunc(Function<MessageType, VectorizedReader<?>> func) { Preconditions.checkArgument(this.readerFunc == null, "Batched reader function cannot be set since the non-batched version is already set"); this.batchedReaderFunc = func; return this; } public ReadBuilder set(String key, String value) { properties.put(key, value); return this; } public ReadBuilder callInit() { this.callInit = true; return this; } public ReadBuilder reuseContainers() { this.reuseContainers = true; return this; } public ReadBuilder recordsPerBatch(int numRowsPerBatch) { this.maxRecordsPerBatch = numRowsPerBatch; return this; } public ReadBuilder withNameMapping(NameMapping newNameMapping) { this.nameMapping = newNameMapping; return this; } 
@SuppressWarnings({"unchecked", "checkstyle:CyclomaticComplexity"}) public <D> CloseableIterable<D> build() { if (readerFunc != null || batchedReaderFunc != null) { ParquetReadOptions.Builder optionsBuilder; if (file instanceof HadoopInputFile) { // remove read properties already set that may conflict with this read Configuration conf = new Configuration(((HadoopInputFile) file).getConf()); for (String property : READ_PROPERTIES_TO_REMOVE) { conf.unset(property); } optionsBuilder = HadoopReadOptions.builder(conf); } else { optionsBuilder = ParquetReadOptions.builder(); } for (Map.Entry<String, String> entry : properties.entrySet()) { optionsBuilder.set(entry.getKey(), entry.getValue()); } if (start != null) { optionsBuilder.withRange(start, start + length); } ParquetReadOptions options = optionsBuilder.build(); if (batchedReaderFunc != null) { return new VectorizedParquetReader<>(file, schema, options, batchedReaderFunc, nameMapping, filter, reuseContainers, caseSensitive, maxRecordsPerBatch); } else { return new org.apache.iceberg.parquet.ParquetReader<>( file, schema, options, readerFunc, nameMapping, filter, reuseContainers, caseSensitive); } } ParquetReadBuilder<D> builder = new ParquetReadBuilder<>(ParquetIO.file(file)); builder.project(schema); if (readSupport != null) { builder.readSupport((ReadSupport<D>) readSupport); } else { builder.readSupport(new AvroReadSupport<>(ParquetAvro.DEFAULT_MODEL)); } // default options for readers builder.set("parquet.strict.typing", "false") // allow type promotion .set("parquet.avro.compatible", "false") // use the new RecordReader with Utf8 support .set("parquet.avro.add-list-element-records", "false"); // assume that lists use a 3-level schema for (Map.Entry<String, String> entry : properties.entrySet()) { builder.set(entry.getKey(), entry.getValue()); } if (filter != null) { // TODO: should not need to get the schema to push down before opening the file. 
// Parquet should allow setting a filter inside its read support MessageType type; try (ParquetFileReader schemaReader = ParquetFileReader.open(ParquetIO.file(file))) { type = schemaReader.getFileMetaData().getSchema(); } catch (IOException e) { throw new RuntimeIOException(e); } Schema fileSchema = ParquetSchemaUtil.convert(type); builder.useStatsFilter() .useDictionaryFilter() .useRecordFilter(filterRecords) .withFilter(ParquetFilters.convert(fileSchema, filter, caseSensitive)); } else { // turn off filtering builder.useStatsFilter(false) .useDictionaryFilter(false) .useRecordFilter(false); } if (callInit) { builder.callInit(); } if (start != null) { builder.withFileRange(start, start + length); } if (nameMapping != null) { builder.withNameMapping(nameMapping); } return new ParquetIterable<>(builder); } } private static class ParquetReadBuilder<T> extends ParquetReader.Builder<T> { private Schema schema = null; private ReadSupport<T> readSupport = null; private boolean callInit = false; private NameMapping nameMapping = null; private ParquetReadBuilder(org.apache.parquet.io.InputFile file) { super(file); } public ParquetReadBuilder<T> project(Schema newSchema) { this.schema = newSchema; return this; } public ParquetReadBuilder<T> withNameMapping(NameMapping newNameMapping) { this.nameMapping = newNameMapping; return this; } public ParquetReadBuilder<T> readSupport(ReadSupport<T> newReadSupport) { this.readSupport = newReadSupport; return this; } public ParquetReadBuilder<T> callInit() { this.callInit = true; return this; } @Override protected ReadSupport<T> getReadSupport() { return new ParquetReadSupport<>(schema, readSupport, callInit, nameMapping); } } /** * Combines several files into one * * @param inputFiles an {@link Iterable} of parquet files. The order of iteration determines the order in which * content of files are read and written to the {@code outputFile} * @param outputFile the output parquet file containing all the data from {@code inputFiles} * @param rowGroupSize the row group size to use when writing the {@code outputFile} * @param schema the schema of the data * @param metadata extraMetadata to write at the footer of the {@code outputFile} */ public static void concat(Iterable<File> inputFiles, File outputFile, int rowGroupSize, Schema schema, Map<String, String> metadata) throws IOException { OutputFile file = Files.localOutput(outputFile); ParquetFileWriter writer = new ParquetFileWriter( ParquetIO.file(file), ParquetSchemaUtil.convert(schema, "table"), ParquetFileWriter.Mode.CREATE, rowGroupSize, 0); writer.start(); for (File inputFile : inputFiles) { writer.appendFile(ParquetIO.file(Files.localInput(inputFile))); } writer.end(metadata); } }
1
33,716
Shall we do the same for ORC and Avro?
apache-iceberg
java
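The patch above swaps `MetricsConfig.fromProperties(table.properties())` for `MetricsConfig.forTable(table)`, and the reviewer asks whether ORC and Avro should follow. The sketch below is not the real Iceberg API; it only illustrates, with hypothetical stand-in classes and property names, why a table-aware factory can do more than one that sees only the string property map (for example, consulting sort columns in addition to properties).

```java
import java.util.List;
import java.util.Map;
import java.util.Set;

public class TableAwareMetrics {

    // Hypothetical stand-in for a table handle; not the real Iceberg Table.
    static class TableHandle {
        final Map<String, String> properties;
        final List<String> sortColumns;

        TableHandle(Map<String, String> properties, List<String> sortColumns) {
            this.properties = properties;
            this.sortColumns = sortColumns;
        }
    }

    // Hypothetical metrics settings; not the real MetricsConfig.
    static class MetricsSettings {
        final String defaultMode;
        final Set<String> fullMetricsColumns;

        MetricsSettings(String defaultMode, Set<String> fullMetricsColumns) {
            this.defaultMode = defaultMode;
            this.fullMetricsColumns = fullMetricsColumns;
        }

        // Property-only factory: limited to whatever is in the string map.
        static MetricsSettings fromProperties(Map<String, String> props) {
            return new MetricsSettings(props.getOrDefault("metrics.default", "truncate(16)"), Set.of());
        }

        // Table-aware factory: starts from the properties but can also use other
        // table state, e.g. keep richer metrics for the table's sort columns.
        static MetricsSettings forTable(TableHandle table) {
            MetricsSettings base = fromProperties(table.properties);
            return new MetricsSettings(base.defaultMode, Set.copyOf(table.sortColumns));
        }
    }

    public static void main(String[] args) {
        TableHandle table = new TableHandle(Map.of(), List.of("event_date"));
        System.out.println(MetricsSettings.forTable(table).fullMetricsColumns);
    }
}
```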
@@ -1,4 +1,5 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
 namespace Microsoft.VisualStudio.TestPlatform.ObjectModel.Navigation
 {
 #if NET46
1
// Copyright (c) Microsoft. All rights reserved. namespace Microsoft.VisualStudio.TestPlatform.ObjectModel.Navigation { #if NET46 using System; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Reflection; using System.Runtime.InteropServices; using Dia; /// <summary> /// To get method's file name, startline and endline from desktop assembly file. /// </summary> internal class FullSymbolReader : ISymbolReader { /// <summary> /// To check isDisposed /// </summary> private bool isDisposed; private IDiaDataSource source; private IDiaSession session; /// <summary> /// Holds type symbols avaiable in the source. /// </summary> private Dictionary<string, IDiaSymbol> typeSymbols = new Dictionary<string, IDiaSymbol>(); /// <summary> /// Holds method symbols for all types in the source. /// Methods in different types can have same name, hence seprated dicitionary is created for each type. /// Bug: Method overrides in same type are not handled (not a regression) /// </summary> private Dictionary<string, Dictionary<string, IDiaSymbol>> methodSymbols = new Dictionary<string, Dictionary<string, IDiaSymbol>>(); /// <summary> /// Manifest files for Reg Free Com. This is essentially put in place for the msdia dependency. /// </summary> private const string ManifestFileNameX86 = "TestPlatform.ObjectModel.x86.manifest"; private const string ManifestFileNameX64 = "TestPlatform.ObjectModel.manifest"; /// <summary> /// dispose caches /// </summary> public void Dispose() { this.Dispose(true); // Use SupressFinalize in case a subclass // of this type implements a finalizer. GC.SuppressFinalize(this); } /// <summary> /// Cache symbols from binary path /// </summary> /// <param name="binaryPath"> /// The binary path is assembly path Ex: \path\to\bin\Debug\simpleproject.dll /// </param> /// <param name="searchPath"> /// search path. /// </param> public void CacheSymbols(string binaryPath, string searchPath) { using (var activationContext = new RegistryFreeActivationContext(this.GetManifestFileForRegFreeCom())) { // Activating and Deactivating the context here itself since deactivating context from a different thread would throw an SEH exception. // We do not need the activation context post this point since the DIASession COM object is created here only. try { activationContext.ActivateContext(); this.source = new DiaSource(); this.source.loadDataForExe(binaryPath, searchPath, null); this.source.openSession(out this.session); this.PopulateCacheForTypeAndMethodSymbols(); } catch (COMException) { this.Dispose(); throw; } } } /// <summary> /// Gets Navigation data from caches /// </summary> /// <param name="declaringTypeName"> /// Type name Ex: MyNameSpace.MyType /// </param> /// <param name="methodName"> /// Method name in declaringTypeName Ex: Method1 /// </param> /// <returns> /// <see cref="INavigationData"/>. /// Returns INavigationData which contains filename and linenumber. /// </returns> public INavigationData GetNavigationData(string declaringTypeName, string methodName) { INavigationData navigationData = null; IDiaSymbol methodSymbol = null; IDiaSymbol typeSymbol = this.GetTypeSymbol(declaringTypeName, SymTagEnum.SymTagCompiland); if (typeSymbol != null) { methodSymbol = this.GetMethodSymbol(typeSymbol, methodName); } else { // May be a managed C++ test assembly... 
string fullMethodName = declaringTypeName.Replace(".", "::"); fullMethodName = fullMethodName + "::" + methodName; methodSymbol = this.GetTypeSymbol(fullMethodName, SymTagEnum.SymTagFunction); } if (methodSymbol != null) { navigationData = this.GetSymbolNavigationData(methodSymbol); } return navigationData; } private DiaNavigationData GetSymbolNavigationData(IDiaSymbol symbol) { ValidateArg.NotNull(symbol, "symbol"); DiaNavigationData navigationData = new DiaNavigationData(null, int.MaxValue, int.MinValue); IDiaEnumLineNumbers lines = null; try { this.session.findLinesByAddr(symbol.addressSection, symbol.addressOffset, (uint)symbol.length, out lines); uint celt; IDiaLineNumber lineNumber; while (true) { lines.Next(1, out lineNumber, out celt); if (celt != 1) { break; } IDiaSourceFile sourceFile = null; try { sourceFile = lineNumber.sourceFile; // The magic hex constant below works around weird data reported from GetSequencePoints. // The constant comes from ILDASM's source code, which performs essentially the same test. const uint Magic = 0xFEEFEE; if (lineNumber.lineNumber >= Magic || lineNumber.lineNumberEnd >= Magic) { continue; } navigationData.FileName = sourceFile.fileName; navigationData.MinLineNumber = Math.Min(navigationData.MinLineNumber, (int)lineNumber.lineNumber); navigationData.MaxLineNumber = Math.Max(navigationData.MaxLineNumber, (int)lineNumber.lineNumberEnd); } finally { ReleaseComObject(ref sourceFile); ReleaseComObject(ref lineNumber); } } } finally { ReleaseComObject(ref lines); } return navigationData; } private void PopulateCacheForTypeAndMethodSymbols() { IDiaEnumSymbols enumTypeSymbols = null; IDiaSymbol global = null; try { global = this.session.globalScope; global.findChildren(SymTagEnum.SymTagCompiland, null, 0, out enumTypeSymbols); uint celtTypeSymbol; IDiaSymbol typeSymbol = null; // NOTE:: // If foreach loop is used instead of Enumerator iterator, for some reason it leaves // the reference to pdb active, which prevents pdb from being rebuilt (in VS IDE scenario). 
enumTypeSymbols.Next(1, out typeSymbol, out celtTypeSymbol); while (celtTypeSymbol == 1 && null != typeSymbol) { this.typeSymbols[typeSymbol.name] = typeSymbol; IDiaEnumSymbols enumMethodSymbols = null; try { Dictionary<string, IDiaSymbol> methodSymbolsForType = new Dictionary<string, IDiaSymbol>(); typeSymbol.findChildren(SymTagEnum.SymTagFunction, null, 0, out enumMethodSymbols); uint celtMethodSymbol; IDiaSymbol methodSymbol = null; enumMethodSymbols.Next(1, out methodSymbol, out celtMethodSymbol); while (celtMethodSymbol == 1 && null != methodSymbol) { UpdateMethodSymbolCache(methodSymbol.name, methodSymbol, methodSymbolsForType); enumMethodSymbols.Next(1, out methodSymbol, out celtMethodSymbol); } this.methodSymbols[typeSymbol.name] = methodSymbolsForType; } catch (Exception ex) { if (EqtTrace.IsErrorEnabled) { EqtTrace.Error( "Ignoring the exception while iterating method symbols:{0} for type:{1}", ex, typeSymbol.name); } } finally { ReleaseComObject(ref enumMethodSymbols); } enumTypeSymbols.Next(1, out typeSymbol, out celtTypeSymbol); } } catch (Exception ex) { if (EqtTrace.IsErrorEnabled) { EqtTrace.Error("Ignoring the exception while iterating type symbols:{0}", ex); } } finally { ReleaseComObject(ref enumTypeSymbols); ReleaseComObject(ref global); } } private IDiaSymbol GetTypeSymbol(string typeName, SymTagEnum symTag) { ValidateArg.NotNullOrEmpty(typeName, "typeName"); IDiaEnumSymbols enumSymbols = null; IDiaSymbol typeSymbol = null; IDiaSymbol global = null; uint celt; try { typeName = typeName.Replace('+', '.'); if (this.typeSymbols.ContainsKey(typeName)) { return this.typeSymbols[typeName]; } global = this.session.globalScope; global.findChildren(symTag, typeName, 0, out enumSymbols); enumSymbols.Next(1, out typeSymbol, out celt); #if DEBUG if (typeSymbol == null) { IDiaEnumSymbols enumAllSymbols = null; try { global.findChildren(symTag, null, 0, out enumAllSymbols); List<string> children = new List<string>(); IDiaSymbol childSymbol = null; uint fetchedCount = 0; while (true) { enumAllSymbols.Next(1, out childSymbol, out fetchedCount); if (fetchedCount == 0 || childSymbol == null) { break; } children.Add(childSymbol.name); ReleaseComObject(ref childSymbol); } Debug.Assert(children.Count > 0); } finally { ReleaseComObject(ref enumAllSymbols); } } #endif } finally { ReleaseComObject(ref enumSymbols); ReleaseComObject(ref global); } if (null != typeSymbol) { this.typeSymbols[typeName] = typeSymbol; } return typeSymbol; } private IDiaSymbol GetMethodSymbol(IDiaSymbol typeSymbol, string methodName) { ValidateArg.NotNull(typeSymbol, "typeSymbol"); ValidateArg.NotNullOrEmpty(methodName, "methodName"); IDiaEnumSymbols enumSymbols = null; IDiaSymbol methodSymbol = null; Dictionary<string, IDiaSymbol> methodSymbolsForType; try { if (this.methodSymbols.ContainsKey(typeSymbol.name)) { methodSymbolsForType = this.methodSymbols[typeSymbol.name]; if (methodSymbolsForType.ContainsKey(methodName)) { return methodSymbolsForType[methodName]; } } else { methodSymbolsForType = new Dictionary<string, IDiaSymbol>(); this.methodSymbols[typeSymbol.name] = methodSymbolsForType; } typeSymbol.findChildren(SymTagEnum.SymTagFunction, methodName, 0, out enumSymbols); uint celtFetched; enumSymbols.Next(1, out methodSymbol, out celtFetched); #if DEBUG if (methodSymbol == null) { IDiaEnumSymbols enumAllSymbols = null; try { typeSymbol.findChildren(SymTagEnum.SymTagFunction, null, 0, out enumAllSymbols); List<string> children = new List<string>(); IDiaSymbol childSymbol = null; uint fetchedCount = 0; while 
(true) { enumAllSymbols.Next(1, out childSymbol, out fetchedCount); if (fetchedCount == 0 || childSymbol == null) { break; } children.Add(childSymbol.name); ReleaseComObject(ref childSymbol); } Debug.Assert(children.Count > 0); } finally { ReleaseComObject(ref enumAllSymbols); } } #endif } finally { ReleaseComObject(ref enumSymbols); } if (null != methodSymbol) { methodSymbolsForType[methodName] = methodSymbol; } return methodSymbol; } /// <summary> /// Update the method symbol cache. /// </summary> private static void UpdateMethodSymbolCache(string methodName, IDiaSymbol methodSymbol, Dictionary<string, IDiaSymbol> methodSymbolCache) { Debug.Assert(!string.IsNullOrEmpty(methodName), "MethodName cannot be empty."); Debug.Assert(methodSymbol != null, "Method symbol cannot be null."); Debug.Assert(methodSymbolCache != null, "Method symbol cache cannot be null."); // #827589, In case a type has overloaded methods, then there could be a method already in the // cache which should be disposed. IDiaSymbol oldSymbol; if (methodSymbolCache.TryGetValue(methodName, out oldSymbol)) { ReleaseComObject(ref oldSymbol); } methodSymbolCache[methodName] = methodSymbol; } private static void ReleaseComObject<T>(ref T obj) where T : class { if (obj != null) { Marshal.FinalReleaseComObject(obj); obj = null; } } private string GetManifestFileForRegFreeCom() { var currentDirectory = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location); var manifestFieName = string.Empty; if (IntPtr.Size == 4) { manifestFieName = ManifestFileNameX86; } else if (IntPtr.Size == 8) { manifestFieName = ManifestFileNameX64; } var manifestFile = Path.Combine(currentDirectory, manifestFieName); if (!File.Exists(manifestFile)) { throw new TestPlatformException(string.Format("Could not find the manifest file {0} for Registry free Com registration.", manifestFile)); } return manifestFile; } private void Dispose(bool disposing) { if (!this.isDisposed) { if (disposing) { foreach (Dictionary<string, IDiaSymbol> methodSymbolsForType in this.methodSymbols.Values) { foreach (IDiaSymbol methodSymbol in methodSymbolsForType.Values) { IDiaSymbol symToRelease = methodSymbol; ReleaseComObject(ref symToRelease); } methodSymbolsForType.Clear(); } this.methodSymbols.Clear(); this.methodSymbols = null; foreach (IDiaSymbol typeSymbol in this.typeSymbols.Values) { IDiaSymbol symToRelease = typeSymbol; ReleaseComObject(ref symToRelease); } this.typeSymbols.Clear(); this.typeSymbols = null; ReleaseComObject(ref this.session); ReleaseComObject(ref this.source); } this.isDisposed = true; } } } #endif }
1
11,393
There should be a space between lines
microsoft-vstest
.cs
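Although this review comment is only a whitespace nit, the UpdateMethodSymbolCache code in the record above shows a pattern worth spelling out: when overloaded methods share a name, the previously cached COM symbol is released before the cache entry is overwritten. Below is a rough Python sketch of that release-before-replace idea; Handle and update_cache are placeholder names of mine, not part of the vstest code.

class Handle:
    """Stand-in for an unmanaged resource such as a COM symbol object."""
    def __init__(self, name):
        self.name = name
        self.released = False

    def release(self):
        self.released = True


def update_cache(cache, key, new_handle):
    # Release any handle already cached under this key (e.g. an earlier overload
    # of the same method name) before overwriting it, so it is not leaked.
    old = cache.get(key)
    if old is not None:
        old.release()
    cache[key] = new_handle


cache = {}
first = Handle("DoWork(int)")
update_cache(cache, "DoWork", first)
update_cache(cache, "DoWork", Handle("DoWork(string)"))
print(first.released)  # True: the earlier entry was released before being replaced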
@@ -6,7 +6,14 @@ public static byte[] GeneratePrivateKey() { - byte[] bytes = new byte[32]; + var bytes = new byte[32]; + SecureRandom.GetBytes(bytes); + return bytes; + } + + public static byte[] GenerateRandomBytes(int lenght) + { + var bytes = new byte[lenght]; SecureRandom.GetBytes(bytes); return bytes; }
1
namespace Nevermind.Core.Crypto { public static class Random { private static readonly System.Security.Cryptography.RandomNumberGenerator SecureRandom = new System.Security.Cryptography.RNGCryptoServiceProvider(); public static byte[] GeneratePrivateKey() { byte[] bytes = new byte[32]; SecureRandom.GetBytes(bytes); return bytes; } } }
1
22,272
If it is not going to be behind an interface, then we can equally use SecureRandom.GetBytes directly; otherwise, let us push it behind ISecureRandom so we can test wherever this class is used.
NethermindEth-nethermind
.cs
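The reviewer's suggestion is to hide the secure random source behind an ISecureRandom interface so that consumers can be tested. A rough Python analogue of that idea follows, using an injectable byte source; RandomSource, FixedRandomSource and PrivateKeyFactory are placeholder names of mine, not Nethermind types.

import os


class RandomSource:
    def random_bytes(self, length: int) -> bytes:
        return os.urandom(length)  # cryptographically secure source for production use


class FixedRandomSource(RandomSource):
    def __init__(self, value: bytes):
        self._value = value

    def random_bytes(self, length: int) -> bytes:
        return self._value[:length]  # deterministic source for tests


class PrivateKeyFactory:
    def __init__(self, source: RandomSource):
        self._source = source

    def generate_private_key(self) -> bytes:
        return self._source.random_bytes(32)


# In tests, the factory can be driven by a fixed source instead of os.urandom:
assert PrivateKeyFactory(FixedRandomSource(b"\x01" * 32)).generate_private_key() == b"\x01" * 32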
@@ -1,4 +1,4 @@ -<figure class="video_tutorial card"> +<figure class="video_tutorial tile"> <%= link_to video_tutorial, title: "The #{video_tutorial.name} online video tutorial details" do %> <h5>Video Tutorial</h5> <%= image_tag(video_tutorial.product_image.url) %>
1
<figure class="video_tutorial card"> <%= link_to video_tutorial, title: "The #{video_tutorial.name} online video tutorial details" do %> <h5>Video Tutorial</h5> <%= image_tag(video_tutorial.product_image.url) %> <h4><%= video_tutorial.name %></h4> <p><%= video_tutorial.tagline %></p> <% end %> </figure>
1
13,403
Probably will undo this change for now, as I'm just targeting Weekly Iteration in this PR.
thoughtbot-upcase
rb
@@ -53,8 +53,6 @@ static size_t expectedNumberOfContextVariables; static int64_t expectedContextVariableValue; -static std::unordered_map<size_t, Kokkos::Tools::Experimental::SetOrRange> - candidate_value_map; int main() { Kokkos::initialize();
1
/* //@HEADER // ************************************************************************ // // Kokkos v. 3.0 // Copyright (2020) National Technology & Engineering // Solutions of Sandia, LLC (NTESS). // // Under the terms of Contract DE-NA0003525 with NTESS, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact Christian R. 
Trott ([email protected]) // // ************************************************************************ //@HEADER */ // This file tests the primitives of the Tuning system #include <iostream> #include <Kokkos_Core.hpp> #include <stdexcept> #include <string> #include <unordered_map> #include <vector> static size_t expectedNumberOfContextVariables; static int64_t expectedContextVariableValue; static std::unordered_map<size_t, Kokkos::Tools::Experimental::SetOrRange> candidate_value_map; int main() { Kokkos::initialize(); { auto context = Kokkos::Tools::Experimental::get_new_context_id(); Kokkos::Tools::Experimental::VariableInfo contextVariableInfo; contextVariableInfo.category = Kokkos::Tools::Experimental:: StatisticalCategory::kokkos_value_categorical; contextVariableInfo.type = Kokkos::Tools::Experimental::ValueType::kokkos_value_int64; contextVariableInfo.valueQuantity = Kokkos::Tools::Experimental::CandidateValueType::kokkos_value_unbounded; Kokkos::Tools::Experimental::VariableInfo tuningVariableInfo; tuningVariableInfo.category = Kokkos::Tools::Experimental:: StatisticalCategory::kokkos_value_categorical; tuningVariableInfo.type = Kokkos::Tools::Experimental::ValueType::kokkos_value_int64; tuningVariableInfo.valueQuantity = Kokkos::Tools::Experimental::CandidateValueType::kokkos_value_set; std::vector<int64_t> candidate_value_vector = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; Kokkos::Tools::Experimental::SetOrRange allowed_values = Kokkos::Tools::Experimental::make_candidate_set( candidate_value_vector.size(), candidate_value_vector.data()); // test that ID's are transmitted to the tool Kokkos::Tools::Experimental::set_declare_output_type_callback( [](const char*, const size_t, Kokkos::Tools::Experimental::VariableInfo* info) { if (info->type != Kokkos::Tools::Experimental::ValueType::kokkos_value_int64) { throw(std::runtime_error("Tuning Variable has wrong type")); } }); Kokkos::Tools::Experimental::set_declare_input_type_callback( [](const char*, const size_t, Kokkos::Tools::Experimental::VariableInfo* info) { if (info->type != Kokkos::Tools::Experimental::ValueType::kokkos_value_int64) { throw(std::runtime_error("Context Variable has wrong type")); } }); tuningVariableInfo.candidates = allowed_values; auto contextVariableId = Kokkos::Tools::Experimental::declare_input_type( "kokkos.testing.context_variable", contextVariableInfo); auto tuningVariableId = Kokkos::Tools::Experimental::declare_output_type( "kokkos.testing.tuning_variable", tuningVariableInfo); // test that we correctly pass context values, and receive tuning variables // back in return Kokkos::Tools::Experimental::VariableValue contextValues[] = { Kokkos::Tools::Experimental::make_variable_value(contextVariableId, int64_t(0))}; Kokkos::Tools::Experimental::set_input_values(context, 1, contextValues); Kokkos::Tools::Experimental::set_request_output_values_callback( [](const size_t, const size_t, const Kokkos::Tools::Experimental::VariableValue* context_values, const size_t, Kokkos::Tools::Experimental::VariableValue* tuning_values) { auto candidate_values = tuning_values[0].metadata->candidates; if (context_values[0].value.int_value != expectedContextVariableValue) { throw std::runtime_error( "Context variables not correctly passed to tuning callbacks"); } int tuningVariableSetSize = candidate_values.set.size; std::cout << "Set of size " << tuningVariableSetSize << std::endl; // tuning methodology via https://xkcd.com/221/ tuning_values[0].value.int_value = candidate_values.set.values.int_value[4 % tuningVariableSetSize]; }); 
Kokkos::Tools::Experimental::VariableValue tuningValues[] = { Kokkos::Tools::Experimental::make_variable_value(tuningVariableId, int64_t(0))}; Kokkos::Tools::Experimental::request_output_values(context, 1, tuningValues); std::cout << tuningValues[0].value.int_value << "," << candidate_value_vector[4] << std::endl; if (tuningValues[0].value.int_value != candidate_value_vector[4]) { throw std::runtime_error("Tuning value return is incorrect"); } Kokkos::Tools::Experimental::end_context(context); // test nested contexts auto outerContext = Kokkos::Tools::Experimental::get_new_context_id(); auto innerContext = Kokkos::Tools::Experimental::get_new_context_id(); Kokkos::Tools::Experimental::VariableInfo secondContextVariableInfo; secondContextVariableInfo.category = Kokkos::Tools::Experimental:: StatisticalCategory::kokkos_value_categorical; secondContextVariableInfo.type = Kokkos::Tools::Experimental::ValueType::kokkos_value_int64; secondContextVariableInfo.valueQuantity = Kokkos::Tools::Experimental::CandidateValueType::kokkos_value_unbounded; auto secondContextVariableId = Kokkos::Tools::Experimental::declare_output_type( "kokkos.testing.second_context_variable", secondContextVariableInfo); Kokkos::Tools::Experimental::VariableValue contextValueTwo[] = { Kokkos::Tools::Experimental::make_variable_value( secondContextVariableId, int64_t(1))}; Kokkos::Tools::Experimental::set_request_output_values_callback( [](const size_t, const size_t num_context_variables, const Kokkos::Tools::Experimental::VariableValue*, const size_t, Kokkos::Tools::Experimental::VariableValue*) { std::cout << "Expect " << expectedNumberOfContextVariables << ", have " << num_context_variables << std::endl; if (num_context_variables != expectedNumberOfContextVariables) { throw( std::runtime_error("Incorrect number of context variables in " "nested tuning contexts")); } }); Kokkos::Tools::Experimental::set_input_values(outerContext, 1, contextValues); expectedNumberOfContextVariables = 1; Kokkos::Tools::Experimental::request_output_values(outerContext, 1, tuningValues); Kokkos::Tools::Experimental::set_input_values(innerContext, 1, contextValueTwo); expectedNumberOfContextVariables = 2; Kokkos::Tools::Experimental::request_output_values(innerContext, 1, tuningValues); } // end Kokkos block Kokkos::finalize(); }
1
25,537
Unused parameter warning? How come we did not catch that before? In any case, please open another PR for this.
kokkos-kokkos
cpp
@@ -60,9 +60,12 @@ def average_precision(recalls, precisions, mode='area'): def tpfp_imagenet(det_bboxes, gt_bboxes, gt_bboxes_ignore=None, + gt_bboxes_group_of=None, default_iou_thr=0.5, + ioa_thr=None, area_ranges=None, - use_legacy_coordinate=False): + use_legacy_coordinate=False, + use_group_of=False): """Check if detected bboxes are true positive or false positive. Args:
1
# Copyright (c) OpenMMLab. All rights reserved. from multiprocessing import Pool import mmcv import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from .bbox_overlaps import bbox_overlaps from .class_names import get_classes def average_precision(recalls, precisions, mode='area'): """Calculate average precision (for single or multiple scales). Args: recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) mode (str): 'area' or '11points', 'area' means calculating the area under precision-recall curve, '11points' means calculating the average precision of recalls at [0, 0.1, ..., 1] Returns: float or ndarray: calculated average precision """ no_scale = False if recalls.ndim == 1: no_scale = True recalls = recalls[np.newaxis, :] precisions = precisions[np.newaxis, :] assert recalls.shape == precisions.shape and recalls.ndim == 2 num_scales = recalls.shape[0] ap = np.zeros(num_scales, dtype=np.float32) if mode == 'area': zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) ones = np.ones((num_scales, 1), dtype=recalls.dtype) mrec = np.hstack((zeros, recalls, ones)) mpre = np.hstack((zeros, precisions, zeros)) for i in range(mpre.shape[1] - 1, 0, -1): mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) for i in range(num_scales): ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] ap[i] = np.sum( (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) elif mode == '11points': for i in range(num_scales): for thr in np.arange(0, 1 + 1e-3, 0.1): precs = precisions[i, recalls[i, :] >= thr] prec = precs.max() if precs.size > 0 else 0 ap[i] += prec ap /= 11 else: raise ValueError( 'Unrecognized mode, only "area" and "11points" are supported') if no_scale: ap = ap[0] return ap def tpfp_imagenet(det_bboxes, gt_bboxes, gt_bboxes_ignore=None, default_iou_thr=0.5, area_ranges=None, use_legacy_coordinate=False): """Check if detected bboxes are true positive or false positive. Args: det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, of shape (k, 4). Default: None default_iou_thr (float): IoU threshold to be considered as matched for medium and large bboxes (small ones have special rules). Default: 0.5. area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, in the format [(min1, max1), (min2, max2), ...]. Default: None. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. Returns: tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of each array is (num_scales, m). """ if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. # an indicator of ignored gts gt_ignore_inds = np.concatenate( (np.zeros(gt_bboxes.shape[0], dtype=np.bool), np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) # stack gt_bboxes and gt_bboxes_ignore for convenience gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) num_dets = det_bboxes.shape[0] num_gts = gt_bboxes.shape[0] if area_ranges is None: area_ranges = [(None, None)] num_scales = len(area_ranges) # tp and fp are of shape (num_scales, num_gts), each row is tp or fp # of a certain scale. 
tp = np.zeros((num_scales, num_dets), dtype=np.float32) fp = np.zeros((num_scales, num_dets), dtype=np.float32) if gt_bboxes.shape[0] == 0: if area_ranges == [(None, None)]: fp[...] = 1 else: det_areas = ( det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) for i, (min_area, max_area) in enumerate(area_ranges): fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 return tp, fp ious = bbox_overlaps( det_bboxes, gt_bboxes - 1, use_legacy_coordinate=use_legacy_coordinate) gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)), default_iou_thr) # sort all detections by scores in descending order sort_inds = np.argsort(-det_bboxes[:, -1]) for k, (min_area, max_area) in enumerate(area_ranges): gt_covered = np.zeros(num_gts, dtype=bool) # if no area range is specified, gt_area_ignore is all False if min_area is None: gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) else: gt_areas = gt_w * gt_h gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) for i in sort_inds: max_iou = -1 matched_gt = -1 # find best overlapped available gt for j in range(num_gts): # different from PASCAL VOC: allow finding other gts if the # best overlapped ones are already matched by other det bboxes if gt_covered[j]: continue elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou: max_iou = ious[i, j] matched_gt = j # there are 4 cases for a det bbox: # 1. it matches a gt, tp = 1, fp = 0 # 2. it matches an ignored gt, tp = 0, fp = 0 # 3. it matches no gt and within area range, tp = 0, fp = 1 # 4. it matches no gt but is beyond area range, tp = 0, fp = 0 if matched_gt >= 0: gt_covered[matched_gt] = 1 if not (gt_ignore_inds[matched_gt] or gt_area_ignore[matched_gt]): tp[k, i] = 1 elif min_area is None: fp[k, i] = 1 else: bbox = det_bboxes[i, :4] area = (bbox[2] - bbox[0] + extra_length) * ( bbox[3] - bbox[1] + extra_length) if area >= min_area and area < max_area: fp[k, i] = 1 return tp, fp def tpfp_default(det_bboxes, gt_bboxes, gt_bboxes_ignore=None, iou_thr=0.5, area_ranges=None, use_legacy_coordinate=False): """Check if detected bboxes are true positive or false positive. Args: det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, of shape (k, 4). Default: None iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, in the format [(min1, max1), (min2, max2), ...]. Default: None. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. Returns: tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of each array is (num_scales, m). """ if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. 
# an indicator of ignored gts gt_ignore_inds = np.concatenate( (np.zeros(gt_bboxes.shape[0], dtype=np.bool), np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) # stack gt_bboxes and gt_bboxes_ignore for convenience gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) num_dets = det_bboxes.shape[0] num_gts = gt_bboxes.shape[0] if area_ranges is None: area_ranges = [(None, None)] num_scales = len(area_ranges) # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of # a certain scale tp = np.zeros((num_scales, num_dets), dtype=np.float32) fp = np.zeros((num_scales, num_dets), dtype=np.float32) # if there is no gt bboxes in this image, then all det bboxes # within area range are false positives if gt_bboxes.shape[0] == 0: if area_ranges == [(None, None)]: fp[...] = 1 else: det_areas = ( det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) for i, (min_area, max_area) in enumerate(area_ranges): fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 return tp, fp ious = bbox_overlaps( det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate) # for each det, the max iou with all gts ious_max = ious.max(axis=1) # for each det, which gt overlaps most with it ious_argmax = ious.argmax(axis=1) # sort all dets in descending order by scores sort_inds = np.argsort(-det_bboxes[:, -1]) for k, (min_area, max_area) in enumerate(area_ranges): gt_covered = np.zeros(num_gts, dtype=bool) # if no area range is specified, gt_area_ignore is all False if min_area is None: gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) else: gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length) gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) for i in sort_inds: if ious_max[i] >= iou_thr: matched_gt = ious_argmax[i] if not (gt_ignore_inds[matched_gt] or gt_area_ignore[matched_gt]): if not gt_covered[matched_gt]: gt_covered[matched_gt] = True tp[k, i] = 1 else: fp[k, i] = 1 # otherwise ignore this detected bbox, tp = 0, fp = 0 elif min_area is None: fp[k, i] = 1 else: bbox = det_bboxes[i, :4] area = (bbox[2] - bbox[0] + extra_length) * ( bbox[3] - bbox[1] + extra_length) if area >= min_area and area < max_area: fp[k, i] = 1 return tp, fp def get_cls_results(det_results, annotations, class_id): """Get det results and gt information of a certain class. Args: det_results (list[list]): Same as `eval_map()`. annotations (list[dict]): Same as `eval_map()`. class_id (int): ID of a specific class. Returns: tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes """ cls_dets = [img_res[class_id] for img_res in det_results] cls_gts = [] cls_gts_ignore = [] for ann in annotations: gt_inds = ann['labels'] == class_id cls_gts.append(ann['bboxes'][gt_inds, :]) if ann.get('labels_ignore', None) is not None: ignore_inds = ann['labels_ignore'] == class_id cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :]) else: cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32)) return cls_dets, cls_gts, cls_gts_ignore def eval_map(det_results, annotations, scale_ranges=None, iou_thr=0.5, dataset=None, logger=None, tpfp_fn=None, nproc=4, use_legacy_coordinate=False): """Evaluate mAP of a dataset. Args: det_results (list[list]): [[cls1_det, cls2_det, ...], ...]. The outer list indicates images, and the inner list indicates per-class detected bboxes. annotations (list[dict]): Ground truth annotations where each item of the list indicates an image. 
Keys of annotations are: - `bboxes`: numpy array of shape (n, 4) - `labels`: numpy array of shape (n, ) - `bboxes_ignore` (optional): numpy array of shape (k, 4) - `labels_ignore` (optional): numpy array of shape (k, ) scale_ranges (list[tuple] | None): Range of scales to be evaluated, in the format [(min1, max1), (min2, max2), ...]. A range of (32, 64) means the area range between (32**2, 64**2). Default: None. iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. dataset (list[str] | str | None): Dataset name or dataset classes, there are minor differences in metrics for different datasets, e.g. "voc07", "imagenet_det", etc. Default: None. logger (logging.Logger | str | None): The way to print the mAP summary. See `mmcv.utils.print_log()` for details. Default: None. tpfp_fn (callable | None): The function used to determine true/ false positives. If None, :func:`tpfp_default` is used as default unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this case). If it is given as a function, then this function is used to evaluate tp & fp. Default None. nproc (int): Processes used for computing TP and FP. Default: 4. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. Returns: tuple: (mAP, [dict, dict, ...]) """ assert len(det_results) == len(annotations) if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. num_imgs = len(det_results) num_scales = len(scale_ranges) if scale_ranges is not None else 1 num_classes = len(det_results[0]) # positive class num area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] if scale_ranges is not None else None) pool = Pool(nproc) eval_results = [] for i in range(num_classes): # get gt and det bboxes of this class cls_dets, cls_gts, cls_gts_ignore = get_cls_results( det_results, annotations, i) # choose proper function according to datasets to compute tp and fp if tpfp_fn is None: if dataset in ['det', 'vid']: tpfp_fn = tpfp_imagenet else: tpfp_fn = tpfp_default if not callable(tpfp_fn): raise ValueError( f'tpfp_fn has to be a function or None, but got {tpfp_fn}') # compute tp and fp for each image with multiple processes tpfp = pool.starmap( tpfp_fn, zip(cls_dets, cls_gts, cls_gts_ignore, [iou_thr for _ in range(num_imgs)], [area_ranges for _ in range(num_imgs)], [use_legacy_coordinate for _ in range(num_imgs)])) tp, fp = tuple(zip(*tpfp)) # calculate gt number of each scale # ignored gts or gts beyond the specific scale are not counted num_gts = np.zeros(num_scales, dtype=int) for j, bbox in enumerate(cls_gts): if area_ranges is None: num_gts[0] += bbox.shape[0] else: gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * ( bbox[:, 3] - bbox[:, 1] + extra_length) for k, (min_area, max_area) in enumerate(area_ranges): num_gts[k] += np.sum((gt_areas >= min_area) & (gt_areas < max_area)) # sort all det bboxes by score, also sort tp and fp cls_dets = np.vstack(cls_dets) num_dets = cls_dets.shape[0] sort_inds = np.argsort(-cls_dets[:, -1]) tp = np.hstack(tp)[:, sort_inds] fp = np.hstack(fp)[:, sort_inds] # calculate recall and precision with tp and fp tp = np.cumsum(tp, axis=1) fp = np.cumsum(fp, axis=1) eps = np.finfo(np.float32).eps recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) precisions = tp / np.maximum((tp + fp), eps) # calculate AP if scale_ranges is None: recalls = recalls[0, :] precisions = precisions[0, :] num_gts = num_gts.item() mode = 'area' if dataset != 
'voc07' else '11points' ap = average_precision(recalls, precisions, mode) eval_results.append({ 'num_gts': num_gts, 'num_dets': num_dets, 'recall': recalls, 'precision': precisions, 'ap': ap }) pool.close() if scale_ranges is not None: # shape (num_classes, num_scales) all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) all_num_gts = np.vstack( [cls_result['num_gts'] for cls_result in eval_results]) mean_ap = [] for i in range(num_scales): if np.any(all_num_gts[:, i] > 0): mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) else: mean_ap.append(0.0) else: aps = [] for cls_result in eval_results: if cls_result['num_gts'] > 0: aps.append(cls_result['ap']) mean_ap = np.array(aps).mean().item() if aps else 0.0 print_map_summary( mean_ap, eval_results, dataset, area_ranges, logger=logger) return mean_ap, eval_results def print_map_summary(mean_ap, results, dataset=None, scale_ranges=None, logger=None): """Print mAP and results of each class. A table will be printed to show the gts/dets/recall/AP of each class and the mAP. Args: mean_ap (float): Calculated from `eval_map()`. results (list[dict]): Calculated from `eval_map()`. dataset (list[str] | str | None): Dataset name or dataset classes. scale_ranges (list[tuple] | None): Range of scales to be evaluated. logger (logging.Logger | str | None): The way to print the mAP summary. See `mmcv.utils.print_log()` for details. Default: None. """ if logger == 'silent': return if isinstance(results[0]['ap'], np.ndarray): num_scales = len(results[0]['ap']) else: num_scales = 1 if scale_ranges is not None: assert len(scale_ranges) == num_scales num_classes = len(results) recalls = np.zeros((num_scales, num_classes), dtype=np.float32) aps = np.zeros((num_scales, num_classes), dtype=np.float32) num_gts = np.zeros((num_scales, num_classes), dtype=int) for i, cls_result in enumerate(results): if cls_result['recall'].size > 0: recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] aps[:, i] = cls_result['ap'] num_gts[:, i] = cls_result['num_gts'] if dataset is None: label_names = [str(i) for i in range(num_classes)] elif mmcv.is_str(dataset): label_names = get_classes(dataset) else: label_names = dataset if not isinstance(mean_ap, list): mean_ap = [mean_ap] header = ['class', 'gts', 'dets', 'recall', 'ap'] for i in range(num_scales): if scale_ranges is not None: print_log(f'Scale range {scale_ranges[i]}', logger=logger) table_data = [header] for j in range(num_classes): row_data = [ label_names[j], num_gts[i, j], results[j]['num_dets'], f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}' ] table_data.append(row_data) table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}']) table = AsciiTable(table_data) table.inner_footing_row_border = True print_log('\n' + table.table, logger=logger)
1
26,137
I recommend moving all OpenImages-related logic to a new function
open-mmlab-mmdetection
py
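The reviewer recommends factoring the OpenImages-specific (group-of) handling out of tpfp_imagenet instead of threading extra parameters through it. Below is a rough sketch of what such a helper could look like; split_group_of_gts is a hypothetical name, not mmdetection's actual API.

import numpy as np


def split_group_of_gts(gt_bboxes, gt_is_group_of):
    """Separate plain ground-truth boxes from group-of boxes so the caller can
    match against them with different thresholds (e.g. IoU vs IoA)."""
    gt_is_group_of = np.asarray(gt_is_group_of, dtype=bool)
    return gt_bboxes[~gt_is_group_of], gt_bboxes[gt_is_group_of]


gt = np.array([[0, 0, 10, 10], [5, 5, 50, 50]], dtype=np.float32)
plain, group_of = split_group_of_gts(gt, [False, True])
print(plain.shape, group_of.shape)  # (1, 4) (1, 4)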
@@ -12,6 +12,8 @@ const ( type Cgroup struct { Name string `json:"name"` + // indicates that this is an externally passed, fully created cgroup + ExternalCgroup bool `json:"external_cgroup"` // name of parent cgroup or slice Parent string `json:"parent"`
1
// +build linux freebsd package configs type FreezerState string const ( Undefined FreezerState = "" Frozen FreezerState = "FROZEN" Thawed FreezerState = "THAWED" ) type Cgroup struct { Name string `json:"name"` // name of parent cgroup or slice Parent string `json:"parent"` // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list. AllowAllDevices bool `json:"allow_all_devices"` AllowedDevices []*Device `json:"allowed_devices"` DeniedDevices []*Device `json:"denied_devices"` // Memory limit (in bytes) Memory int64 `json:"memory"` // Memory reservation or soft_limit (in bytes) MemoryReservation int64 `json:"memory_reservation"` // Total memory usage (memory + swap); set `-1' to disable swap MemorySwap int64 `json:"memory_swap"` // Kernel memory limit (in bytes) KernelMemory int64 `json:"kernel_memory"` // CPU shares (relative weight vs. other containers) CpuShares int64 `json:"cpu_shares"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. CpuQuota int64 `json:"cpu_quota"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. CpuPeriod int64 `json:"cpu_period"` // How many time CPU will use in realtime scheduling (in usecs). CpuRtRuntime int64 `json:"cpu_quota"` // CPU period to be used for realtime scheduling (in usecs). CpuRtPeriod int64 `json:"cpu_period"` // CPU to use CpusetCpus string `json:"cpuset_cpus"` // MEM to use CpusetMems string `json:"cpuset_mems"` // Specifies per cgroup weight, range is from 10 to 1000. BlkioWeight uint16 `json:"blkio_weight"` // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only BlkioLeafWeight uint16 `json:"blkio_leaf_weight"` // Weight per cgroup per device, can override BlkioWeight. BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"` // IO read rate limit per cgroup per device, bytes per second. BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"` // IO write rate limit per cgroup per divice, bytes per second. BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"` // IO read rate limit per cgroup per device, IO per second. BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"` // IO write rate limit per cgroup per device, IO per second. BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"` // set the freeze value for the process Freezer FreezerState `json:"freezer"` // Hugetlb limit (in bytes) HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"` // Parent slice to use for systemd TODO: remove in favor or parent Slice string `json:"slice"` // Whether to disable OOM Killer OomKillDisable bool `json:"oom_kill_disable"` // Tuning swappiness behaviour per cgroup MemorySwappiness int64 `json:"memory_swappiness"` // Set priority of network traffic for container NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"` // Set class identifier for container's network packets NetClsClassid string `json:"net_cls_classid"` }
1
8,796
We shouldn't have a bool for this. If there is a path passed, then we use it. Resource may or may not be empty.
opencontainers-runc
go
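The reviewer's point is that the presence of a path can itself signal an externally managed cgroup, making a separate boolean redundant. A small Python sketch of that presence-instead-of-flag style follows; the field names are placeholders of mine, not runc's.

from dataclasses import dataclass
from typing import Optional


@dataclass
class CgroupConfig:
    name: str
    external_path: Optional[str] = None  # None means "create and manage the cgroup ourselves"

    @property
    def is_external(self) -> bool:
        # No separate flag needed: a supplied path implies the cgroup is external.
        return self.external_path is not None


managed = CgroupConfig(name="mycontainer")
external = CgroupConfig(name="mycontainer", external_path="/sys/fs/cgroup/mygroup")
print(managed.is_external, external.is_external)  # False True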
@@ -0,0 +1,16 @@ +<ul class="teachers"> + <li> + <%= teacher_gravatar section_teacher.teacher, 92 %> + <h2> + <span> + <%= t '.header', + city: section_teacher.section.city, + count: @section_teachers.size %> + </span> + <%= section_teacher.teacher.name %> + </h2> + <section class="bio"> + <%= raw textilize(section_teacher.teacher.bio) %> + </section> + </li> +</ul>
1
1
6,609
I like how you used the count on the translate call to handle this.
thoughtbot-upcase
rb
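The comment praises passing count: to the translate call so pluralisation is handled by the i18n layer rather than in the view. A rough Python sketch of the same idea, where the lookup picks a singular or plural template from a hypothetical locale table (none of these keys come from the Upcase codebase):

TRANSLATIONS = {
    "teachers.header": {"one": "Your teacher in {city}", "other": "Your teachers in {city}"},
}


def translate(key, count, **kwargs):
    # Choose the singular or plural form based on the count, like t(key, count: ...) in Rails.
    forms = TRANSLATIONS[key]
    template = forms["one"] if count == 1 else forms["other"]
    return template.format(**kwargs)


print(translate("teachers.header", count=1, city="Boston"))  # Your teacher in Boston
print(translate("teachers.header", count=3, city="Boston"))  # Your teachers in Boston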
@@ -139,11 +139,13 @@ class QueryBuilder implements QueryBuilderInterface $finalQuery->setString($this->getNormalizedQueryString($query)); } $string = $finalQuery->getString() ?: '*:*'; - // Highlighting is enabled if we have a field list set. $highlight = !empty($this->fieldsToHighlight); if ($handler = $this->getSearchHandler($finalQuery->getHandler(), $string)) { + if ($handler->hasDismax()) { + $string = $handler->prepareDismaxQueryString($string); + } if (!$handler->hasExtendedDismax() && $this->getLuceneHelper()->containsAdvancedLuceneSyntax($string) ) {
1
<?php /** * SOLR QueryBuilder. * * PHP version 7 * * Copyright (C) Villanova University 2010. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * @category VuFind * @package Search * @author Andrew S. Nagy <[email protected]> * @author David Maus <[email protected]> * @author Demian Katz <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org */ namespace VuFindSearch\Backend\Solr; use VuFindSearch\ParamBag; use VuFindSearch\Query\AbstractQuery; use VuFindSearch\Query\Query; use VuFindSearch\Query\QueryGroup; /** * SOLR QueryBuilder. * * @category VuFind * @package Search * @author Andrew S. Nagy <[email protected]> * @author David Maus <[email protected]> * @author Demian Katz <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org */ class QueryBuilder implements QueryBuilderInterface { /** * Default dismax handler (if no DismaxHandler set in specs). * * @var string */ protected $defaultDismaxHandler; /** * Search specs. * * @var array */ protected $specs = []; /** * Search specs for exact searches. * * @var array */ protected $exactSpecs = []; /** * Solr fields to highlight. Also serves as a flag for whether to perform * highlight-specific behavior; if the field list is empty, highlighting is * skipped. * * @var string */ protected $fieldsToHighlight = ''; /** * Should we create the spellcheck.q parameter when appropriate? * * @var bool */ protected $createSpellingQuery = false; /** * Lucene syntax helper * * @var LuceneSyntaxHelper */ protected $luceneHelper = null; /** * Constructor. * * @param array $specs Search handler specifications * @param string $defaultDismaxHandler Default dismax handler (if no * DismaxHandler set in specs). * * @return void */ public function __construct(array $specs = [], $defaultDismaxHandler = 'dismax' ) { $this->defaultDismaxHandler = $defaultDismaxHandler; $this->setSpecs($specs); } /// Public API /** * Return SOLR search parameters based on a user query and params. * * @param AbstractQuery $query User query * * @return ParamBag */ public function build(AbstractQuery $query) { $params = new ParamBag(); // Add spelling query if applicable -- note that we must set this up before // we process the main query in order to avoid unwanted extra syntax: if ($this->createSpellingQuery) { $params->set( 'spellcheck.q', $this->getLuceneHelper()->extractSearchTerms($query->getAllTerms()) ); } if ($query instanceof QueryGroup) { $finalQuery = $this->reduceQueryGroup($query); } else { // Clone the query to avoid modifying the original user-visible query $finalQuery = clone $query; $finalQuery->setString($this->getNormalizedQueryString($query)); } $string = $finalQuery->getString() ?: '*:*'; // Highlighting is enabled if we have a field list set. 
$highlight = !empty($this->fieldsToHighlight); if ($handler = $this->getSearchHandler($finalQuery->getHandler(), $string)) { if (!$handler->hasExtendedDismax() && $this->getLuceneHelper()->containsAdvancedLuceneSyntax($string) ) { $string = $this->createAdvancedInnerSearchString($string, $handler); if ($handler->hasDismax()) { $oldString = $string; $string = $handler->createBoostQueryString($string); // If a boost was added, we don't want to highlight based on // the boost query, so we should use the non-boosted version: if ($highlight && $oldString != $string) { $params->set('hl.q', $oldString); } } } elseif ($handler->hasDismax()) { $params->set('qf', implode(' ', $handler->getDismaxFields())); $params->set('qt', $handler->getDismaxHandler()); foreach ($handler->getDismaxParams() as $param) { $params->add(reset($param), next($param)); } if ($handler->hasFilterQuery()) { $params->add('fq', $handler->getFilterQuery()); } } else { $string = $handler->createSimpleQueryString($string); } } // Set an appropriate highlight field list when applicable: if ($highlight) { $filter = $handler ? $handler->getAllFields() : []; $params->add('hl.fl', $this->getFieldsToHighlight($filter)); } $params->set('q', $string); return $params; } /** * Get list of fields to highlight, filtered by array. * * @param array $filter Field list to use as a filter. * * @return string */ protected function getFieldsToHighlight(array $filter = []) { // No filter? Return unmodified default: if (empty($filter)) { return $this->fieldsToHighlight; } // Account for possibility of comma OR space delimiters: $fields = array_map('trim', preg_split('/[, ]/', $this->fieldsToHighlight)); // Wildcard in field list? Return filter as-is; otherwise, use intersection. $list = in_array('*', $fields) ? $filter : array_intersect($fields, $filter); return implode(',', $list); } /** * Set list of fields to highlight, if any (or '*' for all). Set to an * empty string (the default) to completely disable highlighting-related * functionality. * * @param string $list Highlighting field list * * @return QueryBuilder */ public function setFieldsToHighlight($list) { $this->fieldsToHighlight = $list; return $this; } /** * Control whether or not the QueryBuilder should create a spellcheck.q * parameter. (Turned off by default). * * @param bool $enable Should spelling query generation be enabled? * * @return void */ public function setCreateSpellingQuery($enable) { $this->createSpellingQuery = $enable; } /** * Set query builder search specs. * * @param array $specs Search specs * * @return void */ public function setSpecs(array $specs) { foreach ($specs as $handler => $spec) { if (isset($spec['ExactSettings'])) { $this->exactSpecs[strtolower($handler)] = new SearchHandler( $spec['ExactSettings'], $this->defaultDismaxHandler ); unset($spec['ExactSettings']); } $this->specs[strtolower($handler)] = new SearchHandler($spec, $this->defaultDismaxHandler); } } /** * Get Lucene syntax helper * * @return LuceneSyntaxHelper */ public function getLuceneHelper() { if (null === $this->luceneHelper) { $this->luceneHelper = new LuceneSyntaxHelper(); } return $this->luceneHelper; } /** * Set Lucene syntax helper * * @param LuceneSyntaxHelper $helper Lucene syntax helper * * @return void */ public function setLuceneHelper(LuceneSyntaxHelper $helper) { $this->luceneHelper = $helper; } /// Internal API /** * Return named search handler. 
* * @param string $handler Search handler name * @param string $searchString Search query * * @return SearchHandler|null */ protected function getSearchHandler($handler, $searchString) { $handler = $handler ? strtolower($handler) : $handler; if ($handler) { // Since we will rarely have exactSpecs set, it is less expensive // to check for a handler first before doing multiple string // operations to determine eligibility for exact handling. if (isset($this->exactSpecs[$handler])) { $searchString = isset($searchString) ? trim($searchString) : ''; if (strlen($searchString) > 1 && substr($searchString, 0, 1) == '"' && substr($searchString, -1, 1) == '"' ) { return $this->exactSpecs[$handler]; } } if (isset($this->specs[$handler])) { return $this->specs[$handler]; } } return null; } /** * Reduce query group a single query. * * @param QueryGroup $group Query group to reduce * * @return Query */ protected function reduceQueryGroup(QueryGroup $group) { $searchString = $this->reduceQueryGroupComponents($group); $searchHandler = $group->getReducedHandler(); return new Query($searchString, $searchHandler); } /** * Reduce components of query group to a search string of a simple query. * * This function implements the recursive reduction of a query group. * * @param AbstractQuery $component Component * * @return string * * @see self::reduceQueryGroup() */ protected function reduceQueryGroupComponents(AbstractQuery $component) { if ($component instanceof QueryGroup) { $reduced = array_map( [$this, 'reduceQueryGroupComponents'], $component->getQueries() ); $searchString = $component->isNegated() ? 'NOT ' : ''; $reduced = array_filter( $reduced, function ($s) { return '' !== $s; } ); if ($reduced) { $searchString .= sprintf( '(%s)', implode(" {$component->getOperator()} ", $reduced) ); } } else { $searchString = $this->getNormalizedQueryString($component); $searchHandler = $this->getSearchHandler( $component->getHandler(), $searchString ); if ($searchHandler && '' !== $searchString) { $searchString = $this->createSearchString($searchString, $searchHandler); } } return $searchString; } /** * Return search string based on input and handler. * * @param string $string Input search string * @param SearchHandler $handler Search handler * * @return string */ protected function createSearchString($string, SearchHandler $handler = null) { $advanced = $this->getLuceneHelper()->containsAdvancedLuceneSyntax($string); if (null === $string) { return ''; } if ($advanced && $handler) { return $handler->createAdvancedQueryString($string); } elseif ($handler) { return $handler->createSimpleQueryString($string); } else { return $string; } } /** * If the query ends in a non-escaped question mark, the user may not really * intend to use the question mark as a wildcard -- let's account for that * possibility. * * @param string $string Search query to adjust * * @return string */ protected function fixTrailingQuestionMarks($string) { // Treat colon and whitespace as word separators -- in either case, we // should add parentheses for accuracy. $multiword = preg_match('/[^\s][\s:]+[^\s]/', $string); $callback = function ($matches) use ($multiword) { // Make sure all question marks are properly escaped (first unescape // any that are already escaped to prevent double-escapes, then escape // all of them): $s = $matches[1]; $escaped = str_replace('?', '\?', str_replace('\?', '?', $s)); $s = "($s) OR ($escaped)"; if ($multiword) { $s = "($s) "; } return $s; }; // Use a lookahead to skip matches found within quoted phrases. 
$lookahead = '(?=(?:[^\"]*+\"[^\"]*+\")*+[^\"]*+$)'; $string = preg_replace_callback( '/([^\s:()]+\?)(\s|$)' . $lookahead . '/', $callback, $string ); return rtrim($string); } /** * Given a Query object, return a fully normalized version of the query string. * * @param Query $query Query object * * @return string */ protected function getNormalizedQueryString($query) { return $this->fixTrailingQuestionMarks( $this->getLuceneHelper()->normalizeSearchString( $query->getString() ) ); } /** * Return advanced inner search string based on input and handler. * * @param string $string Input search string * @param SearchHandler $handler Search handler * * @return string */ protected function createAdvancedInnerSearchString($string, SearchHandler $handler ) { // Special case -- if the user wants all records but the current handler // has a filter query, apply the filter query: if (trim($string) === '*:*' && $handler && $handler->hasFilterQuery()) { return $handler->getFilterQuery(); } // If the query already includes field specifications, we can't easily // apply it to other fields through our defined handlers, so we'll leave // it as-is: if (strstr($string, ':')) { return $string; } return $handler ? $handler->createAdvancedQueryString($string) : $string; } }
1
28,037
Do we need to do this from the outside, or is there a spot in the handler code where we can do this automatically? (I haven't studied it closely yet... just wondering if you've already thought it through or if it's worth taking a closer look).
vufind-org-vufind
php
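The reviewer is asking whether the dismax preparation could happen inside the handler rather than being called from the query builder. A rough Python sketch of that kind of encapsulation follows; Handler and its methods are placeholders of mine, not VuFind's SearchHandler API.

class Handler:
    def __init__(self, dismax=False):
        self._dismax = dismax

    def _prepare(self, query: str) -> str:
        # Hypothetical dismax-specific normalisation step.
        return query.strip()

    def create_query_string(self, query: str) -> str:
        # Preparation is applied internally, so every caller gets it without having
        # to remember an extra prepare call from the outside.
        if self._dismax:
            query = self._prepare(query)
        return query


print(Handler(dismax=True).create_query_string("  art history  "))  # "art history"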
@@ -0,0 +1,9 @@ +package com.fsck.k9; + +import java.util.*; + +public class SputnikTest { + public void sputnikTest() { + return; + } +}
1
1
13,776
[Checkstyle] INFO: Using the '.*' form of import should be avoided - java.util.*.
k9mail-k-9
java
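Checkstyle is flagging the wildcard import added in the patch. The same guidance applies in most languages; here is a small Python analogue, where explicit imports keep the origin of each name visible (the module chosen is just for illustration):

# Discouraged, the Python analogue of "import java.util.*;":
#   from collections import *

# Preferred: import exactly what is used.
from collections import OrderedDict, defaultdict

counts = defaultdict(int)
counts["messages"] += 1
ordered = OrderedDict(sorted(counts.items()))
print(dict(ordered))  # {'messages': 1}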
@@ -42,6 +42,9 @@ class BaseWebTest(object): def get_app_settings(self, additional_settings=None): settings = cliquet_support.DEFAULT_SETTINGS.copy() + settings['cliquet.cache_backend'] = 'cliquet.cache.memory' + settings['cliquet.storage_backend'] = 'cliquet.storage.memory' + settings['cliquet.permission_backend'] = 'cliquet.permission.redis' settings['cliquet.project_name'] = 'cloud storage' settings['cliquet.project_docs'] = 'https://kinto.rtfd.org/' settings['multiauth.authorization_policy'] = (
1
try: import unittest2 as unittest except ImportError: import unittest # NOQA import webtest from cliquet import utils from pyramid.security import IAuthorizationPolicy from zope.interface import implementer from cliquet.tests import support as cliquet_support from kinto import main as testapp MINIMALIST_BUCKET = {'data': dict()} MINIMALIST_COLLECTION = {'data': dict()} MINIMALIST_GROUP = {'data': dict(members=['fxa:user'])} MINIMALIST_RECORD = {'data': dict(name="Hulled Barley", type="Whole Grain")} USER_PRINCIPAL = 'basicauth_8a931a10fc88ab2f6d1cc02a07d3a81b5d4768f' \ '6f13e85c5d8d4180419acb1b4' class BaseWebTest(object): def __init__(self, *args, **kwargs): super(BaseWebTest, self).__init__(*args, **kwargs) self.app = self._get_test_app() self.principal = USER_PRINCIPAL self.storage = self.app.app.registry.storage self.permission = self.app.app.registry.permission self.permission.initialize_schema() self.storage.initialize_schema() self.headers = { 'Content-Type': 'application/json', } self.headers.update(get_user_headers('mat')) def _get_test_app(self, settings=None): app = webtest.TestApp(testapp({}, **self.get_app_settings(settings))) app.RequestClass = cliquet_support.get_request_class(prefix="v1") return app def get_app_settings(self, additional_settings=None): settings = cliquet_support.DEFAULT_SETTINGS.copy() settings['cliquet.project_name'] = 'cloud storage' settings['cliquet.project_docs'] = 'https://kinto.rtfd.org/' settings['multiauth.authorization_policy'] = ( 'kinto.tests.support.AllowAuthorizationPolicy') if additional_settings is not None: settings.update(additional_settings) return settings def tearDown(self): super(BaseWebTest, self).tearDown() self.storage.flush() self.permission.flush() def add_permission(self, object_id, permission): self.permission.add_principal_to_ace( object_id, permission, self.principal) @implementer(IAuthorizationPolicy) class AllowAuthorizationPolicy(object): def permits(self, context, principals, permission): if USER_PRINCIPAL in principals: return True return False def principals_allowed_by_permission(self, context, permission): raise NotImplementedError() # PRAGMA NOCOVER def get_user_headers(user): credentials = "%s:secret" % user authorization = 'Basic {0}'.format(utils.encode64(credentials)) return { 'Authorization': authorization }
1
7,450
So, why is everything in memory except the permission backend?
Kinto-kinto
py
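The reviewer is asking why the permission backend alone points at Redis while cache and storage use memory. Below is a rough sketch of the all-in-memory test settings the comment seems to be hinting at; note that 'cliquet.permission.memory' is an assumed module path on my part, not something confirmed by this record.

def get_test_settings(base_settings):
    # Keep all three test backends in memory so the suite has no external dependency.
    settings = dict(base_settings)
    settings['cliquet.cache_backend'] = 'cliquet.cache.memory'
    settings['cliquet.storage_backend'] = 'cliquet.storage.memory'
    settings['cliquet.permission_backend'] = 'cliquet.permission.memory'  # assumed module path
    return settings


print(get_test_settings({'cliquet.project_name': 'cloud storage'}))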
@@ -283,8 +283,9 @@ public class UiSetupWizardImplementation implements SetupWizard { */ @Override public String browse(String textToShow, String directory, List<String> allowedExtensions) { + final List<String> copiedAllowedExtensions = allowedExtensions != null ? List.copyOf(allowedExtensions) : null; return messageSender.runAndWait( - message -> setupUi.showBrowseStep(message, textToShow, new File(directory), allowedExtensions)); + message -> setupUi.showBrowseStep(message, textToShow, new File(directory), copiedAllowedExtensions)); } /**
1
/* * Copyright (C) 2015-2017 PÂRIS Quentin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ package org.phoenicis.scripts.wizard; import org.apache.commons.io.IOUtils; import org.phoenicis.scripts.interpreter.ScriptException; import org.phoenicis.scripts.ui.*; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.StringWriter; import java.net.URI; import java.util.List; import java.util.Optional; import static org.phoenicis.configuration.localisation.Localisation.tr; public class UiSetupWizardImplementation implements SetupWizard { private final String title; private final InstallationType installationType; private Optional<URI> miniature; private final UiMessageSender messageSender; private final SetupUiFactory setupUiFactory; private SetupUi setupUi; private final String userHome; private final String applicationUserRoot; private final String applicationName; /** * constructor * @param title wizard title * @param miniature miniature for the setup wizard (usually miniature of the application which shall be installed) * @param installationType apps/engines * @param messageSender * @param setupUiFactory * @param userHome * @param applicationUserRoot * @param applicationName Phoenicis PlayOnLinux/PlayOnMac */ public UiSetupWizardImplementation( String title, Optional<URI> miniature, InstallationType installationType, UiMessageSender messageSender, SetupUiFactory setupUiFactory, String userHome, String applicationUserRoot, String applicationName) { this.title = title; this.miniature = miniature; this.installationType = installationType; this.messageSender = messageSender; this.setupUiFactory = setupUiFactory; this.userHome = userHome; this.applicationUserRoot = applicationUserRoot; this.applicationName = applicationName; } /** * Creates the window */ @Override public void init() { messageSender.run( () -> setupUi = setupUiFactory.createSetupWindow(this.title, this.miniature, this.installationType)); } /** * Set the left image text * * @param leftImageText text for the left image */ @Override public void setLeftImageText(String leftImageText) { setupUi.setLeftImageText(leftImageText); } /** * Set the top image * * @param topImage URL of the top image */ @Override public void setTopImage(String topImage) throws IOException { setupUi.setTopImage(new File(topImage)); } /** * Closes the setupUi */ @Override public void close() { messageSender.run(() -> { setupUi.close(); return null; }); } /** * Shows a simple showSimpleMessageStep * * @param textToShow the text to showRightView */ @Override public Void message(String textToShow) { return messageSender.runAndWait(message -> setupUi.showSimpleMessageStep(message, textToShow)); } /** * Show a free script presentation * * @param htmlToShow the free presentation HTML to showRightView */ @Override public Void presentationHtml(String htmlToShow) { return 
messageSender.runAndWait(message -> setupUi.showHtmlPresentationStep(message, htmlToShow)); } /** * Show a default script presentation * * @param programName the name of the program * @param programEditor the editor of the program * @param applicationHomepage homepage of the application * @param scriptorName the scriptor name */ @Override public Void presentation(String programName, String programEditor, String applicationHomepage, String scriptorName) { final String htmlToShow = "<body>" + tr("Installation wizard for {0}", programName) + ".<br><br>" + tr("Program by {0}", programEditor) + "<br><br>" + tr("For more information about this program, visit:") + String.format("<br><a href=\"%1$s\">%1$s</a><br><br>", applicationHomepage) + tr("Installation script by {0}", scriptorName) + "<br><br>" + "<br><br>" + tr("{0} will be installed in: {1}", programName, applicationUserRoot) + "<br><br>" + tr("{0} is not responsible for anything that might happen as a result of using these scripts.", applicationName) + "<br><br>" + tr("Click Next to start.") + "</body>"; return messageSender.runAndWait(message -> setupUi.showHtmlPresentationStep(message, htmlToShow)); } /** * Show a free script presentation * * @param textToShow the free presentation text to showRightView */ @Override public Void presentation(String textToShow) { return messageSender.runAndWait(message -> setupUi.showPresentationStep(message, textToShow)); } /** * Show the content of a licence file * * @param textToShow a message above the licence * @param licenceFile the licence file to display (with 'from java.io import File') */ @Override public Void licenceFile(String textToShow, File licenceFile) { try { try (final FileInputStream content = new FileInputStream(licenceFile)) { final StringWriter writer = new StringWriter(); IOUtils.copy(content, writer, "UTF-8"); return licence(textToShow, writer.toString()); } } catch (IOException e) { throw new ScriptException("Cannot acces the licence file", e); } } /** * Show the content of a licence file * * @param textToShow a message above the licence * @param licenceFilePath the path of the licence file to display */ @Override public Void licenceFile(String textToShow, String licenceFilePath) { return licenceFile(textToShow, new File(licenceFilePath)); } /** * Show a custom licence message * * @param textToShow a message above the licence * @param licenceText the licence text to showRightView */ @Override public Void licence(String textToShow, String licenceText) { return messageSender.runAndWait(message -> setupUi.showLicenceStep(message, textToShow, licenceText)); } /** * Ask the user to enter a value * * @param textToShow a text that will be shown * @return the value the user entered */ @Override public String textbox(String textToShow) { return this.textbox(textToShow, ""); } /** * Asks the user to enter a value * * @param textToShow a text that will be shown * @param defaultValue a default value * @return the value the user entered */ @Override public String textbox(String textToShow, String defaultValue) { return messageSender.runAndWait(message -> setupUi.showTextBoxStep(message, textToShow, defaultValue)); } /** * Displays a showMenuStep so that the user can make a choice * * @param textToShow a text that will be shown * @param menuItems a list containing the elements of the showMenuStep * @return the value the user entered (as string) */ @Override public MenuItem menu(String textToShow, List<String> menuItems) { return this.menu(textToShow, menuItems, ""); } /** * Displays a 
showMenuStep so that the user can make a choice * * @param textToShow a text that will be shown * @param menuItems a list containing the elements of the showMenuStep * @param defaultValue item which is selected by default * @return the value the user entered (as string) */ @Override public MenuItem menu(String textToShow, List<String> menuItems, String defaultValue) { final List<String> copiedMenuItems = List.copyOf(menuItems); return messageSender .runAndWait(message -> setupUi.showMenuStep(message, textToShow, copiedMenuItems, defaultValue)); } /** * Asks the user to choose a file a file * * @param textToShow text to show * @return The path of the file */ @Override public String browse(String textToShow) { return browse(textToShow, this.userHome, null); } /** * Ask the user to choose a file * * @param textToShow text to show * @param directory default directory to browse in * @param allowedExtensions A list containing allowed extensions. All extensions will be allowed if this parameter * is set to null * @return The path of the file */ @Override public String browse(String textToShow, String directory, List<String> allowedExtensions) { return messageSender.runAndWait( message -> setupUi.showBrowseStep(message, textToShow, new File(directory), allowedExtensions)); } /** * Displays a showSimpleMessageStep to the user with a waiting symbol, and releases the script just afterward * * @param textToShow a text that will be shown */ @Override public Void wait(String textToShow) { return messageSender.runAndWait(message -> setupUi.showSpinnerStep(message, textToShow)); } @Override public ProgressControl progressBar(String textToShow) { return messageSender.runAndWait(message -> setupUi.showProgressBar(message, textToShow)); } @Override public String getTitle() { return title; } @Override public BrowserControl createBrowser(String textToShow) { return messageSender.runAndWait(message -> setupUi.showBrowser(message, textToShow)); } }
1
13,907
Are you sure it should be null and not an empty List?
PhoenicisOrg-phoenicis
java
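The reviewer asks whether a null extension list should really stay null rather than become an empty list. A small Python sketch of why the two are not interchangeable when None means "no restriction"; the function name is a placeholder of mine, not Phoenicis API.

from typing import Optional, Sequence, Tuple


def copy_allowed_extensions(allowed: Optional[Sequence[str]]) -> Optional[Tuple[str, ...]]:
    # Defensive copy (what List.copyOf does in the patch), but preserve None because it
    # carries a different meaning ("all extensions allowed") than an empty collection.
    return None if allowed is None else tuple(allowed)


print(copy_allowed_extensions(None))            # None -> every extension allowed
print(copy_allowed_extensions(["exe", "msi"]))  # ('exe', 'msi')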
@@ -22,10 +22,12 @@ namespace Microsoft.DotNet.Build.Tasks // Additional Dependencies to add to the project.json. May Optionally contain a version. // Will Override dependencies present in the project if there is a conflict. // AdditionalDependencies required metadata: Name, Version - [Required] public ITaskItem[] AdditionalDependencies { get; set; } + // Framework section which the additional dependencies apply to. Empty is the default dependencies section. + public string[] Frameworks { get; set; } + public string BuildNumberOverrideStructureRegex { get; set; } // Permit overriding package versions found in project.json with custom build number version.
1
using Microsoft.Build.Utilities; using Microsoft.Build.Framework; using Newtonsoft.Json; using Newtonsoft.Json.Linq; using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text.RegularExpressions; using NuGet.Versioning; namespace Microsoft.DotNet.Build.Tasks { /// <summary> /// Parse a project.json, and add additional dependencies, then write a out new project.json. /// </summary> public class AddDependenciesToProjectJson : Task { [Required] public string VersionStructureRegex { get; set; } // Additional Dependencies to add to the project.json. May Optionally contain a version. // Will Override dependencies present in the project if there is a conflict. // AdditionalDependencies required metadata: Name, Version [Required] public ITaskItem[] AdditionalDependencies { get; set; } public string BuildNumberOverrideStructureRegex { get; set; } // Permit overriding package versions found in project.json with custom build number version. public string PackageBuildNumberOverride { get; set; } // Original package version which is used to seed the output project.json [Required] public string ProjectJson { get; set; } // The directory to put the generated project.json in [Required] public string OutputProjectJson { get; set; } private Regex versionStructureRegex; private Regex buildNumberOverrideStructureRegex; public override bool Execute() { if (!File.Exists(ProjectJson)) { Log.LogError("Cannot find specified project.json - '{0}'", ProjectJson); return false; } if (!string.IsNullOrWhiteSpace(PackageBuildNumberOverride)) { if(string.IsNullOrWhiteSpace(BuildNumberOverrideStructureRegex)) { Log.LogError("Missing required parameter. BuildNumberOverrideStructureRegex must be provided if PackageBuildNumberOverride is provided."); } buildNumberOverrideStructureRegex = new Regex(BuildNumberOverrideStructureRegex); if(!buildNumberOverrideStructureRegex.IsMatch(PackageBuildNumberOverride)) { Log.LogError("Invalid package version format: '{0}'", PackageBuildNumberOverride); return false; } } JObject projectRoot = ReadProject(ProjectJson); versionStructureRegex = new Regex(VersionStructureRegex); JObject dependencies = GenerateDependencies(projectRoot); projectRoot = UpdateProperty("dependencies", projectRoot, dependencies); WriteProject(projectRoot, OutputProjectJson); return true; } private static JObject ReadProject(string projectJsonPath) { using (TextReader projectFileReader = File.OpenText(projectJsonPath)) { var projectJsonReader = new JsonTextReader(projectFileReader); var serializer = new JsonSerializer(); return serializer.Deserialize<JObject>(projectJsonReader); } } // Generate the combines dependencies from the projectjson jObject and from AdditionalDependencies private JObject GenerateDependencies(JObject projectJsonRoot) { var originalDependenciesList = new List<JToken>(); var returnDependenciesList = new List<JToken>(); originalDependenciesList = projectJsonRoot["dependencies"]?.Children().ToList(); // No PackageBuildNumberOverride was specified, so try to find one to associate with our AdditionalDependencies PackageBuildNumberOverride = PackageBuildNumberOverride ?? 
DeriveBuildNumber(originalDependenciesList); // Update versions in dependencies foreach(JProperty property in originalDependenciesList.Select(od => od)) { string version = NuGetVersion.Parse(property.Value.ToString()).ToString(); Match m = versionStructureRegex.Match(version); if (m.Success) { NuGetVersion dependencyVersion = NuGetVersion.Parse(version); version = string.Join(".", dependencyVersion.Major, dependencyVersion.Minor, dependencyVersion.Patch) + "-" + PackageBuildNumberOverride; } // Only add the original dependency if it wasn't passed as an AdditionalDependency, ie. AdditionalDependencies may override dependencies in project.json if (AdditionalDependencies?.Where(d => d.GetMetadata("Name").Equals(property.Name, StringComparison.OrdinalIgnoreCase)).Count() == 0) { JProperty addProperty = new JProperty(property.Name, version); returnDependenciesList.Add(addProperty); } } foreach (var dependency in AdditionalDependencies) { string name = dependency.GetMetadata("Name"); // Don't add a new dependency if one already exists. if (returnDependenciesList.Count(rd => ((JProperty)rd).Name.Equals(name)) == 0) { NuGetVersion dependencyVersion = NuGetVersion.Parse(dependency.GetMetadata("Version")); string version = string.Join(".", dependencyVersion.Major, dependencyVersion.Minor, dependencyVersion.Patch); if (!string.IsNullOrWhiteSpace(PackageBuildNumberOverride)) { version += "-" + PackageBuildNumberOverride; } JProperty property = new JProperty(name, version); returnDependenciesList.Add(property); } else { Log.LogMessage("Ignoring AdditionalDependency '{0}', dependency is already present in {1}", name, ProjectJson); } } return new JObject(returnDependenciesList.ToArray()); } /* No build number was specified, determine the build number by examining the other packages in the dependencies list */ private string DeriveBuildNumber(List<JToken> dependenciesList) { foreach (JProperty property in dependenciesList.Select(dl => (JProperty)dl)) { string version = property.Value.ToString(); Match m = versionStructureRegex.Match(version); if (m.Success) { string buildNumber = m.Groups[2].Value; Log.LogMessage("Determined buildnumber using existing package dependencies as '{0}'", buildNumber); return buildNumber; } } return PackageBuildNumberOverride; } /* Given a project.json as a JObject, replace it's dependencies property with a new dependencies property. */ private JObject UpdateProperty(string propertyName, JObject projectJsonRoot, JObject updatedProperties) { if (projectJsonRoot.Property(propertyName) != null) { JObject returnJsonRoot = new JObject(); Dictionary<string, JToken> properties = new Dictionary<string, JToken>(); // Collect all properties from jObject which are not the dependencies property foreach (var property in projectJsonRoot.Properties().Where(p => !p.Name.Equals(propertyName, StringComparison.OrdinalIgnoreCase))) { properties.Add(property.Name, property.Value); } // Add new dependencies to our jObject returnJsonRoot[propertyName] = updatedProperties; // Add back all of the properties we collected from previous jObject foreach (string property in properties.Keys) { returnJsonRoot.Add(property, properties[property]); } return returnJsonRoot; } return projectJsonRoot; } private static void WriteProject(JObject projectRoot, string projectJsonPath) { string projectJson = JsonConvert.SerializeObject(projectRoot, Formatting.Indented); Directory.CreateDirectory(Path.GetDirectoryName(projectJsonPath)); File.WriteAllText(projectJsonPath, projectJson + Environment.NewLine); } } }
1
8,649
Unfortunately I think the assumption that "empty" means the default dependencies section is not correct. In a project.json file the default dependencies section is shared across all of the different target frameworks, and an empty TargetGroup may map to many different target frameworks, so the two aren't really equivalent.
dotnet-buildtools
.cs
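The reviewer's point above is about project.json layout: the top-level "dependencies" section is shared by every target framework, while each entry under "frameworks" may carry its own "dependencies" block, so an empty Frameworks/TargetGroup value does not correspond to any single framework. Below is a minimal Go sketch (not part of the PR; the package and framework names are illustrative assumptions) that prints a project.json-shaped document containing both kinds of sections.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Shape of a project.json: one shared "dependencies" section plus
	// optional per-framework "dependencies" blocks under "frameworks".
	projectJSON := map[string]interface{}{
		// Shared by all target frameworks listed below.
		"dependencies": map[string]string{
			"Newtonsoft.Json": "9.0.1", // illustrative package/version
		},
		"frameworks": map[string]interface{}{
			// This framework adds its own, framework-specific dependency.
			"netstandard1.3": map[string]interface{}{
				"dependencies": map[string]string{
					"System.Runtime": "4.1.0", // illustrative
				},
			},
			// This framework only uses the shared section.
			"net46": map[string]interface{}{},
		},
	}

	out, err := json.MarshalIndent(projectJSON, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}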
@@ -121,8 +121,8 @@ namespace Microsoft.AspNet.Server.KestrelTests IDictionary<string, StringValues> headers = new FrameRequestHeaders(); var kv1 = new KeyValuePair<string, StringValues>("host", new[] { "localhost" }); var kv2 = new KeyValuePair<string, StringValues>("custom", new[] { "value" }); - var kv1b = new KeyValuePair<string, StringValues>("host", new[] { "localhost" }); - var kv2b = new KeyValuePair<string, StringValues>("custom", new[] { "value" }); + var kv1b = new KeyValuePair<string, StringValues>("host", new[] { "not-localhost" }); + var kv2b = new KeyValuePair<string, StringValues>("custom", new[] { "not-value" }); Assert.False(headers.ContainsKey("host")); Assert.False(headers.ContainsKey("custom"));
1
using System; using System.Collections.Generic; using Microsoft.AspNet.Server.Kestrel.Http; using Microsoft.Extensions.Primitives; using Xunit; namespace Microsoft.AspNet.Server.KestrelTests { public class FrameRequestHeadersTests { [Fact] public void InitialDictionaryIsEmpty() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); Assert.Equal(0, headers.Count); Assert.False(headers.IsReadOnly); } [Fact] public void SettingUnknownHeadersWorks() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); headers["custom"] = new[] { "value" }; Assert.NotNull(headers["custom"]); Assert.Equal(1, headers["custom"].Count); Assert.Equal("value", headers["custom"][0]); } [Fact] public void SettingKnownHeadersWorks() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); headers["host"] = new[] { "value" }; Assert.NotNull(headers["host"]); Assert.Equal(1, headers["host"].Count); Assert.Equal("value", headers["host"][0]); } [Fact] public void KnownAndCustomHeaderCountAddedTogether() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); headers["host"] = new[] { "value" }; headers["custom"] = new[] { "value" }; Assert.Equal(2, headers.Count); } [Fact] public void TryGetValueWorksForKnownAndUnknownHeaders() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); StringValues value; Assert.False(headers.TryGetValue("host", out value)); Assert.False(headers.TryGetValue("custom", out value)); headers["host"] = new[] { "value" }; Assert.True(headers.TryGetValue("host", out value)); Assert.False(headers.TryGetValue("custom", out value)); headers["custom"] = new[] { "value" }; Assert.True(headers.TryGetValue("host", out value)); Assert.True(headers.TryGetValue("custom", out value)); } [Fact] public void SameExceptionThrownForMissingKey() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); Assert.Throws<KeyNotFoundException>(() => headers["custom"]); Assert.Throws<KeyNotFoundException>(() => headers["host"]); } [Fact] public void EntriesCanBeEnumerated() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); var v1 = new[] { "localhost" }; var v2 = new[] { "value" }; headers["host"] = v1; headers["custom"] = v2; Assert.Equal( new[] { new KeyValuePair<string, StringValues>("Host", v1), new KeyValuePair<string, StringValues>("custom", v2), }, headers); } [Fact] public void KeysAndValuesCanBeEnumerated() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); StringValues v1 = new[] { "localhost" }; StringValues v2 = new[] { "value" }; headers["host"] = v1; headers["custom"] = v2; Assert.Equal<string>( new[] { "Host", "custom" }, headers.Keys); Assert.Equal<StringValues>( new[] { v1, v2 }, headers.Values); } [Fact] public void ContainsAndContainsKeyWork() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); var kv1 = new KeyValuePair<string, StringValues>("host", new[] { "localhost" }); var kv2 = new KeyValuePair<string, StringValues>("custom", new[] { "value" }); var kv1b = new KeyValuePair<string, StringValues>("host", new[] { "localhost" }); var kv2b = new KeyValuePair<string, StringValues>("custom", new[] { "value" }); Assert.False(headers.ContainsKey("host")); Assert.False(headers.ContainsKey("custom")); Assert.False(headers.Contains(kv1)); Assert.False(headers.Contains(kv2)); headers["host"] = kv1.Value; Assert.True(headers.ContainsKey("host")); Assert.False(headers.ContainsKey("custom")); Assert.True(headers.Contains(kv1)); 
Assert.False(headers.Contains(kv2)); Assert.False(headers.Contains(kv1b)); Assert.False(headers.Contains(kv2b)); headers["custom"] = kv2.Value; Assert.True(headers.ContainsKey("host")); Assert.True(headers.ContainsKey("custom")); Assert.True(headers.Contains(kv1)); Assert.True(headers.Contains(kv2)); Assert.False(headers.Contains(kv1b)); Assert.False(headers.Contains(kv2b)); } [Fact] public void AddWorksLikeSetAndThrowsIfKeyExists() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); StringValues value; Assert.False(headers.TryGetValue("host", out value)); Assert.False(headers.TryGetValue("custom", out value)); headers.Add("host", new[] { "localhost" }); headers.Add("custom", new[] { "value" }); Assert.True(headers.TryGetValue("host", out value)); Assert.True(headers.TryGetValue("custom", out value)); Assert.Throws<ArgumentException>(() => headers.Add("host", new[] { "localhost" })); Assert.Throws<ArgumentException>(() => headers.Add("custom", new[] { "value" })); Assert.True(headers.TryGetValue("host", out value)); Assert.True(headers.TryGetValue("custom", out value)); } [Fact] public void ClearRemovesAllHeaders() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); headers.Add("host", new[] { "localhost" }); headers.Add("custom", new[] { "value" }); StringValues value; Assert.Equal(2, headers.Count); Assert.True(headers.TryGetValue("host", out value)); Assert.True(headers.TryGetValue("custom", out value)); headers.Clear(); Assert.Equal(0, headers.Count); Assert.False(headers.TryGetValue("host", out value)); Assert.False(headers.TryGetValue("custom", out value)); } [Fact] public void RemoveTakesHeadersOutOfDictionary() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); headers.Add("host", new[] { "localhost" }); headers.Add("custom", new[] { "value" }); StringValues value; Assert.Equal(2, headers.Count); Assert.True(headers.TryGetValue("host", out value)); Assert.True(headers.TryGetValue("custom", out value)); Assert.True(headers.Remove("host")); Assert.False(headers.Remove("host")); Assert.Equal(1, headers.Count); Assert.False(headers.TryGetValue("host", out value)); Assert.True(headers.TryGetValue("custom", out value)); Assert.True(headers.Remove("custom")); Assert.False(headers.Remove("custom")); Assert.Equal(0, headers.Count); Assert.False(headers.TryGetValue("host", out value)); Assert.False(headers.TryGetValue("custom", out value)); } [Fact] public void CopyToMovesDataIntoArray() { IDictionary<string, StringValues> headers = new FrameRequestHeaders(); headers.Add("host", new[] { "localhost" }); headers.Add("custom", new[] { "value" }); var entries = new KeyValuePair<string, StringValues>[4]; headers.CopyTo(entries, 1); Assert.Null(entries[0].Key); Assert.Equal(new StringValues(), entries[0].Value); Assert.Equal("Host", entries[1].Key); Assert.Equal(new[] { "localhost" }, entries[1].Value); Assert.Equal("custom", entries[2].Key); Assert.Equal(new[] { "value" }, entries[2].Value); Assert.Null(entries[3].Key); Assert.Equal(new StringValues(), entries[0].Value); } } }
1
6,274
This test was verifying the wrong thing.
aspnet-KestrelHttpServer
.cs
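The fix above replaces the "b" probe pairs with values that differ from the stored ones; otherwise the Assert.False(headers.Contains(kv1b)) checks could never fail for the right reason, since an identical pair exercises nothing beyond the positive assertions. A small Go sketch of the same idea (the contains helper and the plain map are stand-ins, not Kestrel APIs):

package main

import "fmt"

// contains mirrors IDictionary.Contains(KeyValuePair) semantics:
// the key must exist and the stored value must match exactly.
func contains(headers map[string]string, key, value string) bool {
	v, ok := headers[key]
	return ok && v == value
}

func main() {
	headers := map[string]string{"host": "localhost"}

	fmt.Println(contains(headers, "host", "localhost"))     // true: same key and value
	fmt.Println(contains(headers, "host", "not-localhost")) // false: key exists, value differs
	fmt.Println(contains(headers, "custom", "value"))       // false: key absent
}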
@@ -9,7 +9,7 @@ const build = (buildConfig = config.defaultBuildConfig, options) => { util.updateBranding() } - util.buildMuon() + util.buildMuon('brave') } module.exports = build
1
const config = require('../lib/config')
const util = require('../lib/util')

const build = (buildConfig = config.defaultBuildConfig, options) => {
  config.buildConfig = buildConfig
  config.update(options)

  if (!options.no_branding_update) {
    util.updateBranding()
  }

  util.buildMuon()
}

module.exports = build
1
5,286
Please PR a name change for muon here too, separately.
brave-brave-browser
js
@@ -198,7 +198,11 @@ func (opts *InitAppOpts) deployEnv() error { return err } + if err := opts.envStore.CreateEnvironment(&env); err != nil { + opts.spinner.Stop("Error!") + return err + } opts.spinner.Stop("Done!") } return nil
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package cli contains the archer subcommands. package cli import ( "errors" "fmt" "os" "github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2/terminal" "github.com/aws/PRIVATE-amazon-ecs-archer/cmd/archer/template" "github.com/aws/PRIVATE-amazon-ecs-archer/internal/pkg/archer" "github.com/aws/PRIVATE-amazon-ecs-archer/internal/pkg/deploy/cloudformation" "github.com/aws/PRIVATE-amazon-ecs-archer/internal/pkg/manifest" spin "github.com/aws/PRIVATE-amazon-ecs-archer/internal/pkg/spinner" "github.com/aws/PRIVATE-amazon-ecs-archer/internal/pkg/store" "github.com/aws/PRIVATE-amazon-ecs-archer/internal/pkg/store/ssm" "github.com/aws/PRIVATE-amazon-ecs-archer/internal/pkg/workspace" "github.com/aws/aws-sdk-go/aws/session" "github.com/spf13/cobra" ) const defaultEnvironmentName = "test" // InitAppOpts holds the fields to bootstrap a new application. type InitAppOpts struct { // User provided fields Project string `survey:"project"` // namespace that this application belongs to. Name string `survey:"name"` // unique identifier to logically group AWS resources together. Type string `survey:"Type"` // type of application you're trying to build (LoadBalanced, Backend, etc.) existingProjects []string projStore archer.ProjectStore envStore archer.EnvironmentStore deployer archer.EnvironmentDeployer ws archer.Workspace spinner spinner prompt terminal.Stdio // interfaces to receive and output app configuration data to the terminal. } // Ask prompts the user for the value of any required fields that are not already provided. func (opts *InitAppOpts) Ask() error { var qs []*survey.Question if opts.Project == "" { qs = append(qs, opts.projectQuestion()) } if opts.Name == "" { qs = append(qs, &survey.Question{ Name: "name", Prompt: &survey.Input{ Message: "What is your application's name?", Help: "Collection of AWS services to achieve a business capability. Must be unique within a project.", }, Validate: validateApplicationName, }) } if opts.Type == "" { qs = append(qs, opts.manifestQuestion()) } return survey.Ask(qs, opts, survey.WithStdio(opts.prompt.In, opts.prompt.Out, opts.prompt.Err)) } func (opts InitAppOpts) manifestQuestion() *survey.Question { return &survey.Question{ Prompt: &survey.Select{ Message: "Which template would you like to use?", Help: "Pre-defined infrastructure templates.", Options: []string{ manifest.LoadBalancedWebApplication, }, Default: manifest.LoadBalancedWebApplication, }, Name: "Type", } } func (opts InitAppOpts) projectQuestion() *survey.Question { if len(opts.existingProjects) > 0 { return &survey.Question{ Name: "project", Prompt: &survey.Select{ Message: "Which project should we use?", Help: "Choose a project to create a new application in. 
Applications in the same project share the same VPC, ECS Cluster and are discoverable via service discovery", Options: opts.existingProjects, }, } } return &survey.Question{ Name: "project", Prompt: &survey.Input{ Message: "What is your project's name?", Help: "Applications under the same project share the same VPC and ECS Cluster and are discoverable via service discovery.", }, Validate: validateProjectName, } } // Validate returns an error if a command line flag provided value is invalid func (opts *InitAppOpts) Validate() error { if err := validateProjectName(opts.Project); err != nil && err != errValueEmpty { return fmt.Errorf("project name invalid: %v", err) } if err := validateApplicationName(opts.Name); err != nil && err != errValueEmpty { return fmt.Errorf("application name invalid: %v", err) } return nil } // Prepare loads contextual data such as any existing projects, the current workspace, etc func (opts *InitAppOpts) Prepare() { // If there's a local project, we'll use that and just skip the project question. // Otherwise, we'll load a list of existing projects that the customer can select from. if opts.Project != "" { return } if summary, err := opts.ws.Summary(); err == nil { // use the project name from the workspace opts.Project = summary.ProjectName return } // load all existing project names existingProjects, _ := opts.projStore.ListProjects() var projectNames []string for _, p := range existingProjects { projectNames = append(projectNames, p.Name) } opts.existingProjects = projectNames } // Execute creates a project and initializes the workspace. func (opts *InitAppOpts) Execute() error { if err := opts.createProjectIfNotExists(); err != nil { return err } if err := opts.ws.Create(opts.Project); err != nil { return err } return opts.deployEnv() } func (opts *InitAppOpts) createProjectIfNotExists() error { err := opts.projStore.CreateProject(&archer.Project{ Name: opts.Project, }) // If the project already exists, that's ok - otherwise // return the error. var projectAlreadyExistsError *store.ErrProjectAlreadyExists if !errors.As(err, &projectAlreadyExistsError) { return err } return nil } // deployEnv prompts the user to deploy a test environment if the project doesn't already have one. func (opts *InitAppOpts) deployEnv() error { existingEnvs, _ := opts.envStore.ListEnvironments(opts.Project) if len(existingEnvs) > 0 { return nil } deployEnv := false prompt := &survey.Confirm{ Message: "Would you like to set up a test environment?", Help: "You can deploy your app into your test environment.", } survey.AskOne(prompt, &deployEnv, survey.WithStdio(opts.prompt.In, opts.prompt.Out, opts.prompt.Err)) if deployEnv { opts.spinner.Start("Deploying env...") // TODO: prompt the user for an environment name with default value "test" // https://github.com/aws/PRIVATE-amazon-ecs-archer/issues/56 env := archer.Environment{ Project: opts.Project, Name: defaultEnvironmentName, PublicLoadBalancer: true, // TODO: configure this value based on user input or Application type needs? } if err := opts.deployer.DeployEnvironment(env); err != nil { opts.spinner.Stop("Error!") return err } if err := opts.deployer.Wait(env); err != nil { opts.spinner.Stop("Error!") return err } opts.spinner.Stop("Done!") } return nil } // BuildInitCmd builds the command for bootstrapping an application. 
func BuildInitCmd() *cobra.Command { opts := InitAppOpts{ prompt: terminal.Stdio{ In: os.Stdin, Out: os.Stderr, Err: os.Stderr, }, } cmd := &cobra.Command{ Use: "init", Short: "Create a new ECS application", PersistentPreRunE: func(cmd *cobra.Command, args []string) error { ws, err := workspace.New() if err != nil { return err } opts.ws = ws ssm, err := ssm.NewStore() if err != nil { return err } opts.projStore = ssm opts.envStore = ssm sess, err := session.NewSessionWithOptions(session.Options{ SharedConfigState: session.SharedConfigEnable, }) if err != nil { return err } opts.deployer = cloudformation.New(sess) opts.spinner = spin.New() opts.Prepare() return opts.Ask() }, RunE: func(cmd *cobra.Command, args []string) error { return opts.Execute() }, } cmd.Flags().StringVarP(&opts.Project, "project", "p", "", "Name of the project (required).") cmd.Flags().StringVarP(&opts.Name, "app", "a", "", "Name of the application (required).") cmd.Flags().StringVarP(&opts.Type, "type", "t", "", "Type of application to create.") cmd.SetUsageTemplate(template.Usage) cmd.Annotations = map[string]string{ "group": "Getting Started ✨", } return cmd }
1
10,284
nit: we can use `defer opts.spinner.Stop("Error!")` after l.180 to make it slightly neater
aws-copilot-cli
go
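The nit above suggests folding the repeated opts.spinner.Stop("Error!") calls into a defer. A standalone Go sketch of that shape follows; the spinner type and the deploy/wait callbacks are stand-ins for the copilot-cli code, and deferring a closure (rather than Stop("Error!") directly) is one way to keep the "Done!" message on the success path.

package main

import (
	"errors"
	"fmt"
)

// spinner is a toy stand-in for the CLI's progress spinner.
type spinner struct{}

func (s spinner) Start(msg string) { fmt.Println("start:", msg) }
func (s spinner) Stop(msg string)  { fmt.Println("stop:", msg) }

// deployEnv shows the defer-based cleanup: every early error return stops the
// spinner with "Error!", while the success path still ends with "Done!".
func deployEnv(s spinner, deploy, wait func() error) (err error) {
	s.Start("Deploying env...")
	defer func() {
		if err != nil {
			s.Stop("Error!")
			return
		}
		s.Stop("Done!")
	}()

	if err = deploy(); err != nil {
		return err
	}
	if err = wait(); err != nil {
		return err
	}
	return nil
}

func main() {
	s := spinner{}
	_ = deployEnv(s, func() error { return nil }, func() error { return nil })
	_ = deployEnv(s, func() error { return errors.New("stack failed") }, func() error { return nil })
}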
@@ -7367,6 +7367,7 @@ check_thread_vm_area(dcontext_t *dcontext, app_pc pc, app_pc tag, void **vmlist, read_lock(&executable_areas->lock); ok = lookup_addr(executable_areas, pc, &area); if (ok && TEST(VM_DELAY_READONLY, area->vm_flags)) { + bool is_allocated_mem; /* need to mark region read only for consistency * need to upgrade to write lock, have to release lock first * then recheck conditions after grabbing hotp + write lock */
1
/* ********************************************************** * Copyright (c) 2010-2017 Google, Inc. All rights reserved. * Copyright (c) 2002-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2002-2003 Massachusetts Institute of Technology */ /* * vmareas.c - virtual memory executable areas */ #include "globals.h" /* all of this for selfmod handling */ #include "fragment.h" #include "instr.h" #include "decode.h" #include "decode_fast.h" #include "link.h" #include "disassemble.h" #include "fcache.h" #include "hotpatch.h" #include "moduledb.h" #include "module_shared.h" #include "perscache.h" #include "translate.h" #include "jit_opt.h" #ifdef WINDOWS # include "events.h" /* event log messages - not supported yet on Linux */ #endif #ifdef CLIENT_INTERFACE # include "instrument.h" #endif #ifdef DEBUG # include "synch.h" /* all_threads_synch_lock */ #endif #include <string.h> enum { /* VM_ flags to distinguish region types * We also use some FRAG_ flags (but in a separate field so no value space overlap) * Adjacent regions w/ different flags are never merged. */ VM_WRITABLE = 0x0001, /* app memory writable? */ /* UNMOD_IMAGE means the region was mmapped in and has been read-only since then * this excludes even loader modifications (IAT update, relocate, etc.) on win32! */ VM_UNMOD_IMAGE = 0x0002, VM_DELETE_ME = 0x0004, /* on delete queue -- for thread-local only */ /* NOTE : if a new area is added that overlaps an existing area with a * different VM_WAS_FUTURE flag, the areas will be merged with the flag * taken from the new area, see FIXME in add_vm_area */ VM_WAS_FUTURE = 0x0008, /* moved from future list to exec list */ VM_DR_HEAP = 0x0010, /* DR heap area */ VM_ONCE_ONLY = 0x0020, /* on future list but should be removed on * first exec */ /* FIXME case 7877, 3744: need to properly merge pageprot regions with * existing selfmod regions before we can truly separate this. For now we * continue to treat selfmod as pageprot. 
* Once we separate, we should update DR_MADE_READONLY. */ VM_MADE_READONLY = VM_WRITABLE/* FIXME: should be 0x0040 -- see above */, /* DR has marked this region read * only for consistency, should only be used * in conjunction with VM_WRITABLE */ VM_DELAY_READONLY = 0x0080, /* dr has not yet marked this region read * only for consistency, should only be used * in conjunction with VM_WRITABLE */ #ifdef PROGRAM_SHEPHERDING /* re-verify this region for code origins policies every time it is * encountered. only used with selfmod regions that are only allowed if * they match patterns, to prevent other threads from writing non-pattern * code though and executing after the region has been approved. * xref case 4020. can remove once we split code origins list from * cache consistency list (case 3744). */ VM_PATTERN_REVERIFY = 0x0100, #endif VM_DRIVER_ADDRESS = 0x0200, /* a driver hooker area, needed for case 9022. Note we can * normally read properties only of user mode addresses, so we * have to probe addresses in this area. Also note that we're * still executing all of this code in user mode e.g. there is no * mode switch, no conforming segments, etc. */ /* Does this region contain a persisted cache? * Must also be FRAG_COARSE_GRAIN of course. * This is a shortcut to reading custom.client->persisted. * This is not guaranteed to be set on shared_data: only on executable_areas. */ VM_PERSISTED_CACHE = 0x0400, /* Case 10584: avoid flush synch when no code has been executed */ VM_EXECUTED_FROM = 0x0800, /* A workaround for lock rank issues: we delay adding loaded persisted * units to shared_data until first asked about. * This flags is NOT propagated on vmarea splits. */ VM_ADD_TO_SHARED_DATA = 0x1000, /* i#1114: for areas containing JIT code flushed via annotation or inference */ VM_JIT_MANAGED = 0x2000, }; /* simple way to disable sandboxing */ #define SANDBOX_FLAG() \ (INTERNAL_OPTION(hw_cache_consistency) ? FRAG_SELFMOD_SANDBOXED : 0) /* Because VM_MADE_READONLY == VM_WRITABLE it's not sufficient on its own */ #define DR_MADE_READONLY(flags) \ (INTERNAL_OPTION(hw_cache_consistency) && TEST(VM_MADE_READONLY, flags)) /* Fields only used for written_areas */ typedef struct _ro_vs_sandbox_data_t { /* written_count only used for written_areas vector. * if > 0, areas will NOT be merged, so we can keep separate * counts by page (hopefully not making the list too long). */ uint written_count; /* Used only for -sandbox2ro_threshold. It's only in the * written_areas vector b/c executable_areas has its regions removed * on a flush while threads could still be accessing counters in * selfmod fragments in the cache. We lose some granularity here but * it's not a big deal. * We could make these both ushorts, but it'd be more of a pain * to increment this counter from the cache then, worrying about overflows. */ uint selfmod_execs; #ifdef DEBUG uint ro2s_xfers; uint s2ro_xfers; #endif } ro_vs_sandbox_data_t; /* Our executable area list has three types of areas. Each type can be merged * with adjacent areas of the same type but not with any of the other types! * 1) originally RO code == we leave alone * 2) originally RW code == we mark RO * 3) originally RW code, written to from within itself == we leave RW and sandbox * We keep all three types in the same list b/c any particular address interval * can only be of one type at any one time, and all three are executable, meaning * code cache code was copied from there. 
*/ typedef struct vm_area_t { app_pc start; app_pc end; /* open end interval */ /* We have two different flags fields to allow easy use of the FRAG_ flags. * The two combined are used to distinguish different regions. * Adjacent regions w/ different flags are never merged. */ /* Flags that start with VM_ */ uint vm_flags; /* Flags that start with FRAG_ * In use now are FRAG_SELFMOD_SANDBOXED and FRAG_DYNGEN. */ uint frag_flags; #ifdef DEBUG char *comment; #endif /******************** * custom fields not used in all vectors * FIXME: separate into separately-allocated piece? or have a struct * extension (poor man's subclass, like trace_t, etc.) and make our vector * iterators handle it? * once we have a generic interval data structure (case 6208) this * hardcoding of individual uses will go away. */ union { /* Used in per-thread and shared vectors, not in master area lists. * We identify vectors using this via VECTOR_FRAGMENT_LIST, needed * b/c {add,remove}_vm_area have special behavior for frags. */ fragment_t *frags; /* for clients' custom use via vmvector interfaces */ void *client; } custom; } vm_area_t; /* for each thread we record all executable areas, to make it faster * to decide whether we need to flush any fragments on an munmap */ typedef struct thread_data_t { vm_area_vector_t areas; /* cached pointer to last area encountered by thread */ vm_area_t *last_area; /* FIXME: for locality would be nice to have per-thread last_shared_area * (cannot put shared in private last_area, that would void its usefulness * since couldn't tell if area really in shared list or not) * but then have to update all other threads whenever change shared * vmarea vector, so for now we use a global last_area */ /* cached pointer of a PC in the last page decoded by thread -- set only * in thread-private structures, not in shared structures like shared_data */ app_pc last_decode_area_page_pc; bool last_decode_area_valid; /* since no sentinel exists */ #ifdef PROGRAM_SHEPHERDING uint thrown_exceptions; /* number of responses to execution violations */ #endif } thread_data_t; #define SHOULD_LOCK_VECTOR(v) \ (TEST(VECTOR_SHARED, (v)->flags) && \ !TEST(VECTOR_NO_LOCK, (v)->flags) && \ !self_owns_write_lock(&(v)->lock)) #define LOCK_VECTOR(v, release_lock, RW) do { \ if (SHOULD_LOCK_VECTOR(v)) { \ (release_lock) = true; \ RW##_lock(&(v)->lock); \ } \ else \ (release_lock) = false; \ } while (0); #define UNLOCK_VECTOR(v, release_lock, RW) do { \ if ((release_lock)) { \ ASSERT(TEST(VECTOR_SHARED, (v)->flags)); \ ASSERT(!TEST(VECTOR_NO_LOCK, (v)->flags)); \ ASSERT_OWN_READWRITE_LOCK(true, &(v)->lock); \ RW##_unlock(&v->lock); \ } \ } while (0); /* these two global vectors store all executable areas and all dynamo * areas (executable or otherwise). * executable_areas' custom field is used to store coarse unit info. * for a FRAG_COARSE_GRAIN region, an info struct is always present, even * if not yet executed from (initially, or after a flush). */ static vm_area_vector_t *executable_areas; static vm_area_vector_t *dynamo_areas; /* Protected by executable_areas lock; used only to delete coarse_info_t * while holding executable_areas lock during execute-less flushes * (case 10995). Extra layer of indirection to get on heap and avoid .data * unprotection. 
*/ static coarse_info_t **coarse_to_delete; /* used for DYNAMO_OPTION(handle_DR_modify), * DYNAMO_OPTION(handle_ntdll_modify) == DR_MODIFY_NOP or * DYNAMO_OPTION(patch_proof_list) */ static vm_area_vector_t *pretend_writable_areas; /* used for DYNAMO_OPTION(patch_proof_list) areas to watch */ vm_area_vector_t *patch_proof_areas; /* used for DYNAMO_OPTION(emulate_IAT_writes), though in future may be * expanded, so not just ifdef WINDOWS or ifdef PROGRAM_SHEPHERDING */ vm_area_vector_t *emulate_write_areas; /* used for DYNAMO_OPTION(IAT_convert) * IAT or GOT areas of all mapped DLLs - note the exact regions are added here. * While the IATs for modules in native_exec_areas are not added here - * note that any module's IAT may still be importing native modules. */ vm_area_vector_t *IAT_areas; /* Keeps persistent written-to and execution counts for switching back and * forth from page prot to sandboxing. */ static vm_area_vector_t *written_areas; static void free_written_area(void *data); #ifdef PROGRAM_SHEPHERDING /* for executable_if_flush and executable_if_alloc, we need a future list, so their regions * are considered executable until de-allocated -- even if written to! */ static vm_area_vector_t *futureexec_areas; # ifdef WINDOWS /* FIXME: for -xdata_rct we only need start pc called on, so htable would do, * once we have reusable htable for storing single pc */ static vm_area_vector_t *app_flushed_areas; # endif #endif /* tamper resistant region see tamper_resistant_region_add() for current use. * If needed this should be turned into a vm_area_vector_t as well. */ static app_pc tamper_resistant_region_start, tamper_resistant_region_end; /* shared_data is synchronized via either single_thread_in_DR or * the vector lock (cannot use bb_building_lock b/c both trace building * and pc translation need read access and neither can/should grab * the bb building lock, plus it's cleaner to not depend on it, and now * with -shared_traces it's not sufficient). * N.B.: the vector lock is used to protect not just the vector, but also * the whole thread_data_t struct (including last_area) and sequences * of vector operations. * Kept on the heap for selfprot (case 7957). */ static thread_data_t *shared_data; /* set in vm_areas_reset_init() */ typedef struct _pending_delete_t { #ifdef DEBUG /* record bounds of original deleted region, for debugging only */ app_pc start; app_pc end; #endif /* list of unlinked fragments that are waiting to be deleted */ fragment_t *frags; /* ref count and timestamp to determine when it's safe to delete them */ uint ref_count; uint flushtime_deleted; /* we use a simple linked list of entries */ struct _pending_delete_t *next; } pending_delete_t; /* We keep these list pointers on the heap for selfprot (case 8074). */ typedef struct _deletion_lists_t { /* Unlike private vm lists, we cannot simply mark shared_data vm areas as * deleted since new fragments come in concurrently, so we have to have a * separate list of flushed-but-not-yet-deleted areas. We can't use a * vm_area_vector_t b/c newly flushed fragments spoil our ref count by resetting * it, so we keep a linked list of fragment lists. */ pending_delete_t *shared_delete; /* We maintain the tail solely for fcache_free_pending_units() */ pending_delete_t *shared_delete_tail; /* count used for reset threshold */ uint shared_delete_count; /* shared lazy deletion: a list of fragment_t chained via next_vmarea that * are pending deletion, but are only freed when a shared deletion event * shows that it is safe to do so. 
*/ fragment_t *lazy_delete_list; /* stores the end of the list, for appending */ fragment_t *lazy_delete_tail; /* stores the length of the lazy list */ uint lazy_delete_count; /* ensure only one thread tries to move to pending deletion list */ bool move_pending; } deletion_lists_t; static deletion_lists_t *todelete; typedef struct _last_deallocated_t { /* case 9330 - we want to detect races during DLL unloads, and to * silence a reported violation during unload. At least DLLs are * expected to be already serialized by the loader so keeping only * one is sufficient (note Win2K3 doesn't hold lock only during * process initialization). We'll also keep references to the * last DLL that was unloaded for diagnostics. Although, that is * not reliable enough when multiple DLLs are involved - case 6061 * should be used for better tracking after unload. */ /* Yet loss of integrity is tolerable, as long as detected. Since * we currently mark all mappings they are not necessarily * serialized (and potentially other apps can directly map, so * can't really count on the loader lock for integrity). We * should make sure that we do not set unload_in_progress unless * [last_unload_base, last_unload_size) is really still the * current module. */ bool unload_in_progress; app_pc last_unload_base; size_t last_unload_size; /* FIXME: we may want to overload the above or add different * fields for non image (MEM_MAPPED) unmaps, and DGC (MEM_PRIVATE) * frees. Note that we avoid keeping lists of active unloads, or * even to deal with case 9371 we would need intersection of * overlapping app syscalls. If we serialize app syscalls as * proposed case 545 a single one will be sufficient. */ } last_deallocated_t; static last_deallocated_t *last_deallocated; /* synchronization currently used only for the contents of * last_deallocated: last_unload_base and last_unload_size */ DECLARE_CXTSWPROT_VAR(static mutex_t last_deallocated_lock, INIT_LOCK_FREE(last_deallocated_lock)); /* synchronization for shared_delete, not a rw lock since readers usually write */ DECLARE_CXTSWPROT_VAR(mutex_t shared_delete_lock, INIT_LOCK_FREE(shared_delete_lock)); /* synchronization for the lazy deletion list */ DECLARE_CXTSWPROT_VAR(static mutex_t lazy_delete_lock, INIT_LOCK_FREE(lazy_delete_lock)); /* multi_entry_t allocation is either global or local heap */ #define MULTI_ALLOC_DC(dc, flags) FRAGMENT_ALLOC_DC(dc, flags) #define GET_DATA(dc, flags) \ (((dc) == GLOBAL_DCONTEXT || TEST(FRAG_SHARED, (flags))) ? shared_data : \ (thread_data_t *) (dc)->vm_areas_field) #define GET_VECTOR(dc, flags) \ (((dc) == GLOBAL_DCONTEXT || TEST(FRAG_SHARED, (flags))) ? \ (TEST(FRAG_WAS_DELETED, (flags)) ? 
NULL : &shared_data->areas) : \ (&((thread_data_t *)(dc)->vm_areas_field)->areas)) #define SHARED_VECTOR_RWLOCK(v, rw, op) do { \ if (TEST(VECTOR_SHARED, (v)->flags)) { \ ASSERT(SHARED_FRAGMENTS_ENABLED()); \ rw##_##op(&(v)->lock); \ } \ } while (0) #define ASSERT_VMAREA_DATA_PROTECTED(data, RW) \ ASSERT_OWN_##RW##_LOCK((data == shared_data && \ !INTERNAL_OPTION(single_thread_in_DR)), \ &shared_data->areas.lock) /* FIXME: find a way to assert that an area by itself is synchronized if * it points into a vector for the routines that take in only areas */ #ifdef DEBUG # define ASSERT_VMAREA_VECTOR_PROTECTED(v, RW) do { \ ASSERT_OWN_##RW##_LOCK(SHOULD_LOCK_VECTOR(v) && \ !dynamo_exited, &(v)->lock); \ if ((v) == dynamo_areas) { \ ASSERT(dynamo_areas_uptodate || dynamo_areas_synching); \ } \ } while (0); #else # define ASSERT_VMAREA_VECTOR_PROTECTED(v, RW) /* nothing */ #endif /* size of security violation string - must be at least 16 */ #define MAXIMUM_VIOLATION_NAME_LENGTH 16 #define VMVECTOR_INITIALIZE_VECTOR(v, flags, lockname) do { \ vmvector_init_vector((v), (flags)); \ ASSIGN_INIT_READWRITE_LOCK_FREE((v)->lock, lockname); \ } while (0); /* forward declarations */ static void vmvector_free_vector(dcontext_t *dcontext, vm_area_vector_t *v); static void vm_area_clean_fraglist(dcontext_t *dcontext, vm_area_t *area); static bool lookup_addr(vm_area_vector_t *v, app_pc addr, vm_area_t **area); #if defined(DEBUG) && defined(INTERNAL) static void print_fraglist(dcontext_t *dcontext, vm_area_t *area, const char *prefix); static void print_written_areas(file_t outf); #endif #ifdef DEBUG static void exec_area_bounds_match(dcontext_t *dcontext, thread_data_t *data); #endif static void update_dynamo_vm_areas(bool have_writelock); static void dynamo_vm_areas_start_reading(void); static void dynamo_vm_areas_done_reading(void); #ifdef PROGRAM_SHEPHERDING static bool remove_futureexec_vm_area(app_pc start, app_pc end); DECLARE_CXTSWPROT_VAR(static mutex_t threads_killed_lock, INIT_LOCK_FREE(threads_killed_lock)); void mark_unload_future_added(app_pc module_base, size_t size); #endif static void vm_area_coarse_region_freeze(dcontext_t *dcontext, coarse_info_t *info, vm_area_t *area, bool in_place); #ifdef SIMULATE_ATTACK /* synch simulate_at string parsing */ DECLARE_CXTSWPROT_VAR(static mutex_t simulate_lock, INIT_LOCK_FREE(simulate_lock)); #endif /* used to determine when we need to do another heap walk to keep * dynamo vm areas up to date (can't do it incrementally b/c of * circular dependencies). * protected for both read and write by dynamo_areas->lock */ /* Case 3045: areas inside the vmheap reservation are not added to the list, * so the vector is considered uptodate until we run out of reservation */ DECLARE_FREQPROT_VAR(static bool dynamo_areas_uptodate, true); #ifdef DEBUG /* used for debugging to tell when uptodate can be false. * protected for both read and write by dynamo_areas->lock */ DECLARE_FREQPROT_VAR(static bool dynamo_areas_synching, false); #endif /* HACK to make dynamo_areas->lock recursive * protected for both read and write by dynamo_areas->lock * FIXME: provide general rwlock w/ write portion recursive */ DECLARE_CXTSWPROT_VAR(uint dynamo_areas_recursion, 0); /* used for DR area debugging */ bool vm_areas_exited = false; /*************************************************** * flushing by walking entire hashtable is too slow, so we keep a list of * all fragments in each region. * to save memory, we use the fragment_t struct as the linked list entry * for these lists. 
However, some fragments are on multiple lists due to * crossing boundaries (usually traces). For those, the other entries are * pointed to by an "also" field, and the entries themselves use this struct, * which plays games (similar to fcache's empty_slot_t) to be able to be used * like a fragment_t struct in the lists. * * this is better than the old fragment_t->app_{min,max}_pc performance wise, * and granularity-wise for blocks that bounce over regions, but worse * granularity-wise since if want to flush singe page in text * section, will end up flushing entire region. especially scary in face of * merges of adjacent regions, but merges are rare for images since * they usually have more than just text, so texts aren't adjacent. * * FIXME: better way, now that fcache supports multiple units, is to have * a separate unit for each source vmarea. common case will be a flush to * an un-merged or clipped area, so just toss whole unit. */ typedef struct _multi_entry_t { fragment_t *f; /* backpointer */ /* flags MUST be at same location as fragment_t->flags * we set flags==FRAG_IS_EXTRA_VMAREA to indicate a multi_entry_t * we also use FRAG_SHARED to indicate that a multi_entry_t is on global heap */ uint flags; /* officially all list entries are fragment_t *, really some are multi_entry_t */ fragment_t *next_vmarea; fragment_t *prev_vmarea; fragment_t *also_vmarea; /* if in multiple areas */ /* need to be able to look up vmarea: area not stored since vmareas * shift and merge, so we store original pc */ app_pc pc; } multi_entry_t; /* macros to make dealing with both fragment_t and multi_entry_t easier */ #define FRAG_MULTI(f) (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) #define FRAG_MULTI_INIT(f) (TESTALL((FRAG_IS_EXTRA_VMAREA|FRAG_IS_EXTRA_VMAREA_INIT), (f)->flags)) #define FRAG_NEXT(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \ ((multi_entry_t *)(f))->next_vmarea : (f)->next_vmarea) #define FRAG_NEXT_ASSIGN(f, val) do { \ if (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) \ ((multi_entry_t *)(f))->next_vmarea = (val); \ else \ (f)->next_vmarea = (val); \ } while (0) #define FRAG_PREV(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \ ((multi_entry_t *)(f))->prev_vmarea : (f)->prev_vmarea) #define FRAG_PREV_ASSIGN(f, val) do { \ if (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) \ ((multi_entry_t *)(f))->prev_vmarea = (val); \ else \ (f)->prev_vmarea = (val); \ } while (0) /* Case 8419: also_vmarea is invalid once we 1st-stage-delete a fragment */ #define FRAG_ALSO(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \ ((multi_entry_t *)(f))->also_vmarea : \ (ASSERT(!TEST(FRAG_WAS_DELETED, (f)->flags)), (f)->also.also_vmarea)) /* Only call this one to avoid the assert when you know it's safe */ #define FRAG_ALSO_DEL_OK(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \ ((multi_entry_t *)(f))->also_vmarea : (f)->also.also_vmarea) #define FRAG_ALSO_ASSIGN(f, val) do { \ if (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) \ ((multi_entry_t *)(f))->also_vmarea = (val); \ else { \ ASSERT(!TEST(FRAG_WAS_DELETED, (f)->flags)); \ (f)->also.also_vmarea = (val); \ } \ } while (0) /* assumption: if multiple units, fragment_t is on list of region owning tag */ #define FRAG_PC(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \ ((multi_entry_t *)(f))->pc : (f)->tag) #define FRAG_PC_ASSIGN(f, val) do { \ if (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) \ ((multi_entry_t *)(f))->pc = (val); \ else \ ASSERT_NOT_REACHED(); \ } while (0) #define FRAG_FRAG(fr) ((TEST(FRAG_IS_EXTRA_VMAREA, (fr)->flags)) ? 
\ ((multi_entry_t *)(fr))->f : (fr)) #define FRAG_FRAG_ASSIGN(fr, val) do { \ if (TEST(FRAG_IS_EXTRA_VMAREA, (fr)->flags)) \ ((multi_entry_t *)(fr))->f = (val); \ else \ ASSERT_NOT_REACHED(); \ } while (0) #define FRAG_ID(fr) ((TEST(FRAG_IS_EXTRA_VMAREA, (fr)->flags)) ? \ ((multi_entry_t *)(fr))->f->id : (fr)->id) /***************************************************/ /* FIXME : is problematic to page align subpage regions */ static void vm_make_writable(byte *pc, size_t size) { byte *start_pc = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE); size_t final_size = ALIGN_FORWARD(size + (pc - start_pc), PAGE_SIZE); DEBUG_DECLARE(bool ok = ) make_writable(start_pc, final_size); ASSERT(ok); ASSERT(INTERNAL_OPTION(hw_cache_consistency)); } static void vm_make_unwritable(byte *pc, size_t size) { byte *start_pc = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE); size_t final_size = ALIGN_FORWARD(size + (pc - start_pc), PAGE_SIZE); ASSERT(INTERNAL_OPTION(hw_cache_consistency)); make_unwritable(start_pc, final_size); /* case 8308: We should never call vm_make_unwritable if * -sandbox_writable is on, or if -sandbox_non_text is on and this * is a non-text region. */ ASSERT(!DYNAMO_OPTION(sandbox_writable)); DOCHECK(1, { if (DYNAMO_OPTION(sandbox_non_text)) { app_pc modbase = get_module_base(pc); ASSERT(modbase != NULL && is_range_in_code_section(modbase, pc, pc + size, NULL, NULL)); } }); } /* since dynamorio changes some readwrite memory regions to read only, * this changes all regions memory permissions back to what they should be, * since dynamorio uses this mechanism to ensure code cache coherency, * once this method is called stale code could be executed out of the * code cache */ void revert_memory_regions() { int i; /* executable_areas doesn't exist in thin_client mode. */ ASSERT(!DYNAMO_OPTION(thin_client)); read_lock(&executable_areas->lock); for (i = 0; i < executable_areas->length; i++) { if (DR_MADE_READONLY(executable_areas->buf[i].vm_flags)) { /* this is a region that dynamorio has marked read only, fix */ LOG(GLOBAL, LOG_VMAREAS, 1, " fixing permissions for RW executable area "PFX"-"PFX" %s\n", executable_areas->buf[i].start, executable_areas->buf[i].end, executable_areas->buf[i].comment); vm_make_writable(executable_areas->buf[i].start, executable_areas->buf[i].end - executable_areas->buf[i].start); } } read_unlock(&executable_areas->lock); } static void print_vm_flags(uint vm_flags, uint frag_flags, file_t outf) { print_file(outf, " %s%s%s%s", (vm_flags & VM_WRITABLE) != 0 ? "W" : "-", (vm_flags & VM_WAS_FUTURE) != 0 ? "F" : "-", (frag_flags & FRAG_SELFMOD_SANDBOXED) != 0 ? "S" : "-", TEST(FRAG_COARSE_GRAIN, frag_flags) ? "C" : "-"); #ifdef PROGRAM_SHEPHERDING print_file(outf, "%s%s", TEST(VM_PATTERN_REVERIFY, vm_flags) ? "P" : "-", (frag_flags & FRAG_DYNGEN) != 0 ? 
"D" : "-"); #endif } /* ok to pass NULL for v, only used to identify use of custom field */ static void print_vm_area(vm_area_vector_t *v, vm_area_t *area, file_t outf, const char *prefix) { print_file(outf, "%s"PFX"-"PFX, prefix, area->start, area->end); print_vm_flags(area->vm_flags, area->frag_flags, outf); if (v == executable_areas && TEST(FRAG_COARSE_GRAIN, area->frag_flags)) { coarse_info_t *info = (coarse_info_t *) area->custom.client; if (info != NULL) { if (info->persisted) print_file(outf, "R"); else if (info->frozen) print_file(outf, "Z"); else print_file(outf, "-"); } } #ifdef DEBUG print_file(outf, " %s", area->comment); DOLOG(1, LOG_VMAREAS, { IF_NO_MEMQUERY(extern vm_area_vector_t *all_memory_areas;) app_pc modbase = /* avoid rank order violation */ IF_NO_MEMQUERY(v == all_memory_areas ? NULL :) /* i#1649: avoid rank order for dynamo_areas */ (v == dynamo_areas ? NULL : get_module_base(area->start)); if (modbase != NULL && /* avoid rank order violations */ v != dynamo_areas && v != written_areas && /* we free module list before vmareas */ !dynamo_exited_and_cleaned && is_mapped_as_image(modbase)/*avoid asserts in getting name */) { const char *name; os_get_module_info_lock(); os_get_module_name(modbase, &name); print_file(outf, " %s", name == NULL ? "" : name); os_get_module_info_unlock(); } }); #endif if (v == written_areas) { ro_vs_sandbox_data_t *ro2s = (ro_vs_sandbox_data_t *) area->custom.client; #ifdef DEBUG if (ro2s != NULL) { /* can be null if in middle of adding */ uint tot_w = ro2s->ro2s_xfers * DYNAMO_OPTION(ro2sandbox_threshold); uint tot_s = ro2s->s2ro_xfers * DYNAMO_OPTION(sandbox2ro_threshold); print_file(outf, " w %3d, %3d tot; x %3d, %5d tot; ro2s %d, s2ro %d", ro2s->written_count, tot_w, ro2s->selfmod_execs, tot_s, ro2s->ro2s_xfers, ro2s->s2ro_xfers); } #else print_file(outf, " written %3d, exec %5d", ro2s->written_count, ro2s->selfmod_execs); #endif } print_file(outf, "\n"); } /* Assumes caller holds v->lock for coherency */ static void print_vm_areas(vm_area_vector_t *v, file_t outf) { int i; ASSERT_VMAREA_VECTOR_PROTECTED(v, READWRITE); for(i = 0; i < v->length; i++) { print_vm_area(v, &v->buf[i], outf, " "); } } #if defined(DEBUG) && defined(INTERNAL) static void print_contig_vm_areas(vm_area_vector_t *v, app_pc start, app_pc end, file_t outf, const char *prefix) { vm_area_t *new_area; app_pc pc = start; do { lookup_addr(v, pc, &new_area); if (new_area == NULL) break; print_vm_area(v, new_area, outf, prefix); pc = new_area->end + 1; } while (new_area->end < end); } #endif #if defined(DEBUG) && defined(INTERNAL) static void print_pending_list(file_t outf) { pending_delete_t *pend; int i; ASSERT_OWN_MUTEX(true, &shared_delete_lock); for (i = 0, pend = todelete->shared_delete; pend != NULL; i++, pend = pend->next) { print_file(outf, "%d: "PFX"-"PFX" ref=%d, stamp=%d\n", i, pend->start, pend->end, pend->ref_count, pend->flushtime_deleted); } } #endif /* If v requires a lock and the calling thread does not hold that lock, * this routine acquires the lock and returns true; else it returns false. 
*/ static bool writelock_if_not_already(vm_area_vector_t *v) { if (TEST(VECTOR_SHARED, v->flags) && !self_owns_write_lock(&v->lock)) { SHARED_VECTOR_RWLOCK(v, write, lock); return true; } return false; } static void vm_area_vector_check_size(vm_area_vector_t *v) { /* only called by add_vm_area which does the assert that the vector is * protected */ /* check if at capacity */ if (v->size == v->length){ if (v->length == 0) { v->size = INTERNAL_OPTION(vmarea_initial_size); v->buf = (vm_area_t*) global_heap_alloc(v->size*sizeof(struct vm_area_t) HEAPACCT(ACCT_VMAREAS)); } else { /* FIXME: case 4471 we should be doubling size here */ int new_size = (INTERNAL_OPTION(vmarea_increment_size) + v->length); STATS_INC(num_vmareas_resized); v->buf = global_heap_realloc(v->buf, v->size, new_size, sizeof(struct vm_area_t) HEAPACCT(ACCT_VMAREAS)); v->size = new_size; } ASSERT(v->buf != NULL); } } static void vm_area_merge_fraglists(vm_area_t *dst, vm_area_t *src) { /* caller must hold write lock for vector of course: FIXME: assert that here */ LOG(THREAD_GET, LOG_VMAREAS, 2, "\tmerging frag lists for "PFX"-"PFX" and "PFX"-"PFX"\n", src->start, src->end, dst->start, dst->end); if (dst->custom.frags == NULL) dst->custom.frags = src->custom.frags; else if (src->custom.frags == NULL) return; else { /* put src's frags at end of dst's frags */ fragment_t *top1 = dst->custom.frags; fragment_t *top2 = src->custom.frags; fragment_t *tmp = FRAG_PREV(top1); FRAG_NEXT_ASSIGN(tmp, top2); FRAG_PREV_ASSIGN(top1, FRAG_PREV(top2)); FRAG_PREV_ASSIGN(top2, tmp); DOLOG(4, LOG_VMAREAS, { print_fraglist(get_thread_private_dcontext(), dst, "after merging fraglists:"); }); } } /* Assumes caller holds v->lock, if necessary. * Does not return the area added since it may be merged or split depending * on existing areas-> * If a last_area points into this vector, the caller must make sure to * clear or update the last_area pointer. * FIXME: make it easier to keep them in synch -- too easy to add_vm_area * somewhere to a thread vector and forget to clear last_area. * Adds a new area to v, merging it with adjacent areas of the same type. * A new area is only allowed to overlap an old area of a different type if it * meets certain criteria (see asserts below). For VM_WAS_FUTURE and * VM_ONCE_ONLY we may clear the flag from an existing region if the new * region doesn't have the flag and overlaps the existing region. Otherwise * the new area is split such that the overlapping portion remains part of * the old area. This tries to keep entire new area from becoming selfmod * for instance. FIXME : for VM_WAS_FUTURE and VM_ONCE_ONLY may want to split * region if only paritally overlapping * * FIXME: change add_vm_area to return NULL when merged, and otherwise * return the new complete area, so callers don't have to do a separate lookup * to access the added area. */ static void add_vm_area(vm_area_vector_t *v, app_pc start, app_pc end, uint vm_flags, uint frag_flags, void *data _IF_DEBUG(const char *comment)) { int i, j, diff; /* if we have overlap, we extend an existing area -- else we add a new area */ int overlap_start = -1, overlap_end = -1; DEBUG_DECLARE(uint flagignore;) IF_UNIX(IF_DEBUG(IF_NO_MEMQUERY(extern vm_area_vector_t *all_memory_areas;))) ASSERT(start < end); ASSERT_VMAREA_VECTOR_PROTECTED(v, WRITE); LOG(GLOBAL, LOG_VMAREAS, 4, "in add_vm_area%s "PFX" "PFX" %s\n", (v == executable_areas ? " executable_areas" : (v == IF_LINUX_ELSE(all_memory_areas, NULL) ? " all_memory_areas" : (v == dynamo_areas ? 
" dynamo_areas" : ""))), start, end, comment); /* N.B.: new area could span multiple existing areas! */ for (i = 0; i < v->length; i++) { /* look for overlap, or adjacency of same type (including all flags, and never * merge adjacent if keeping write counts) */ if ((start < v->buf[i].end && end > v->buf[i].start) || (start <= v->buf[i].end && end >= v->buf[i].start && vm_flags == v->buf[i].vm_flags && frag_flags == v->buf[i].frag_flags && /* never merge coarse-grain */ !TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags) && !TEST(VECTOR_NEVER_MERGE_ADJACENT, v->flags) && (v->should_merge_func == NULL || v->should_merge_func(true/*adjacent*/, data, v->buf[i].custom.client)))) { ASSERT(!(start < v->buf[i].end && end > v->buf[i].start) || !TEST(VECTOR_NEVER_OVERLAP, v->flags)); if (overlap_start == -1) { /* assume we'll simply expand an existing area rather than * add a new one -- we'll reset this if we hit merge conflicts */ overlap_start = i; } /* overlapping regions of different properties are often * problematic so we add a lot of debugging output */ DOLOG(4, LOG_VMAREAS, { LOG(GLOBAL, LOG_VMAREAS, 1, "==================================================\n" "add_vm_area "PFX"-"PFX" %s %x-%x overlaps "PFX"-"PFX" %s %x-%x\n", start, end, comment, vm_flags, frag_flags, v->buf[i].start, v->buf[i].end, v->buf[i].comment, v->buf[i].vm_flags, v->buf[i].frag_flags); print_vm_areas(v, GLOBAL); /* rank order problem if holding heap_unit_lock, so only print * if not holding a lock for v right now, though ok to print * for shared vm areas since its lock is higher than the lock * for executable/written areas */ if (v != dynamo_areas && (!TEST(VECTOR_SHARED, v->flags) || v == &shared_data->areas)) { LOG(GLOBAL, LOG_VMAREAS, 1, "\nexecutable areas:\n"); print_executable_areas(GLOBAL); LOG(GLOBAL, LOG_VMAREAS, 1, "\nwritten areas:\n"); print_written_areas(GLOBAL); } LOG(GLOBAL, LOG_VMAREAS, 1, "==================================================\n\n"); }); /* we have some restrictions on overlapping regions with * different flags */ /* no restrictions on WAS_FUTURE flag, but if new region is * not was future and old region is then should drop from old * region FIXME : partial overlap? we don't really care about * this flag anyways */ if (TEST(VM_WAS_FUTURE, v->buf[i].vm_flags) && !TEST(VM_WAS_FUTURE, vm_flags)) { v->buf[i].vm_flags &= ~VM_WAS_FUTURE; LOG(GLOBAL, LOG_VMAREAS, 1, "Warning : removing was_future flag from area "PFX "-"PFX" %s that overlaps new area "PFX"-"PFX" %s\n", v->buf[i].start, v->buf[i].end, v->buf[i].comment, start, end, comment); } /* no restrictions on ONCE_ONLY flag, but if new region is not * should drop fom existing region FIXME : partial overlap? 
is * not much of an additional security risk */ if (TEST(VM_ONCE_ONLY, v->buf[i].vm_flags) && !TEST(VM_ONCE_ONLY, vm_flags)) { v->buf[i].vm_flags &= ~VM_ONCE_ONLY; LOG(GLOBAL, LOG_VMAREAS, 1, "Warning : removing once_only flag from area "PFX "-"PFX" %s that overlaps new area "PFX"-"PFX" %s\n", v->buf[i].start, v->buf[i].end, v->buf[i].comment, start, end, comment); } /* shouldn't be adding unmod image over existing not unmod image, * reverse could happen with os region merging though */ ASSERT(TEST(VM_UNMOD_IMAGE, v->buf[i].vm_flags) || !TEST(VM_UNMOD_IMAGE, vm_flags)); /* for VM_WRITABLE only allow new region to not be writable and * existing region to be writable to handle cases of os region * merging due to our consistency protection changes */ ASSERT(TEST(VM_WRITABLE, v->buf[i].vm_flags) || !TEST(VM_WRITABLE, vm_flags) || !INTERNAL_OPTION(hw_cache_consistency)); /* FIXME: case 7877: if new is VM_MADE_READONLY and old is not, we * must mark old overlapping portion as VM_MADE_READONLY. Things only * worked now b/c VM_MADE_READONLY==VM_WRITABLE, so we can add * pageprot regions that overlap w/ selfmod. */ #ifdef PROGRAM_SHEPHERDING /* !VM_PATTERN_REVERIFY trumps having the flag on, so for new having * the flag and old not, we're fine, but when old has it we'd like * to remove it from the overlap portion: FIXME: need better merging * control, also see all the partial overlap fixmes above. * for this flag not a big deal, just a possible perf hit as we * re-check every time. */ #endif /* disallow any other vm_flag differences */ DODEBUG({ flagignore = VM_UNMOD_IMAGE | VM_WAS_FUTURE | VM_ONCE_ONLY | VM_WRITABLE; }); #ifdef PROGRAM_SHEPHERDING DODEBUG({ flagignore = flagignore | VM_PATTERN_REVERIFY; }); #endif ASSERT((v->buf[i].vm_flags & ~flagignore) == (vm_flags & ~flagignore)); /* new region must be more innocent with respect to selfmod */ ASSERT(TEST(FRAG_SELFMOD_SANDBOXED, v->buf[i].frag_flags) || !TEST(FRAG_SELFMOD_SANDBOXED, frag_flags)); /* disallow other frag_flag differences */ #ifndef PROGRAM_SHEPHERDING ASSERT((v->buf[i].frag_flags & ~FRAG_SELFMOD_SANDBOXED) == (frag_flags & ~FRAG_SELFMOD_SANDBOXED)); #else # ifdef DGC_DIAGNOSTICS /* FIXME : no restrictions on differing FRAG_DYNGEN_RESTRICTED * flags? */ ASSERT((v->buf[i].frag_flags & ~(FRAG_SELFMOD_SANDBOXED|FRAG_DYNGEN|FRAG_DYNGEN_RESTRICTED)) == (frag_flags & ~(FRAG_SELFMOD_SANDBOXED|FRAG_DYNGEN|FRAG_DYNGEN_RESTRICTED))); # else ASSERT((v->buf[i].frag_flags & ~(FRAG_SELFMOD_SANDBOXED|FRAG_DYNGEN)) == (frag_flags & ~(FRAG_SELFMOD_SANDBOXED|FRAG_DYNGEN))); # endif /* shouldn't add non-dyngen overlapping existing dyngen, FIXME * is the reverse possible? 
right now we allow it */ ASSERT(TEST(FRAG_DYNGEN, frag_flags) || !TEST(FRAG_DYNGEN, v->buf[i].frag_flags)); #endif /* Never split FRAG_COARSE_GRAIN */ ASSERT(TEST(FRAG_COARSE_GRAIN, frag_flags) || !TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags)); /* for overlapping region: must overlap same type -- else split */ if ((vm_flags != v->buf[i].vm_flags || frag_flags != v->buf[i].frag_flags) && (v->should_merge_func == NULL || !v->should_merge_func(false/*not adjacent*/, data, v->buf[i].custom.client))) { LOG(GLOBAL, LOG_VMAREAS, 1, "add_vm_area "PFX"-"PFX" %s vm_flags=0x%08x " "frag_flags=0x%08x\n overlaps diff type "PFX"-"PFX" %s" "vm_flags=0x%08x frag_flags=0x%08x\n in vect at "PFX"\n", start, end, comment, vm_flags, frag_flags, v->buf[i].start, v->buf[i].end, v->buf[i].comment, v->buf[i].vm_flags, v->buf[i].frag_flags, v); LOG(GLOBAL, LOG_VMAREAS, 3, "before splitting b/c adding "PFX"-"PFX":\n", start, end); DOLOG(3, LOG_VMAREAS, { print_vm_areas(v, GLOBAL); }); /* split off the overlapping part from the new region * reasoning: old regions get marked selfmod, then see new code, * its region overlaps old selfmod -- don't make new all selfmod, * split off the part that hasn't been proved selfmod yet. * since we never split the old region, we don't need to worry * about splitting its frags list. */ if (start < v->buf[i].start) { if (end > v->buf[i].end) { void *add_data = data; /* need two areas, one for either side */ LOG(GLOBAL, LOG_VMAREAS, 3, "=> will add "PFX"-"PFX" after i\n", v->buf[i].end, end); /* safe to recurse here, new area will be after the area * we are currently looking at in the vector */ if (v->split_payload_func != NULL) add_data = v->split_payload_func(data); add_vm_area(v, v->buf[i].end, end, vm_flags, frag_flags, add_data _IF_DEBUG(comment)); } /* if had been merging, let this routine finish that off -- else, * need to add a new area */ end = v->buf[i].start; if (overlap_start == i) { /* no merging */ overlap_start = -1; } LOG(GLOBAL, LOG_VMAREAS, 3, "=> will add/merge "PFX"-"PFX" before i\n", start, end); overlap_end = i; break; } else if (end > v->buf[i].end) { /* shift area of consideration to end of i, and keep going, * can't act now since don't know areas overlapping beyond i */ LOG(GLOBAL, LOG_VMAREAS, 3, "=> ignoring "PFX"-"PFX", only adding "PFX"-"PFX"\n", start, v->buf[i].end, v->buf[i].end, end); start = v->buf[i].end; /* reset overlap vars */ ASSERT(overlap_start <= i); overlap_start = -1; } else { /* completely inside -- ok, we'll leave it that way and won't split */ LOG(GLOBAL, LOG_VMAREAS, 3, "=> ignoring "PFX"-"PFX", forcing to be part of "PFX"-"PFX"\n", start, end, v->buf[i].start, v->buf[i].end); } ASSERT(end > start); } } else if (overlap_start > -1) { overlap_end = i; /* not inclusive */ break; } else if (end <= v->buf[i].start) break; } if (overlap_start == -1) { /* brand-new area, goes before v->buf[i] */ struct vm_area_t new_area = {start, end, vm_flags, frag_flags, /* rest 0 */}; #ifdef DEBUG /* get comment */ size_t len = strlen(comment); ASSERT(len < 1024); new_area.comment = (char *) global_heap_alloc(len+1 HEAPACCT(ACCT_VMAREAS)); strncpy(new_area.comment, comment, len); new_area.comment[len] = '\0'; /* if max no null */ #endif new_area.custom.client = data; LOG(GLOBAL, LOG_VMAREAS, 3, "=> adding "PFX"-"PFX"\n", start, end); vm_area_vector_check_size(v); /* shift subsequent entries */ for (j = v->length; j > i; j--) v->buf[j] = v->buf[j-1]; v->buf[i] = new_area; /* assumption: no overlaps between areas in list! 
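 * (The DEBUG check just below verifies that invariant by comparing the new
 * entry against both of its neighbors before v->length is incremented.)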
*/ #ifdef DEBUG if (!((i == 0 || v->buf[i-1].end <= v->buf[i].start) && (i == v->length || v->buf[i].end <= v->buf[i+1].start))) { LOG(GLOBAL, LOG_VMAREAS, 1, "ERROR: add_vm_area illegal overlap "PFX" "PFX" %s\n", start, end, comment); print_vm_areas(v, GLOBAL); } #endif ASSERT((i == 0 || v->buf[i-1].end <= v->buf[i].start) && (i == v->length || v->buf[i].end <= v->buf[i+1].start)); v->length++; STATS_TRACK_MAX(max_vmareas_length, v->length); DOSTATS({ if (v == dynamo_areas) STATS_TRACK_MAX(max_DRareas_length, v->length); else if (v == executable_areas) STATS_TRACK_MAX(max_execareas_length, v->length); }); #ifdef WINDOWS DOSTATS({ extern vm_area_vector_t *loaded_module_areas; if (v == loaded_module_areas) STATS_TRACK_MAX(max_modareas_length, v->length); }); #endif } else { /* overlaps one or more areas, modify first to equal entire range, * delete rest */ if (overlap_end == -1) overlap_end = v->length; LOG(GLOBAL, LOG_VMAREAS, 3, "=> changing "PFX"-"PFX, v->buf[overlap_start].start, v->buf[overlap_start].end); if (start < v->buf[overlap_start].start) v->buf[overlap_start].start = start; if (end > v->buf[overlap_end-1].end) v->buf[overlap_start].end = end; else v->buf[overlap_start].end = v->buf[overlap_end-1].end; if (v->merge_payload_func != NULL) { v->buf[overlap_start].custom.client = v->merge_payload_func(data, v->buf[overlap_start].custom.client); } else if (v->free_payload_func != NULL) { /* if a merge exists we assume it will free if necessary */ v->free_payload_func(v->buf[overlap_start].custom.client); } LOG(GLOBAL, LOG_VMAREAS, 3, " to "PFX"-"PFX"\n", v->buf[overlap_start].start, v->buf[overlap_start].end); /* when merge, use which comment? could combine them all * FIXME */ /* now delete */ for (i = overlap_start+1; i < overlap_end; i++) { LOG(GLOBAL, LOG_VMAREAS, 3, "=> completely removing "PFX"-"PFX" %s\n", v->buf[i].start, v->buf[i].end, v->buf[i].comment); #ifdef DEBUG global_heap_free(v->buf[i].comment, strlen(v->buf[i].comment)+1 HEAPACCT(ACCT_VMAREAS)); #endif if (v->merge_payload_func != NULL) { v->buf[overlap_start].custom.client = v->merge_payload_func(v->buf[overlap_start].custom.client, v->buf[i].custom.client); } else if (v->free_payload_func != NULL) { /* if a merge exists we assume it will free if necessary */ v->free_payload_func(v->buf[i].custom.client); } /* merge frags lists */ /* FIXME: switch this to a merge_payload_func. It won't be able * to print out the bounds, and it will have to do the work of * vm_area_clean_fraglist() on each merge, but we could then get * rid of VECTOR_FRAGMENT_LIST. 
*/ if (TEST(VECTOR_FRAGMENT_LIST, v->flags) && v->buf[i].custom.frags != NULL) vm_area_merge_fraglists(&v->buf[overlap_start], &v->buf[i]); } diff = overlap_end - (overlap_start+1); for (i = overlap_start+1; i < v->length-diff; i++) v->buf[i] = v->buf[i+diff]; v->length -= diff; i = overlap_start; /* for return value */ if (TEST(VECTOR_FRAGMENT_LIST, v->flags) && v->buf[i].custom.frags != NULL) { dcontext_t *dcontext = get_thread_private_dcontext(); ASSERT(dcontext != NULL); /* have to remove all alsos that are now in same area as frag */ vm_area_clean_fraglist(dcontext, &v->buf[i]); } } DOLOG(5, LOG_VMAREAS, { print_vm_areas(v, GLOBAL); }); } static void adjust_coarse_unit_bounds(vm_area_t *area, bool if_invalid) { coarse_info_t *info = (coarse_info_t *) area->custom.client; ASSERT(TEST(FRAG_COARSE_GRAIN, area->frag_flags)); ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); ASSERT(info != NULL); if (info == NULL) /* be paranoid */ return; /* FIXME: we'd like to grab info->lock but we have a rank order w/ * exec_areas lock -- so instead we rely on all-thread-synch flushing * being the only reason to get here; an empty flush won't have synchall, * but we won't be able to get_executable_area_coarse_info w/o the * exec areas write lock so we're ok there. */ ASSERT(dynamo_all_threads_synched || (!TEST(VM_EXECUTED_FROM, area->vm_flags) && READWRITE_LOCK_HELD(&executable_areas->lock))); if (!if_invalid && TEST(PERSCACHE_CODE_INVALID, info->flags)) { /* Don't change bounds of primary or secondary; we expect vm_area_t to * be merged back to this size post-rebind; if not, we'll throw out this * pcache at validation time due to not matching the vm_area_t. */ return; } LOG(THREAD_GET, LOG_VMAREAS, 3, "%s: "PFX"-"PFX" vs area "PFX"-"PFX"\n", __FUNCTION__, info->base_pc, info->end_pc, area->start, area->end); while (info != NULL) { /* loop over primary and secondary unit */ /* We should have reset this coarse info when flushing */ ASSERT((info->cache == NULL && !info->frozen && !info->persisted) || /* i#1652: if nothing was flushed a pcache may remain */ (info->base_pc == area->start && info->end_pc == area->end)); /* No longer covers the removed region */ if (info->base_pc < area->start) info->base_pc = area->start; if (info->end_pc > area->end) info->end_pc = area->end; ASSERT(info->frozen || info->non_frozen == NULL); info = info->non_frozen; ASSERT(info == NULL || !info->frozen); } } /* Assumes caller holds v->lock, if necessary * Returns false if no area contains start..end * Ignores type of area -- removes all within start..end * Caller should probably clear last_area as well */ static bool remove_vm_area(vm_area_vector_t *v, app_pc start, app_pc end, bool restore_prot) { int i, diff; int overlap_start = -1, overlap_end = -1; bool add_new_area = false; vm_area_t new_area = {0}; /* used only when add_new_area, wimpy compiler */ /* FIXME: cleaner test? shared_data copies flags, but uses * custom.frags and not custom.client */ bool official_coarse_vector = (v == executable_areas); ASSERT_VMAREA_VECTOR_PROTECTED(v, WRITE); LOG(GLOBAL, LOG_VMAREAS, 4, "in remove_vm_area "PFX" "PFX"\n", start, end); /* N.B.: removed area could span multiple areas! 
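 * Overview of the loop below: find the contiguous run of areas overlapping
 * [start,end); trim the end of a partially-covered first area and the start
 * of a partially-covered last area; delete every fully-covered area in
 * between; and if a single area straddles both bounds, re-add the split-off
 * tail afterward via add_vm_area().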
*/ for (i = 0; i < v->length; i++) { /* look for overlap */ if (start < v->buf[i].end && end > v->buf[i].start) { if (overlap_start == -1) overlap_start = i; } else if (overlap_start > -1) { overlap_end = i; /* not inclusive */ break; } else if (end <= v->buf[i].start) break; } if (overlap_start == -1) return false; if (overlap_end == -1) overlap_end = v->length; /* since it's sorted and there are no overlaps, we do not have to re-sort. * we just delete entire intervals affected, and shorten non-entire */ if (start > v->buf[overlap_start].start) { /* need to split? */ if (overlap_start == overlap_end-1 && end < v->buf[overlap_start].end) { /* don't call add_vm_area now, that will mess up our vector */ new_area = v->buf[overlap_start]; /* make a copy */ new_area.start = end; /* rest of fields are correct */ add_new_area = true; } /* move ending bound backward */ LOG(GLOBAL, LOG_VMAREAS, 3, "\tchanging "PFX"-"PFX" to "PFX"-"PFX"\n", v->buf[overlap_start].start, v->buf[overlap_start].end, v->buf[overlap_start].start, start); if (restore_prot && DR_MADE_READONLY(v->buf[overlap_start].vm_flags)) { vm_make_writable(start, end - start); } v->buf[overlap_start].end = start; /* FIXME: add a vmvector callback function for changing bounds? */ if (TEST(FRAG_COARSE_GRAIN, v->buf[overlap_start].frag_flags) && official_coarse_vector) { adjust_coarse_unit_bounds(&v->buf[overlap_start], false/*leave invalid*/); } overlap_start++; /* don't delete me */ } if (end < v->buf[overlap_end-1].end) { /* move starting bound forward */ LOG(GLOBAL, LOG_VMAREAS, 3, "\tchanging "PFX"-"PFX" to "PFX"-"PFX"\n", v->buf[overlap_end-1].start, v->buf[overlap_end-1].end, end, v->buf[overlap_end-1].end); if (restore_prot && DR_MADE_READONLY(v->buf[overlap_end-1].vm_flags)) { vm_make_writable(v->buf[overlap_end-1].start, end - v->buf[overlap_end-1].start); } v->buf[overlap_end-1].start = end; /* FIXME: add a vmvector callback function for changing bounds? */ if (TEST(FRAG_COARSE_GRAIN, v->buf[overlap_end-1].frag_flags) && official_coarse_vector) { adjust_coarse_unit_bounds(&v->buf[overlap_end-1], false/*leave invalid*/); } overlap_end--; /* don't delete me */ } /* now delete */ if (overlap_start < overlap_end) { for (i = overlap_start; i < overlap_end; i++) { LOG(GLOBAL, LOG_VMAREAS, 3, "\tcompletely removing "PFX"-"PFX" %s\n", v->buf[i].start, v->buf[i].end, v->buf[i].comment); if (restore_prot && DR_MADE_READONLY(v->buf[i].vm_flags)) { vm_make_writable(v->buf[i].start, v->buf[i].end - v->buf[i].start); } /* FIXME: use a free_payload_func instead of this custom * code. But then we couldn't assert on the bounds and on * VM_EXECUTED_FROM. Could add bounds to callback params, but * vm_flags are not exposed to vmvector interface... */ if (TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags) && official_coarse_vector) { coarse_info_t *info = (coarse_info_t *) v->buf[i].custom.client; coarse_info_t *next_info; ASSERT(info != NULL); ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); while (info != NULL) { /* loop over primary and secondary unit */ ASSERT(info->base_pc >= v->buf[i].start && info->end_pc <= v->buf[i].end); ASSERT(info->frozen || info->non_frozen == NULL); /* Should have already freed fields, unless we flushed a region * that has not been executed from (case 10995): in which case * we must delay as we cannot grab change_linking_lock or * special_heap_lock or info->lock while holding exec_areas lock. 
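 * (Such units are chained onto *coarse_to_delete here and freed later by
 * free_nonexec_coarse_and_unlock(), once the exec areas lock is released.)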
*/ if (info->cache != NULL) { ASSERT(info->persisted); ASSERT(!TEST(VM_EXECUTED_FROM, v->buf[i].vm_flags)); ASSERT(info->non_frozen != NULL); ASSERT(coarse_to_delete != NULL); /* Both primary and secondary must be un-executed */ info->non_frozen->non_frozen = *coarse_to_delete; *coarse_to_delete = info; info = NULL; } else { ASSERT(info->cache == NULL && info->stubs == NULL); next_info = info->non_frozen; coarse_unit_free(GLOBAL_DCONTEXT, info); info = next_info; ASSERT(info == NULL || !info->frozen); } } v->buf[i].custom.client = NULL; } if (v->free_payload_func != NULL) { v->free_payload_func(v->buf[i].custom.client); } #ifdef DEBUG global_heap_free(v->buf[i].comment, strlen(v->buf[i].comment)+1 HEAPACCT(ACCT_VMAREAS)); #endif /* frags list should always be null here (flush should have happened, * etc.) */ ASSERT(!TEST(VECTOR_FRAGMENT_LIST, v->flags) || v->buf[i].custom.frags == NULL); } diff = overlap_end - overlap_start; for (i = overlap_start; i < v->length-diff; i++) v->buf[i] = v->buf[i+diff]; #ifdef DEBUG memset(v->buf + v->length - diff, 0, diff * sizeof(vm_area_t)); #endif v->length -= diff; } if (add_new_area) { /* Case 8640: Do not propagate coarse-grain-ness to split-off region, * for now only for simplicity. FIXME: come up with better policy. We * do keep it on original part of split region. FIXME: assert that * there the unit is fully flushed. Better to remove in * vm_area_allsynch_flush_fragments() and then re-add if warranted? */ new_area.frag_flags &= ~FRAG_COARSE_GRAIN; /* With flush of partial module region w/o remove (e.g., from * -unsafe_ignore_IAT_writes) we can have VM_ADD_TO_SHARED_DATA set */ new_area.vm_flags &= ~VM_ADD_TO_SHARED_DATA; LOG(GLOBAL, LOG_VMAREAS, 3, "\tadding "PFX"-"PFX"\n", new_area.start, new_area.end); /* we copied v->buf[overlap_start] above and so already have a copy * of the client field */ if (v->split_payload_func != NULL) { new_area.custom.client = v->split_payload_func(new_area.custom.client); } /* else, just keep the copy */ add_vm_area(v, new_area.start, new_area.end, new_area.vm_flags, new_area.frag_flags, new_area.custom.client _IF_DEBUG(new_area.comment)); } DOLOG(5, LOG_VMAREAS, { print_vm_areas(v, GLOBAL); }); return true; } /* Returns true if start..end overlaps any area in v. * If end==NULL, assumes that end is very top of address space (wraparound). * If area!=NULL, sets *area to an overlapping area in v * If index!=NULL, sets *index to the vector index of area; if no match * is found, sets *index to the index before [start,end) (may be -1). * If first, makes sure *area is the 1st overlapping area * Assumes caller holds v->lock, if necessary * N.B.: the pointer returned by this routine is volatile! Only use it while * you have exclusive control over the vector v, either by holding its lock * or by being its owning thread if it has no lock. */ static bool binary_search(vm_area_vector_t *v, app_pc start, app_pc end, vm_area_t **area/*OUT*/, int *index/*OUT*/, bool first) { /* BINARY SEARCH -- assumes the vector is kept sorted by add & remove! 
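 * On a miss, *index (if requested) is set to the rightmost entry lying
 * entirely below start, or -1 if there is none, per the contract above.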
*/ int min = 0; int max = v->length - 1; ASSERT(start < end || end == NULL /* wraparound */); ASSERT_VMAREA_VECTOR_PROTECTED(v, READWRITE); LOG(GLOBAL, LOG_VMAREAS, 7, "Binary search for "PFX"-"PFX" on this vector:\n", start, end); DOLOG(7, LOG_VMAREAS, { print_vm_areas(v, GLOBAL); }); /* binary search */ while (max >= min) { int i = (min + max) / 2; if (end != NULL && end <= v->buf[i].start) max = i - 1; else if (start >= v->buf[i].end) min = i + 1; else { if (area != NULL || index != NULL) { if (first) { /* caller wants 1st matching area */ for (; i >= 1 && v->buf[i-1].end > start; i--) ; } /* returning pointer to volatile array dangerous -- see comment above */ if (area != NULL) *area = &(v->buf[i]); if (index != NULL) *index = i; } LOG(GLOBAL, LOG_VMAREAS, 7, "\tfound "PFX"-"PFX" in area "PFX"-"PFX"\n", start, end, v->buf[i].start, v->buf[i].end); return true; } } /* now max < min */ LOG(GLOBAL, LOG_VMAREAS, 7, "\tdid not find "PFX"-"PFX"!\n", start, end); if (index != NULL) { ASSERT((max < 0 || v->buf[max].end <= start) && (min > v->length - 1 || v->buf[min].start >= end)); *index = max; } return false; } /* lookup an addr in the current area * RETURN true if address area is found, false otherwise * if area is non NULL it is set to the area found * Assumes caller holds v->lock, if necessary * N.B.: the pointer returned by this routine is volatile! Only use it while * you have exclusive control over the vector v, either by holding its lock * or by being its owning thread if it has no lock. */ /* FIXME: change lookup_addr to two routines, one for readers which * returns a copy, and the other for writers who must hold a lock * across all uses of the pointer */ static bool lookup_addr(vm_area_vector_t *v, app_pc addr, vm_area_t **area) { /* binary search asserts v is protected */ return binary_search(v, addr, addr+1/*open end*/, area, NULL, false); } /* returns true if the passed in area overlaps any known executable areas * Assumes caller holds v->lock, if necessary */ static bool vm_area_overlap(vm_area_vector_t *v, app_pc start, app_pc end) { /* binary search asserts v is protected */ return binary_search(v, start, end, NULL, NULL, false); } /*********************** EXPORTED ROUTINES **********************/ /* thread-shared initialization that should be repeated after a reset */ void vm_areas_reset_init(void) { memset(shared_data, 0, sizeof(*shared_data)); VMVECTOR_INITIALIZE_VECTOR(&shared_data->areas, VECTOR_SHARED | VECTOR_FRAGMENT_LIST, shared_vm_areas); } void dynamo_vm_areas_init() { VMVECTOR_ALLOC_VECTOR(dynamo_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, dynamo_areas); } /* calls find_executable_vm_areas to get per-process map * N.B.: add_dynamo_vm_area can be called before this init routine! * N.B.: this is called after vm_areas_thread_init() */ int vm_areas_init() { int areas; /* Case 7957: we allocate all vm vectors on the heap for self-prot reasons. * We're already paying the indirection cost by passing their addresses * to generic routines, after all. 
*/ VMVECTOR_ALLOC_VECTOR(executable_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, executable_areas); VMVECTOR_ALLOC_VECTOR(pretend_writable_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, pretend_writable_areas); VMVECTOR_ALLOC_VECTOR(patch_proof_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, patch_proof_areas); VMVECTOR_ALLOC_VECTOR(emulate_write_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, emulate_write_areas); VMVECTOR_ALLOC_VECTOR(IAT_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, IAT_areas); VMVECTOR_ALLOC_VECTOR(written_areas, GLOBAL_DCONTEXT, VECTOR_SHARED | VECTOR_NEVER_MERGE, written_areas); vmvector_set_callbacks(written_areas, free_written_area, NULL, NULL, NULL); #ifdef PROGRAM_SHEPHERDING VMVECTOR_ALLOC_VECTOR(futureexec_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, futureexec_areas); # ifdef WINDOWS VMVECTOR_ALLOC_VECTOR(app_flushed_areas, GLOBAL_DCONTEXT, VECTOR_SHARED, app_flushed_areas); # endif #endif shared_data = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, thread_data_t, ACCT_VMAREAS, PROTECTED); todelete = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, deletion_lists_t, ACCT_VMAREAS, PROTECTED); memset(todelete, 0, sizeof(*todelete)); coarse_to_delete = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, coarse_info_t *, ACCT_VMAREAS, PROTECTED); *coarse_to_delete = NULL; if (DYNAMO_OPTION(unloaded_target_exception)) { last_deallocated = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, last_deallocated_t, ACCT_VMAREAS, PROTECTED); memset(last_deallocated, 0, sizeof(*last_deallocated)); } else ASSERT(last_deallocated == NULL); vm_areas_reset_init(); /* initialize dynamo list first */ LOG(GLOBAL, LOG_VMAREAS, 2, "\n--------------------------------------------------------------------------\n"); dynamo_vm_areas_lock(); areas = find_dynamo_library_vm_areas(); dynamo_vm_areas_unlock(); /* initialize executable list * this routine calls app_memory_allocation() w/ dcontext==NULL and so we * won't go adding rwx regions, like the linux stack, to our list, even w/ * -executable_if_alloc */ areas = find_executable_vm_areas(); DOLOG(1, LOG_VMAREAS, { if (areas > 0) { LOG(GLOBAL, LOG_VMAREAS, 1, "\nExecution is allowed in %d areas\n", areas); print_executable_areas(GLOBAL); } LOG(GLOBAL, LOG_VMAREAS, 2, "--------------------------------------------------------------------------\n"); }); return areas; } static void vm_areas_statistics() { #ifdef PROGRAM_SHEPHERDING DOLOG(1, LOG_VMAREAS|LOG_STATS, { uint top; uint bottom; divide_uint64_print(GLOBAL_STAT(looked_up_in_last_area), GLOBAL_STAT(checked_addresses), true, 2, &top, &bottom); LOG(GLOBAL, LOG_VMAREAS|LOG_STATS, 1, "Code Origin: %d address lookups, %d in last area, hit ratio %u.%.2u\n", GLOBAL_STAT(checked_addresses), GLOBAL_STAT(looked_up_in_last_area), top, bottom); }); #endif /* PROGRAM_SHEPHERDING */ DOLOG(1, LOG_VMAREAS, { LOG(GLOBAL, LOG_VMAREAS, 1, "\nexecutable_areas at exit:\n"); print_executable_areas(GLOBAL); }); } /* Free all thread-shared state not critical to forward progress; * vm_areas_reset_init() will be called before continuing. 
*/ void vm_areas_reset_free(void) { if (SHARED_FRAGMENTS_ENABLED()) { /* all deletion entries should be removed in fragment_exit(), * else we'd have to free the frags lists and entries here */ ASSERT(todelete->shared_delete == NULL); ASSERT(todelete->shared_delete_tail == NULL); /* FIXME: don't free lock so init has less work */ vmvector_free_vector(GLOBAL_DCONTEXT, &shared_data->areas); } /* vm_area_coarse_units_reset_free() is called in fragment_reset_free() */ } int vm_areas_exit() { vm_areas_exited = true; vm_areas_statistics(); if (DYNAMO_OPTION(thin_client)) { vmvector_delete_vector(GLOBAL_DCONTEXT, dynamo_areas); dynamo_areas = NULL; /* For thin_client none of the following areas should have been * initialized because they aren't used. * FIXME: wonder if I can do something like this for -client and see * what I am using unnecessarily. */ ASSERT(shared_data == NULL); ASSERT(todelete == NULL); ASSERT(executable_areas == NULL); ASSERT(pretend_writable_areas == NULL); ASSERT(patch_proof_areas == NULL); ASSERT(emulate_write_areas == NULL); ASSERT(written_areas == NULL); #ifdef PROGRAM_SHEPHERDING ASSERT(futureexec_areas == NULL); IF_WINDOWS(ASSERT(app_flushed_areas == NULL);) #endif ASSERT(IAT_areas == NULL); return 0; } vm_areas_reset_free(); DELETE_LOCK(shared_delete_lock); DELETE_LOCK(lazy_delete_lock); ASSERT(todelete->lazy_delete_count == 0); ASSERT(!todelete->move_pending); HEAP_TYPE_FREE(GLOBAL_DCONTEXT, shared_data, thread_data_t, ACCT_VMAREAS, PROTECTED); shared_data = NULL; HEAP_TYPE_FREE(GLOBAL_DCONTEXT, todelete, deletion_lists_t, ACCT_VMAREAS, PROTECTED); todelete = NULL; ASSERT(coarse_to_delete != NULL); /* should be freed immediately after each use, during a no-exec flush */ ASSERT(*coarse_to_delete == NULL); HEAP_TYPE_FREE(GLOBAL_DCONTEXT, coarse_to_delete, coarse_info_t *, ACCT_VMAREAS, PROTECTED); if (DYNAMO_OPTION(unloaded_target_exception)) { HEAP_TYPE_FREE(GLOBAL_DCONTEXT, last_deallocated, last_deallocated_t, ACCT_VMAREAS, PROTECTED); last_deallocated = NULL; } else ASSERT(last_deallocated == NULL); DELETE_LOCK(last_deallocated_lock); vmvector_delete_vector(GLOBAL_DCONTEXT, executable_areas); executable_areas = NULL; DOLOG(1, LOG_VMAREAS, { if (dynamo_areas->buf != NULL) { LOG(GLOBAL, LOG_VMAREAS, 1, "DR regions at exit are:\n"); print_dynamo_areas(GLOBAL); LOG(GLOBAL, LOG_VMAREAS, 1, "\n"); } }); vmvector_delete_vector(GLOBAL_DCONTEXT, dynamo_areas); dynamo_areas = NULL; DOLOG(1, LOG_VMAREAS, { if (written_areas->buf != NULL) { LOG(GLOBAL, LOG_VMAREAS, 1, "Code write and selfmod exec counts:\n"); print_written_areas(GLOBAL); LOG(GLOBAL, LOG_VMAREAS, 1, "\n"); } }); vmvector_delete_vector(GLOBAL_DCONTEXT, pretend_writable_areas); pretend_writable_areas = NULL; vmvector_delete_vector(GLOBAL_DCONTEXT, patch_proof_areas); patch_proof_areas = NULL; vmvector_delete_vector(GLOBAL_DCONTEXT, emulate_write_areas); emulate_write_areas = NULL; vmvector_delete_vector(GLOBAL_DCONTEXT, written_areas); written_areas = NULL; #ifdef PROGRAM_SHEPHERDING DOLOG(1, LOG_VMAREAS, { if (futureexec_areas->buf != NULL) LOG(GLOBAL, LOG_VMAREAS, 1, "futureexec %d regions at exit are:\n", futureexec_areas->length); print_futureexec_areas(GLOBAL); }); vmvector_delete_vector(GLOBAL_DCONTEXT, futureexec_areas); futureexec_areas = NULL; DELETE_LOCK(threads_killed_lock); # ifdef WINDOWS ASSERT(DYNAMO_OPTION(xdata_rct) || vmvector_empty(app_flushed_areas)); vmvector_delete_vector(GLOBAL_DCONTEXT, app_flushed_areas); app_flushed_areas = NULL; # endif #endif #ifdef SIMULATE_ATTACK 
DELETE_LOCK(simulate_lock); #endif vmvector_delete_vector(GLOBAL_DCONTEXT, IAT_areas); IAT_areas = NULL; tamper_resistant_region_start = NULL; tamper_resistant_region_end = NULL; return 0; } void vm_areas_thread_reset_init(dcontext_t *dcontext) { thread_data_t *data = (thread_data_t *) dcontext->vm_areas_field; memset(dcontext->vm_areas_field, 0, sizeof(thread_data_t)); VMVECTOR_INITIALIZE_VECTOR(&data->areas, VECTOR_FRAGMENT_LIST, thread_vm_areas); /* data->areas.lock is never used, but we may want to grab it one day, e.g. to print other thread areas */ } /* N.B.: this is called before vm_areas_init() */ void vm_areas_thread_init(dcontext_t *dcontext) { thread_data_t *data = HEAP_TYPE_ALLOC(dcontext, thread_data_t, ACCT_OTHER, PROTECTED); dcontext->vm_areas_field = data; vm_areas_thread_reset_init(dcontext); } void vm_areas_thread_reset_free(dcontext_t *dcontext) { /* we free the local areas vector so it will match fragments post-reset * FIXME: put it in nonpersistent heap */ thread_data_t *data = (thread_data_t *) dcontext->vm_areas_field; /* yes, we end up using global heap for the thread-local area * vector...not a big deal, but FIXME sometime */ vmvector_free_vector(GLOBAL_DCONTEXT, &data->areas); } void vm_areas_thread_exit(dcontext_t *dcontext) { vm_areas_thread_reset_free(dcontext); #ifdef DEBUG /* for non-debug we do fast exit path and don't free local heap */ HEAP_TYPE_FREE(dcontext, dcontext->vm_areas_field, thread_data_t, ACCT_OTHER, PROTECTED); #endif } /**************************************************************************** * external interface to vm_area_vector_t * * FIXME: add user data field to vector and to add routine * FIXME: have init and destroy routines so don't have to expose * vm_area_vector_t struct or declare vector in this file */ void vmvector_set_callbacks(vm_area_vector_t *v, void (*free_func)(void*), void *(*split_func)(void*), bool (*should_merge_func)(bool, void*, void*), void *(*merge_func)(void*, void*)) { bool release_lock; /* 'true' means this routine needs to unlock */ ASSERT(v != NULL); LOCK_VECTOR(v, release_lock, read); v->free_payload_func = free_func; v->split_payload_func = split_func; v->should_merge_func = should_merge_func; v->merge_payload_func = merge_func; UNLOCK_VECTOR(v, release_lock, read); } void vmvector_print(vm_area_vector_t *v, file_t outf) { bool release_lock; /* 'true' means this routine needs to unlock */ LOCK_VECTOR(v, release_lock, read); print_vm_areas(v, outf); UNLOCK_VECTOR(v, release_lock, read); } void vmvector_add(vm_area_vector_t *v, app_pc start, app_pc end, void *data) { bool release_lock; /* 'true' means this routine needs to unlock */ LOCK_VECTOR(v, release_lock, write); ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock); add_vm_area(v, start, end, 0, 0, data _IF_DEBUG("")); UNLOCK_VECTOR(v, release_lock, write); } void * vmvector_add_replace(vm_area_vector_t *v, app_pc start, app_pc end, void *data) { bool overlap; vm_area_t *area = NULL; void *old_data = NULL; bool release_lock; /* 'true' means this routine needs to unlock */ LOCK_VECTOR(v, release_lock, write); ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock); overlap = lookup_addr(v, start, &area); if (overlap && start == area->start && end == area->end) { old_data = area->custom.client; area->custom.client = data; } else add_vm_area(v, start, end, 0, 0, data _IF_DEBUG("")); UNLOCK_VECTOR(v, release_lock, write); return old_data; } bool vmvector_remove(vm_area_vector_t *v, app_pc start, app_pc end) { bool ok; bool release_lock; /* 'true' means 
this routine needs to unlock */ LOCK_VECTOR(v, release_lock, write); ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock); ok = remove_vm_area(v, start, end, false); UNLOCK_VECTOR(v, release_lock, write); return ok; } /* Looks up area encapsulating target pc and removes. * returns true if found and removed, and optional area boundaries are set * returns false if not found */ bool vmvector_remove_containing_area(vm_area_vector_t *v, app_pc pc, app_pc *area_start /* OUT optional */, app_pc *area_end /* OUT optional */) { vm_area_t *a; bool ok; bool release_lock; /* 'true' means this routine needs to unlock */ /* common path should be to find one, and would need write lock to * remove */ LOCK_VECTOR(v, release_lock, write); ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock); ok = lookup_addr(v, pc, &a); if (ok) { if (area_start != NULL) *area_start = a->start; if (area_end != NULL) *area_end = a->end; remove_vm_area(v, a->start, a->end, false); } UNLOCK_VECTOR(v, release_lock, write); return ok; } bool vmvector_overlap(vm_area_vector_t *v, app_pc start, app_pc end) { bool overlap; bool release_lock; /* 'true' means this routine needs to unlock */ if (vmvector_empty(v)) return false; LOCK_VECTOR(v, release_lock, read); ASSERT_OWN_READWRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock); overlap = vm_area_overlap(v, start, end); UNLOCK_VECTOR(v, release_lock, read); return overlap; } /* returns custom data field, or NULL if not found. NOTE: Access to * custom data needs explicit synchronization in addition to * vm_area_vector_t's locks! */ void * vmvector_lookup(vm_area_vector_t *v, app_pc pc) { void *data = NULL; vmvector_lookup_data(v, pc, NULL, NULL, &data); return data; } /* Looks up if pc is in a vmarea and optionally returns the areas's bounds * and any custom data. NOTE: Access to custom data needs explicit * synchronization in addition to vm_area_vector_t's locks! */ bool vmvector_lookup_data(vm_area_vector_t *v, app_pc pc, app_pc *start /* OUT */, app_pc *end /* OUT */, void **data /* OUT */) { bool overlap; vm_area_t *area = NULL; bool release_lock; /* 'true' means this routine needs to unlock */ LOCK_VECTOR(v, release_lock, read); ASSERT_OWN_READWRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock); overlap = lookup_addr(v, pc, &area); if (overlap) { if (start != NULL) *start = area->start; if (end != NULL) *end = area->end; if (data != NULL) *data = area->custom.client; } UNLOCK_VECTOR(v, release_lock, read); return overlap; } /* Returns false if pc is in a vmarea in v. * Otherwise, returns the start pc of the vmarea prior to pc in prev and * the start pc of the vmarea after pc in next. * FIXME: most callers will call this and vmvector_lookup_data(): * should this routine do both to avoid an extra binary search? */ bool vmvector_lookup_prev_next(vm_area_vector_t *v, app_pc pc, OUT app_pc *prev, OUT app_pc *next) { bool success; int index; bool release_lock; /* 'true' means this routine needs to unlock */ LOCK_VECTOR(v, release_lock, read); ASSERT_OWN_READWRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock); success = !binary_search(v, pc, pc+1, NULL, &index, false); if (success) { if (prev != NULL) { if (index == -1) *prev = NULL; else *prev = v->buf[index].start; } if (next != NULL) { if (index >= v->length - 1) *next = (app_pc) POINTER_MAX; else *next = v->buf[index+1].start; } } UNLOCK_VECTOR(v, release_lock, read); return success; } /* Sets custom data field if a vmarea is present. Returns true if found, * false if not found. 
NOTE: Access to custom data needs explicit * synchronization in addition to vm_area_vector_t's locks! */ bool vmvector_modify_data(vm_area_vector_t *v, app_pc start, app_pc end, void *data) { bool overlap; vm_area_t *area = NULL; bool release_lock; /* 'true' means this routine needs to unlock */ LOCK_VECTOR(v, release_lock, write); ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock); overlap = lookup_addr(v, start, &area); if (overlap && start == area->start && end == area->end) area->custom.client = data; UNLOCK_VECTOR(v, release_lock, write); return overlap; } /* this routine does NOT initialize the rw lock! use VMVECTOR_INITIALIZE_VECTOR */ void vmvector_init_vector(vm_area_vector_t *v, uint flags) { memset(v, 0, sizeof(*v)); v->flags = flags; } /* this routine does NOT initialize the rw lock! use VMVECTOR_ALLOC_VECTOR instead */ vm_area_vector_t * vmvector_create_vector(dcontext_t *dcontext, uint flags) { vm_area_vector_t *v = HEAP_TYPE_ALLOC(dcontext, vm_area_vector_t, ACCT_VMAREAS, PROTECTED); vmvector_init_vector(v, flags); return v; } /* frees the fields of vm_area_vector_t v (not v itself) */ void vmvector_reset_vector(dcontext_t *dcontext, vm_area_vector_t *v) { DODEBUG({ int i; /* walk areas and delete coarse info and comments */ for (i = 0; i < v->length; i++) { /* FIXME: this code is duplicated in remove_vm_area() */ if (TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags) && /* FIXME: cleaner test? shared_data copies flags, but uses * custom.frags and not custom.client */ v == executable_areas) { coarse_info_t *info = (coarse_info_t *) v->buf[i].custom.client; coarse_info_t *next_info; ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); ASSERT(info != NULL); while (info != NULL) { /* loop over primary and secondary unit */ next_info = info->non_frozen; ASSERT(info->frozen || info->non_frozen == NULL); coarse_unit_free(GLOBAL_DCONTEXT, info); info = next_info; ASSERT(info == NULL || !info->frozen); } v->buf[i].custom.client = NULL; } global_heap_free(v->buf[i].comment, strlen(v->buf[i].comment)+1 HEAPACCT(ACCT_VMAREAS)); } }); /* with thread shared cache it is in fact possible to have no thread local vmareas */ if (v->buf != NULL) { /* FIXME: walk through and make sure frags lists are all freed */ global_heap_free(v->buf, v->size*sizeof(struct vm_area_t) HEAPACCT(ACCT_VMAREAS)); v->size = 0; v->length = 0; v->buf = NULL; } else ASSERT(v->size == 0 && v->length == 0); } static void vmvector_free_vector(dcontext_t *dcontext, vm_area_vector_t *v) { vmvector_reset_vector(dcontext, v); DELETE_READWRITE_LOCK(v->lock); } /* frees the vm_area_vector_t v and its associated memory */ void vmvector_delete_vector(dcontext_t *dcontext, vm_area_vector_t *v) { if (v->free_payload_func != NULL) { int i; for (i = 0; i < v->length; i++) { v->free_payload_func(v->buf[i].custom.client); } } vmvector_free_vector(dcontext, v); HEAP_TYPE_FREE(dcontext, v, vm_area_vector_t, ACCT_VMAREAS, PROTECTED); } /* vmvector iterator */ /* initialize an iterator, has to be released with * vmvector_iterator_stop. The iterator doesn't support mutations. * In fact shared vectors should detect a deadlock * if vmvector_add() and vmvector_remove() is erroneously called. 
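 * A typical usage sketch:
 *   vmvector_iterator_t vmvi;
 *   vmvector_iterator_start(v, &vmvi);
 *   while (vmvector_iterator_hasnext(&vmvi)) {
 *       app_pc area_start, area_end;
 *       void *data = vmvector_iterator_next(&vmvi, &area_start, &area_end);
 *       ... use data and [area_start, area_end) ...
 *   }
 *   vmvector_iterator_stop(&vmvi);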
*/ void vmvector_iterator_start(vm_area_vector_t *v, vmvector_iterator_t *vmvi) { ASSERT(v != NULL); ASSERT(vmvi != NULL); if (SHOULD_LOCK_VECTOR(v)) read_lock(&v->lock); vmvi->vector = v; vmvi->index = -1; } bool vmvector_iterator_hasnext(vmvector_iterator_t *vmvi) { ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE); return (vmvi->index + 1) < vmvi->vector->length; } void vmvector_iterator_startover(vmvector_iterator_t *vmvi) { ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE); vmvi->index = -1; } /* iterator accessor * has to be initialized with vmvector_iterator_start, and should be * called only when vmvector_iterator_hasnext() is true * * returns custom data and * sets the area boundaries in area_start and area_end * * does not increment the iterator */ void* vmvector_iterator_peek(vmvector_iterator_t *vmvi, /* IN/OUT */ app_pc *area_start /* OUT */, app_pc *area_end /* OUT */) { int idx = vmvi->index + 1; ASSERT(vmvector_iterator_hasnext(vmvi)); ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE); ASSERT(idx < vmvi->vector->length); if (area_start != NULL) *area_start = vmvi->vector->buf[idx].start; if (area_end != NULL) *area_end = vmvi->vector->buf[idx].end; return vmvi->vector->buf[idx].custom.client; } /* iterator accessor * has to be initialized with vmvector_iterator_start, and should be * called only when vmvector_iterator_hasnext() is true * * returns custom data and * sets the area boundaries in area_start and area_end */ void* vmvector_iterator_next(vmvector_iterator_t *vmvi, /* IN/OUT */ app_pc *area_start /* OUT */, app_pc *area_end /* OUT */) { void *res = vmvector_iterator_peek(vmvi, area_start, area_end); vmvi->index++; return res; } void vmvector_iterator_stop(vmvector_iterator_t *vmvi) { ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE); if (SHOULD_LOCK_VECTOR(vmvi->vector)) read_unlock(&vmvi->vector->lock); DODEBUG({ vmvi->vector = NULL; /* crash incorrect reuse */ vmvi->index = -1; }); } /**************************************************************************** * routines specific to our own vectors */ void print_executable_areas(file_t outf) { vmvector_print(executable_areas, outf); } void print_dynamo_areas(file_t outf) { dynamo_vm_areas_start_reading(); print_vm_areas(dynamo_areas, outf); dynamo_vm_areas_done_reading(); } #ifdef PROGRAM_SHEPHERDING void print_futureexec_areas(file_t outf) { vmvector_print(futureexec_areas, outf); } #endif #if defined(DEBUG) && defined(INTERNAL) static void print_written_areas(file_t outf) { vmvector_print(written_areas, outf); } #endif static void free_written_area(void *data) { HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (ro_vs_sandbox_data_t *) data, ro_vs_sandbox_data_t, ACCT_VMAREAS, UNPROTECTED); } /* Functions as a lookup routine if an entry is already present. * Returns true if an entry was already present, false if not, in which * case an entry containing tag with suggested bounds of [start, end) * (actual bounds may be smaller to avoid overlap) is added. 
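 * If area != NULL, *area is set to the matching entry in either case
 * (the pre-existing one or the one just added).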
*/ static bool add_written_area(vm_area_vector_t *v, app_pc tag, app_pc start, app_pc end, vm_area_t **area) { vm_area_t *a = NULL; bool already; DEBUG_DECLARE(bool ok;) /* currently only one vector */ ASSERT(v == written_areas); ASSERT_OWN_WRITE_LOCK(true, &v->lock); ASSERT(tag >= start && tag < end); /* re-adding fails for written_areas since no merging, so lookup first */ already = lookup_addr(v, tag, &a); if (!already) { app_pc prev_start = NULL, next_start = NULL; LOG(GLOBAL, LOG_VMAREAS, 2, "new written executable vm area: "PFX"-"PFX"\n", start, end); /* case 9179: With no flags, any overlap (in non-tag portion of [start, * end)) will result in a merge: so we'll inherit and share counts from * any adjacent region(s): maybe better to split? Rare in any case and * not critical. In case of simultaneous overlap, we take counter from * first region, since that's how add_vm_area does the merge. */ /* we can't merge b/c we have hardcoded counter pointers in code * in the cache, so we make sure to only add the non-overlap */ DEBUG_DECLARE(ok = ) vmvector_lookup_prev_next(v, tag, &prev_start, &next_start); ASSERT(ok); /* else already should be true */ if (prev_start != NULL) { vm_area_t *prev_area = NULL; DEBUG_DECLARE(ok = ) lookup_addr(v, prev_start, &prev_area); ASSERT(ok); /* we hold the lock after all */ if (prev_area->end > start) start = prev_area->end; } if (next_start < (app_pc) POINTER_MAX && end > next_start) end = next_start; add_vm_area(v, start, end, /* no flags */ 0, 0, NULL _IF_DEBUG("")); DEBUG_DECLARE(ok = ) lookup_addr(v, tag, &a); ASSERT(ok && a != NULL); /* If we merged, we already have an ro2s struct */ /* FIXME: now that we have merge callback support, should just pass * a struct into add_vm_area and avoid this post-lookup */ if (a->custom.client == NULL) { /* Since selfmod_execs is written from the cache this must be * unprotected. Attacker changing selfmod_execs or written_count * shouldn't be able to cause problems. */ ro_vs_sandbox_data_t *ro2s = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, ro_vs_sandbox_data_t, ACCT_VMAREAS, UNPROTECTED); /* selfmod_execs is inc-ed from the cache, and if it crosses a cache * line we could have a problem with large thresholds. We assert on * 32-bit alignment here, which our heap alloc currently provides, to * ensure no cache line is crossed. */ ASSERT(ALIGNED(ro2s, sizeof(uint))); memset(ro2s, 0, sizeof(*ro2s)); a->custom.client = (void *) ro2s; } } else { LOG(GLOBAL, LOG_VMAREAS, 3, "request for written area "PFX"-"PFX" vs existing "PFX"-"PFX"\n", start, end, a->start, a->end); } ASSERT(a != NULL); if (area != NULL) *area = a; return already; } #ifdef WINDOWS /* Adjusts a new executable area with respect to the IAT. * Returns whether it should remain coarse or not. 
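 * On return, *existing_area is set if the post-IAT piece was merged into an
 * already-present coarse area (with *info_out pointing at its still-valid
 * persisted unit, if any); *tofree holds a stale coarse unit that the caller
 * must free only after releasing the exec areas lock; and *delay_start /
 * *delay_end describe a pre-IAT piece the caller should add as its own area.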
*/ static bool add_executable_vm_area_check_IAT(app_pc *start /*IN/OUT*/, app_pc *end /*IN/OUT*/, uint vm_flags, vm_area_t **existing_area /*OUT*/, coarse_info_t **info_out /*OUT*/, coarse_info_t **tofree /*OUT*/, app_pc *delay_start /*OUT*/, app_pc *delay_end /*OUT*/) { bool keep_coarse = false; app_pc IAT_start = NULL, IAT_end = NULL; app_pc orig_start = *start, orig_end = *end; ASSERT(existing_area != NULL && info_out != NULL && tofree != NULL); ASSERT(delay_start != NULL && delay_end != NULL); if (DYNAMO_OPTION(coarse_merge_iat) && get_module_base(*start) != NULL && get_IAT_section_bounds(get_module_base(*start), &IAT_start, &IAT_end) && /* case 1094{5,7}: to match the assumptions of case 10600 we adjust * to post-IAT even if the IAT is in the middle, if it's toward the front */ (*start >= IAT_start || (IAT_start - *start < *end - IAT_end)) && *start < IAT_end && /* be paranoid: multi-page IAT where hooker fooled our loader matching * could add just 1st page of IAT? */ *end > IAT_end /* for == avoid an empty region */) { /* If a pre-IAT region exists, split if off separately (case 10945). * We want to keep as coarse, but we need the post-IAT region to be the * primary coarse and the one we try to load a pcache for: so we delay * the add. * FIXME: should we do a general split around the IAT and make both sides * coarse with larger the primary instead of assuming pre-IAT is smaller? */ if (orig_start < IAT_start) { LOG(GLOBAL, LOG_VMAREAS, 2, "splitting pre-IAT "PFX"-"PFX" off from exec area "PFX"-"PFX"\n", orig_start, IAT_start, orig_start, orig_end); *delay_start = orig_start; *delay_end = IAT_start; DOCHECK(1, { /* When IAT is in the middle of +rx region we expect .orpc */ app_pc orpc_start = NULL; app_pc orpc_end = NULL; get_named_section_bounds(get_module_base(orig_start), ".orpc", &orpc_start, &orpc_end); ASSERT_CURIOSITY(orpc_start == orig_start && orpc_end == IAT_start); }); } /* Just abandon [*start, IAT_end) */ *start = IAT_end; ASSERT(*end > *start); LOG(GLOBAL, LOG_VMAREAS, 2, "adjusting exec area "PFX"-"PFX" to post-IAT "PFX"-"PFX"\n", orig_start, *end, *start, *end); } else { LOG(GLOBAL, LOG_VMAREAS, 2, "NOT adjusting exec area "PFX"-"PFX" vs IAT "PFX"-"PFX"\n", orig_start, *end, IAT_start, IAT_end); } if (TEST(VM_UNMOD_IMAGE, vm_flags)) keep_coarse = true; else { /* Keep the coarse-grain flag for modified pages only if IAT pages. * We want to avoid repeated coarse flushes, so we are * very conservative about marking if_rx_text regions coarse: we count on * our IAT loader check to make this a do-once. 
* FIXME: Should extend this to also merge on left with .orpc: * .orpc at page 1, IAT on page 2, and .text continuing on */ ASSERT(ALIGNED(*end, PAGE_SIZE)); if (DYNAMO_OPTION(coarse_merge_iat) && vm_flags == 0 /* no other flags */ && /* FIXME: used our stored bounds */ is_IAT(orig_start, orig_end, true/*page-align*/, NULL, NULL) && is_module_patch_region(GLOBAL_DCONTEXT, orig_start, orig_end, true/*be conservative*/) && /* We stored the IAT code at +rw time */ os_module_cmp_IAT_code(orig_start)) { vm_area_t *area = NULL; bool all_new = !executable_vm_area_overlap(orig_start, orig_end-1, true/*wlock*/); ASSERT(IAT_start != NULL); /* should have found bounds above */ if (all_new && /* elseif assumes next call happened */ lookup_addr(executable_areas, *end, &area) && TEST(FRAG_COARSE_GRAIN, area->frag_flags) && /* Only merge if no execution has yet occurred: else this * must not be normal rebinding */ !TEST(VM_EXECUTED_FROM, area->vm_flags) && /* Should be marked invalid; else no loader +rw => not rebinding */ area->custom.client != NULL && TEST(PERSCACHE_CODE_INVALID, ((coarse_info_t *)area->custom.client)->flags)) { /* Case 8640: merge IAT page back in to coarse area. * Easier to merge here than in add_vm_area. */ coarse_info_t *info = (coarse_info_t *) area->custom.client; keep_coarse = true; LOG(GLOBAL, LOG_VMAREAS, 2, "merging post-IAT ("PFX"-"PFX") with "PFX"-"PFX"\n", IAT_end, orig_end, area->start, area->end); ASSERT(area != NULL); ASSERT(area->start == *end); ASSERT(IAT_end > orig_start && IAT_end < area->start); ASSERT(*start == IAT_end); /* set up above */ *end = area->end; area->start = *start; *existing_area = area; STATS_INC(coarse_merge_IAT); /* If info was loaded prior to rebinding just use it. * Else, we need a fresh coarse_info_t if persisted, so rather than * adjust_coarse_unit_bounds on info we must free it. * Due to lock constraints we can't do that while holding * exec areas lock. */ /* Bounds should match exactly, since we did not adjust them * on the flush; if they don't, don't use the pcache. */ if (info->base_pc == area->start && info->end_pc == area->end) { info->flags &= ~PERSCACHE_CODE_INVALID; *info_out = info; STATS_INC(coarse_marked_valid); LOG(GLOBAL, LOG_VMAREAS, 2, "\tkeeping now-valid info %s "PFX"-"PFX"\n", info->module, info->base_pc, info->end_pc); } else { /* Go ahead and merge, but don't use this pcache */ ASSERT_CURIOSITY(false && "post-rebind pcache bounds mismatch"); *tofree = info; area->custom.client = NULL; /* FIXME: we'll try to load again: prevent that? We * know the image hasn't been modified so no real danger. */ STATS_INC(perscache_rebind_load); } } else if (all_new && area == NULL /*nothing following*/) { /* Code section is a single page, so was completely flushed * We'll try to re-load the pcache. * FIXME: we already merged the persisted rct tables into * the live tables when we flushed the pcache: so now * we'll have redundancy, and if we flush again we'll waste * time tryingn to re-add (we do check for dups). 
*/ ASSERT(!lookup_addr(executable_areas, *start, NULL)); LOG(GLOBAL, LOG_VMAREAS, 2, "marking IAT/code region ("PFX"-"PFX" vs "PFX"-"PFX") as coarse\n", IAT_start, IAT_end, orig_start, orig_end); keep_coarse = true; STATS_INC(coarse_merge_IAT); /* we use same stat */ } else { LOG(GLOBAL, LOG_VMAREAS, 2, "NOT merging IAT-containing "PFX"-"PFX": abuts non-inv-coarse\n", orig_start, orig_end); DOCHECK(1, { if (all_new && area != NULL && TEST(FRAG_COARSE_GRAIN, area->frag_flags) && TEST(VM_EXECUTED_FROM, area->vm_flags)) { coarse_info_t *info = (coarse_info_t *) area->custom.client; ASSERT(!info->persisted); ASSERT(!TEST(PERSCACHE_CODE_INVALID, info->flags)); } }); } } else { LOG(GLOBAL, LOG_VMAREAS, 2, "NOT merging .text "PFX"-"PFX" vs IAT "PFX"-"PFX" %d %d %d %d %d\n", orig_start, orig_end, IAT_start, IAT_end, DYNAMO_OPTION(coarse_merge_iat), vm_flags == 0, is_IAT(orig_start, *end, true/*page-align*/, NULL, NULL), is_module_patch_region(GLOBAL_DCONTEXT, orig_start, orig_end, true/*be conservative*/), os_module_cmp_IAT_code(orig_start)); } } return keep_coarse; } #endif static void add_executable_vm_area_helper(app_pc start, app_pc end, uint vm_flags, uint frag_flags, coarse_info_t *info _IF_DEBUG(const char *comment)) { ASSERT_OWN_WRITE_LOCK(true, &executable_areas->lock); add_vm_area(executable_areas, start, end, vm_flags, frag_flags, NULL _IF_DEBUG(comment)); if (TEST(VM_WRITABLE, vm_flags)) { /* N.B.: the writable flag indicates the natural state of the memory, * not what we have made it be -- we make it read-only before adding * to the executable list! * FIXME: win32 callback's intercept_call code appears in fragments * and is writable...would like to fix that, and coalesce that memory * with the generated routines or something */ LOG(GLOBAL, LOG_VMAREAS, 2, "WARNING: new executable vm area is writable: "PFX"-"PFX" %s\n", start, end, comment); #if 0 /* this syslog causes services.exe to hang (ref case 666) once case 666 * is fixed re-enable if desired FIXME */ SYSLOG_INTERNAL_WARNING_ONCE("new executable vm area is writable."); #endif } #ifdef PROGRAM_SHEPHERDING if (!DYNAMO_OPTION(selfmod_futureexec) && TEST(FRAG_SELFMOD_SANDBOXED, frag_flags)) { /* We do not need future entries for selfmod regions. We mark * the futures as once-only when they are selfmod at future add time, and * here we catch those who weren't selfmod then but are now. */ remove_futureexec_vm_area(start, end); } #endif if (TEST(FRAG_COARSE_GRAIN, frag_flags)) { vm_area_t *area = NULL; DEBUG_DECLARE(bool found = ) lookup_addr(executable_areas, start, &area); ASSERT(found && area != NULL); /* case 9521: always have one non-frozen coarse unit per coarse region */ if (info == NULL || info->frozen) { coarse_info_t *new_info = coarse_unit_create(start, end, (info == NULL) ? NULL : &info->module_md5, true/*for execution*/); LOG(GLOBAL, LOG_VMAREAS, 1, "new %scoarse unit %s "PFX"-"PFX"\n", info == NULL ? 
"" : "secondary ", new_info->module, start, end); if (info == NULL) info = new_info; else info->non_frozen = new_info; } area->custom.client = (void *) info; } DOLOG(2, LOG_VMAREAS, { /* new area could have been split into multiple */ print_contig_vm_areas(executable_areas, start, end, GLOBAL, "new executable vm area: "); }); } static coarse_info_t * vm_area_load_coarse_unit(app_pc *start INOUT, app_pc *end INOUT, uint vm_flags, uint frag_flags, bool delayed _IF_DEBUG(const char *comment)) { coarse_info_t *info; /* We load persisted cache files at mmap time primarily for RCT * tables; but to avoid duplicated code, and for simplicity, we do * so if -use_persisted even if not -use_persisted_rct. */ dcontext_t *dcontext = get_thread_private_dcontext(); ASSERT_OWN_WRITE_LOCK(true, &executable_areas->lock); /* FIXME: we're called before 1st thread is set up. Only a problem * right now for rac_entries_resurrect() w/ private after-call * which won't happen w/ -coarse_units that requires shared bbs. */ info = coarse_unit_load(dcontext == NULL ? GLOBAL_DCONTEXT : dcontext, *start, *end, true/*for execution*/); if (info != NULL) { ASSERT(info->base_pc >= *start && info->end_pc <= *end); LOG(GLOBAL, LOG_VMAREAS, 1, "using persisted coarse unit %s "PFX"-"PFX" for "PFX"-"PFX"\n", info->module, info->base_pc, info->end_pc, *start, *end); /* Case 8640/9653/8639: adjust region bounds so that a * cache consistency event outside the persisted region * does not invalidate it (mainly targeting loader rebinding). * We count on FRAG_COARSE_GRAIN preventing any merging of regions. * We could delay this until code validation, as RCT tables don't care, * and then we could avoid splitting the region in case validation * fails: but our plan for lazy per-page validation (case 10601) * means we can fail post-split even that way. So we go ahead and split * up front here. For 4.4 we should move this to 1st exec. */ if (delayed && (info->base_pc > *start || info->end_pc < *end)) { /* we already added a region for the whole range earlier */ remove_vm_area(executable_areas, *start, *end, false/*leave writability*/); add_executable_vm_area_helper(info->base_pc, info->end_pc, vm_flags, frag_flags, info _IF_DEBUG(comment)); } if (info->base_pc > *start) { add_executable_vm_area_helper(*start, info->base_pc, vm_flags, frag_flags, NULL _IF_DEBUG(comment)); *start = info->base_pc; } if (info->end_pc < *end) { add_executable_vm_area_helper(info->end_pc, *end, vm_flags, frag_flags, NULL _IF_DEBUG(comment)); *end = info->end_pc; } /* if !delayed we'll add the region for the unit in caller */ ASSERT(info->frozen && info->persisted); vm_flags |= VM_PERSISTED_CACHE; /* For 4.4 we would mark as PERSCACHE_CODE_INVALID here and * mark valid only at 1st execution when we do md5 checks; * for 4.3 we're valid until a rebind action. */ ASSERT(!TEST(PERSCACHE_CODE_INVALID, info->flags)); /* We must add to shared_data, but we cannot here due to lock * rank issues (shared_vm_areas lock is higher rank than * executable_areas, and we have callers doing flushes and * already holding executable_areas), so we delay. 
*/ vm_flags |= VM_ADD_TO_SHARED_DATA; } return info; } /* NOTE : caller is responsible for ensuring that consistency conditions are * met, thus if the region is writable the caller must either mark it read * only or pass in the VM_DELAY_READONLY flag in which case * check_thread_vm_area will mark it read only when a thread goes to build a * block from the region */ static bool add_executable_vm_area(app_pc start, app_pc end, uint vm_flags, uint frag_flags, bool have_writelock _IF_DEBUG(const char *comment)) { vm_area_t *existing_area = NULL; coarse_info_t *info = NULL; coarse_info_t *tofree = NULL; app_pc delay_start = NULL, delay_end = NULL; /* only expect to see the *_READONLY flags on WRITABLE regions */ ASSERT(!TEST(VM_DELAY_READONLY, vm_flags) || TEST(VM_WRITABLE, vm_flags)); ASSERT(!TEST(VM_MADE_READONLY, vm_flags) || TEST(VM_WRITABLE, vm_flags)); #ifdef DEBUG /* can't use DODEBUG b/c of ifdef inside */ { /* we only expect certain flags */ uint expect = VM_WRITABLE|VM_UNMOD_IMAGE|VM_MADE_READONLY| VM_DELAY_READONLY|VM_WAS_FUTURE|VM_EXECUTED_FROM|VM_DRIVER_ADDRESS; # ifdef PROGRAM_SHEPHERDING expect |= VM_PATTERN_REVERIFY; # endif ASSERT(!TESTANY(~expect, vm_flags)); } #endif /* DEBUG */ if (!have_writelock) { #ifdef HOT_PATCHING_INTERFACE /* case 9970: need to check hotp vs perscache; rank order hotp < exec_areas */ if (DYNAMO_OPTION(hot_patching)) read_lock(hotp_get_lock()); #endif write_lock(&executable_areas->lock); } ASSERT_OWN_WRITE_LOCK(true, &executable_areas->lock); /* FIXME: rather than change all callers who already hold exec_areas lock * to first grab hotp lock, we don't support perscache in those cases. * We expect to only be adding a coarse-grain area for module loads. */ ASSERT(!TEST(FRAG_COARSE_GRAIN, frag_flags) || !have_writelock); if (TEST(FRAG_COARSE_GRAIN, frag_flags) && !have_writelock) { #ifdef WINDOWS if (!add_executable_vm_area_check_IAT(&start, &end, vm_flags, &existing_area, &info, &tofree, &delay_start, &delay_end)) frag_flags &= ~FRAG_COARSE_GRAIN; #else ASSERT(TEST(VM_UNMOD_IMAGE, vm_flags)); #endif ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); if (TEST(FRAG_COARSE_GRAIN, frag_flags) && DYNAMO_OPTION(use_persisted) && info == NULL /* if clients are present, don't load until after they're initialized */ IF_CLIENT_INTERFACE(&& (dynamo_initialized || !CLIENTS_EXIST()))) { info = vm_area_load_coarse_unit(&start, &end, vm_flags, frag_flags, false _IF_DEBUG(comment)); } } if (existing_area == NULL) { add_executable_vm_area_helper(start, end, vm_flags, frag_flags, info _IF_DEBUG(comment)); } else { /* we shouldn't need the other parts of _helper() */ ASSERT(!TEST(VM_WRITABLE, vm_flags)); #ifdef PROGRAM_SHEPHERDING ASSERT(DYNAMO_OPTION(selfmod_futureexec) || !TEST(FRAG_SELFMOD_SANDBOXED, frag_flags)); #endif } if (delay_start != NULL) { ASSERT(delay_end > delay_start); add_executable_vm_area_helper(delay_start, delay_end, vm_flags, frag_flags, NULL _IF_DEBUG(comment)); } DOLOG(2, LOG_VMAREAS, { /* new area could have been split into multiple */ print_contig_vm_areas(executable_areas, start, end, GLOBAL, "new executable vm area: "); }); if (!have_writelock) { write_unlock(&executable_areas->lock); #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) read_unlock(hotp_get_lock()); #endif } if (tofree != NULL) { /* Since change_linking_lock and info->lock are higher rank than exec areas we * must free down here. FIXME: this should move to 1st exec for 4.4. 
*/ ASSERT(tofree->non_frozen == NULL); coarse_unit_reset_free(GLOBAL_DCONTEXT, tofree, false/*no locks*/, true/*unlink*/, true/*give up primary*/); coarse_unit_free(GLOBAL_DCONTEXT, tofree); } return true; } /* Used to add dr allocated memory regions that may execute out of the cache */ /* NOTE : region is assumed to not be writable, caller is responsible for * ensuring this (see fixme in signal.c adding sigreturn code) */ bool add_executable_region(app_pc start, size_t size _IF_DEBUG(const char *comment)) { return add_executable_vm_area(start, start+size, 0, 0, false/*no lock*/ _IF_DEBUG(comment)); } /* remove an executable area from the area list * the caller is responsible for ensuring that all threads' local vm lists * are updated by calling flush_fragments_and_remove_region (can't just * remove local vm areas and leave existing fragments hanging...) */ static bool remove_executable_vm_area(app_pc start, app_pc end, bool have_writelock) { bool ok; LOG(GLOBAL, LOG_VMAREAS, 2, "removing executable vm area: "PFX"-"PFX"\n", start, end); if (!have_writelock) write_lock(&executable_areas->lock); ok = remove_vm_area(executable_areas, start, end, true/*restore writability!*/); if (!have_writelock) write_unlock(&executable_areas->lock); return ok; } /* removes a region from the executable list */ /* NOTE :the caller is responsible for ensuring that all threads' local * vm lists are updated by calling flush_fragments_and_remove_region */ bool remove_executable_region(app_pc start, size_t size, bool have_writelock) { return remove_executable_vm_area(start, start+size, have_writelock); } #ifdef CLIENT_INTERFACE /* To give clients a chance to process pcaches as we load them, we * delay the loading until we've initialized the clients. */ void vm_area_delay_load_coarse_units(void) { int i; ASSERT(!dynamo_initialized); if (!DYNAMO_OPTION(use_persisted) || /* we already loaded if there's no client */ !CLIENTS_EXIST()) return; write_lock(&executable_areas->lock); for (i = 0; i < executable_areas->length; i++) { if (TEST(FRAG_COARSE_GRAIN, executable_areas->buf[i].frag_flags)) { vm_area_t *a = &executable_areas->buf[i]; /* store cur_info b/c a might be blown away */ coarse_info_t *cur_info = (coarse_info_t *) a->custom.client; if (cur_info == NULL || !cur_info->frozen) { app_pc start = a->start, end = a->end; coarse_info_t *info = vm_area_load_coarse_unit(&start, &end, a->vm_flags, a->frag_flags, true _IF_DEBUG(a->comment)); if (info != NULL) { /* re-acquire a and i */ DEBUG_DECLARE(bool ok = ) binary_search(executable_areas, info->base_pc, info->base_pc+1/*open end*/, &a, &i, false); ASSERT(ok); if (cur_info != NULL) info->non_frozen = cur_info; a->custom.client = (void *) info; } } else ASSERT_NOT_REACHED(); /* shouldn't have been loaded already */ } } write_unlock(&executable_areas->lock); } #endif /* case 10995: we have to delay freeing un-executed coarse units until * we can release the exec areas lock when we flush an un-executed region. * This routine frees the queued-up coarse units, and releases the * executable areas lock, which the caller must hold. 
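 * Returns true if any queued units were actually freed.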
 */
bool
free_nonexec_coarse_and_unlock()
{
    bool freed_any = false;
    coarse_info_t *info = NULL;
    coarse_info_t *next_info;
    /* We must hold the exec areas lock while traversing the to-delete list,
     * yet we cannot delete while holding it, so we use a temp var */
    ASSERT_OWN_WRITE_LOCK(true, &executable_areas->lock);
    ASSERT(coarse_to_delete != NULL);
    if (coarse_to_delete != NULL/*paranoid*/ && *coarse_to_delete != NULL) {
        freed_any = true;
        info = *coarse_to_delete;
        *coarse_to_delete = NULL;
    }
    /* Now we can unlock, and then it's safe to delete */
    executable_areas_unlock();
    if (freed_any) {
        /* units are chained by non_frozen field */
        while (info != NULL) {
            next_info = info->non_frozen;
            if (info->cache != NULL) {
                ASSERT(info->persisted);
                /* We shouldn't need to unlink since no execution has occurred
                 * (lazy linking) */
                ASSERT(info->incoming == NULL);
                ASSERT(!coarse_unit_outgoing_linked(GLOBAL_DCONTEXT, info));
            }
            coarse_unit_reset_free(GLOBAL_DCONTEXT, info, false/*no locks*/,
                                   false/*!unlink*/, true/*give up primary*/);
            coarse_unit_free(GLOBAL_DCONTEXT, info);
            info = next_info;
        }
    }
    return freed_any;
}

#ifdef PROGRAM_SHEPHERDING
/* add a "future executable area" (e.g., mapped EW) to the future list
 *
 * FIXME: now that this is vmareas.c-internal we should change it to
 * take in direct VM_ flags, and make separate flags for each future-adding
 * code origins policy. Then we can have policy-specific removal from future list.
 */
static bool
add_futureexec_vm_area(app_pc start, app_pc end, bool once_only
                       _IF_DEBUG(const char *comment))
{
    /* FIXME: don't add portions that overlap w/ exec areas */
    LOG(GLOBAL, LOG_VMAREAS, 2, "new FUTURE executable vm area: "PFX"-"PFX" %s%s\n",
        start, end, (once_only?"ONCE ":""), comment);

    if (DYNAMO_OPTION(unloaded_target_exception)) {
        /* case 9371 - to avoid possible misclassification in a tight race
         * between NtUnmapViewOfSection and a consecutive future area
         * allocated in the same place, we clear out the unload-in-progress flag */
        mark_unload_future_added(start, end - start);
    }

    write_lock(&futureexec_areas->lock);
    add_vm_area(futureexec_areas, start, end,
                (once_only ? VM_ONCE_ONLY : 0),
                0 /* frag_flags */, NULL _IF_DEBUG(comment));
    write_unlock(&futureexec_areas->lock);
    return true;
}

/* remove a "future executable area" from the future list */
static bool
remove_futureexec_vm_area(app_pc start, app_pc end)
{
    bool ok;
    LOG(GLOBAL, LOG_VMAREAS, 2, "removing FUTURE executable vm area: "PFX"-"PFX"\n",
        start, end);
    write_lock(&futureexec_areas->lock);
    ok = remove_vm_area(futureexec_areas, start, end, false);
    write_unlock(&futureexec_areas->lock);
    return ok;
}

/* returns true if the passed in area overlaps any known future executable areas */
static bool
futureexec_vm_area_overlap(app_pc start, app_pc end)
{
    bool overlap;
    read_lock(&futureexec_areas->lock);
    overlap = vm_area_overlap(futureexec_areas, start, end);
    read_unlock(&futureexec_areas->lock);
    return overlap;
}
#endif /* PROGRAM_SHEPHERDING */

/* lookup against the per-process executable addresses map */
bool
is_executable_address(app_pc addr)
{
    bool found;
    read_lock(&executable_areas->lock);
    found = lookup_addr(executable_areas, addr, NULL);
    read_unlock(&executable_areas->lock);
    return found;
}

/* returns any VM_ flags associated with addr's vm area
 * returns 0 if no area is found
 * cf.
get_executable_area_flags() for FRAG_ flags */ bool get_executable_area_vm_flags(app_pc addr, uint *vm_flags) { bool found = false; vm_area_t *area; read_lock(&executable_areas->lock); if (lookup_addr(executable_areas, addr, &area)) { *vm_flags = area->vm_flags; found = true; } read_unlock(&executable_areas->lock); return found; } /* if addr is an executable area, returns true and returns in *flags * any FRAG_ flags associated with addr's vm area * returns false if area not found * * cf. get_executable_area_vm_flags() for VM_ flags */ bool get_executable_area_flags(app_pc addr, uint *frag_flags) { bool found = false; vm_area_t *area; read_lock(&executable_areas->lock); if (lookup_addr(executable_areas, addr, &area)) { *frag_flags = area->frag_flags; found = true; } read_unlock(&executable_areas->lock); return found; } /* For coarse-grain operation, we use a separate cache and htable per region. * See coarse_info_t notes on synchronization model. * Returns NULL when region is not coarse. * Assumption: this routine is called prior to the first execution from a * coarse vm area region. */ static coarse_info_t * get_coarse_info_internal(app_pc addr, bool init, bool have_shvm_lock) { coarse_info_t *coarse = NULL; vm_area_t *area = NULL; vm_area_t area_copy = {0,}; bool is_coarse = false; bool add_to_shared = false; bool reset_unit = false; /* FIXME perf opt: have a last_area */ /* FIXME: could use vmvector_lookup_data() but I need area->{vm,frag}_flags */ read_lock(&executable_areas->lock); if (lookup_addr(executable_areas, addr, &area)) { ASSERT(area != NULL); /* The custom field is initialized to 0 in add_vm_area */ coarse = (coarse_info_t *) area->custom.client; is_coarse = TEST(FRAG_COARSE_GRAIN, area->frag_flags); /* We always create coarse_info_t up front in add_executable_vm_area */ ASSERT((is_coarse && coarse != NULL) || (!is_coarse && coarse == NULL)); if (init && coarse != NULL && TEST(PERSCACHE_CODE_INVALID, coarse->flags)) { /* Reset the unit as the validating event did not occur * (can't do it here due to lock rank order vs exec areas lock) */ reset_unit = true; /* We do need to adjust coarse unit bounds for 4.3 when we don't see * the rebind +rx event */ adjust_coarse_unit_bounds(area, true/*even if invalid*/); STATS_INC(coarse_executed_invalid); /* FIXME for 4.4: validation won't happen post-rebind like 4.3, so we * will always get here marked as invalid. Here we'll do full md5 * modulo rebasing check (split into per-page via read-only as opt). */ } /* We cannot add to shared_data when we load in a persisted unit * due to lock rank issues, so we delay until first asked about. */ if (init && TEST(VM_ADD_TO_SHARED_DATA, area->vm_flags)) { add_to_shared = true; area->vm_flags &= ~VM_ADD_TO_SHARED_DATA; area->vm_flags |= VM_EXECUTED_FROM; area_copy = *area; } else { DODEBUG({ area_copy = *area; }); /* for ASSERT below */ } } read_unlock(&executable_areas->lock); if (coarse != NULL && init) { /* For 4.3, bounds check is done at post-rebind validation; * FIXME: in 4.4, we need to do it here and adjust bounds or invalidate * pcache if not a superset (we'll allow any if_rx_text to merge into coarse). 
*/ ASSERT(coarse->base_pc == area_copy.start && coarse->end_pc == area_copy.end); if (reset_unit) { coarse_unit_reset_free(get_thread_private_dcontext(), coarse, false/*no locks*/, true/*unlink*/, true/*give up primary*/); } if (add_to_shared) { if (!have_shvm_lock) SHARED_VECTOR_RWLOCK(&shared_data->areas, write, lock); ASSERT_VMAREA_VECTOR_PROTECTED(&shared_data->areas, WRITE); /* avoid double-add from a race */ if (!lookup_addr(&shared_data->areas, coarse->base_pc, NULL)) { LOG(GLOBAL, LOG_VMAREAS, 2, "adding coarse region "PFX"-"PFX" to shared vm areas\n", area_copy.start, area_copy.end); add_vm_area(&shared_data->areas, area_copy.start, area_copy.end, area_copy.vm_flags, area_copy.frag_flags, NULL _IF_DEBUG(area_copy.comment)); } if (!have_shvm_lock) SHARED_VECTOR_RWLOCK(&shared_data->areas, write, unlock); } } else ASSERT(!add_to_shared && !reset_unit); return coarse; } coarse_info_t * get_executable_area_coarse_info(app_pc addr) { return get_coarse_info_internal(addr, true/*init*/, false/*no lock*/); } /* Ensures there is a non-frozen coarse unit for the executable_areas region * corresponding to "frozen", which is now frozen. */ void mark_executable_area_coarse_frozen(coarse_info_t *frozen) { vm_area_t *area = NULL; coarse_info_t *info; ASSERT(frozen->frozen); /* caller should mark */ write_lock(&executable_areas->lock); /* since writing flags */ if (lookup_addr(executable_areas, frozen->base_pc, &area)) { ASSERT(area != NULL); /* The custom field is initialized to 0 in add_vm_area */ if (area->custom.client != NULL) { ASSERT(TEST(FRAG_COARSE_GRAIN, area->frag_flags)); info = (coarse_info_t *) area->custom.client; ASSERT(info == frozen && frozen->non_frozen == NULL); info = coarse_unit_create(frozen->base_pc, frozen->end_pc, &frozen->module_md5, true/*for execution*/); LOG(GLOBAL, LOG_VMAREAS, 1, "new secondary coarse unit %s "PFX"-"PFX"\n", info->module, frozen->base_pc, frozen->end_pc); frozen->non_frozen = info; } else ASSERT(!TEST(FRAG_COARSE_GRAIN, area->frag_flags)); } write_unlock(&executable_areas->lock); } /* iterates through all executable areas overlapping the pages touched * by the region addr_[start,end) * if are_all_matching is false * returns true if any overlapping region has matching vm_flags and frag_flags; * false otherwise * if are_all_matching is true * returns true only if all overlapping regions have matching vm_flags * and matching frag_flags, or if there are no overlapping regions; * false otherwise * a match of 0 matches all */ static bool executable_areas_match_flags(app_pc addr_start, app_pc addr_end, bool *found_area, bool are_all_matching /* ALL when true, EXISTS when false */, uint match_vm_flags, uint match_frag_flags) { /* binary search below will assure that we hold an executable_areas lock */ app_pc page_start = (app_pc)ALIGN_BACKWARD(addr_start, PAGE_SIZE); app_pc page_end = (app_pc)ALIGN_FORWARD(addr_end, PAGE_SIZE); vm_area_t *area; if (found_area != NULL) *found_area = false; ASSERT(page_start < page_end || page_end == NULL); /* wraparound */ /* We have subpage regions from some of our rules, we should return true * if any area on the list that overlaps the pages enclosing the addr_[start,end) * region is writable */ while (binary_search(executable_areas, page_start, page_end, &area, NULL, true)) { if (found_area != NULL) *found_area = true; /* TESTALL will return true for a match of 0 */ if (are_all_matching) { if (!TESTALL(match_vm_flags, area->vm_flags) || !TESTALL(match_frag_flags, area->frag_flags)) return false; } else { if 
(TESTALL(match_vm_flags, area->vm_flags) && TESTALL(match_frag_flags, area->frag_flags)) return true; } if (area->end < page_end || page_end == NULL) page_start = area->end; else break; } return are_all_matching; /* false for EXISTS, true for ALL */ } /* returns true if addr is on a page that was marked writable by the * application but that we marked RO b/c it contains executable code * does NOT check if addr is executable, only that something on its page is! */ bool is_executable_area_writable(app_pc addr) { bool writable; read_lock(&executable_areas->lock); writable = executable_areas_match_flags(addr, addr+1 /* open ended */, NULL, false /* EXISTS */, VM_MADE_READONLY, 0); read_unlock(&executable_areas->lock); return writable; } #if defined(DEBUG) /* since only used for a stat right now */ /* returns true if region [start, end) overlaps pages that match match_vm_flags * e.g. VM_WRITABLE is set when all pages marked writable by the * application but that we marked RO b/c they contain executable code. * * Does NOT check if region is executable, only that something * overlapping its pages is! are_all_matching determines * whether all regions need to match flags, or whether a matching * region exists. */ static bool is_executable_area_writable_overlap(app_pc start, app_pc end, bool are_all_matching, uint match_vm_flags) { bool writable; read_lock(&executable_areas->lock); writable = executable_areas_match_flags(start, end, NULL, are_all_matching, match_vm_flags, 0); read_unlock(&executable_areas->lock); return writable; } #endif bool is_pretend_or_executable_writable(app_pc addr) { /* see if asking about an executable area we made read-only */ return (!standalone_library && (is_executable_area_writable(addr) || (USING_PRETEND_WRITABLE() && is_pretend_writable_address(addr)))); } /* Returns true if region [start, end) overlaps any regions that are * marked as FRAG_COARSE_GRAIN. */ bool executable_vm_area_coarse_overlap(app_pc start, app_pc end) { bool match; read_lock(&executable_areas->lock); match = executable_areas_match_flags(start, end, NULL, false/*exists, not all*/, 0, FRAG_COARSE_GRAIN); read_unlock(&executable_areas->lock); return match; } /* Returns true if region [start, end) overlaps any regions that are * marked as VM_PERSISTED_CACHE. */ bool executable_vm_area_persisted_overlap(app_pc start, app_pc end) { bool match; read_lock(&executable_areas->lock); match = executable_areas_match_flags(start, end, NULL, false/*exists, not all*/, VM_PERSISTED_CACHE, 0); read_unlock(&executable_areas->lock); return match; } /* Returns true if any part of region [start, end) has ever been executed from */ bool executable_vm_area_executed_from(app_pc start, app_pc end) { bool match; read_lock(&executable_areas->lock); match = executable_areas_match_flags(start, end, NULL, false/*exists, not all*/, VM_EXECUTED_FROM, 0); read_unlock(&executable_areas->lock); return match; } /* If there is no overlap between executable_areas and [start,end), returns false. * Else, returns true and sets [overlap_start,overlap_end) as the bounds of the first * and last executable_area regions that overlap [start,end); i.e., * overlap_start starts the first area that overlaps [start,end); * overlap_end ends the last area that overlaps [start,end). * Note that overlap_start may be > start and overlap_end may be < end. 
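 * Illustrative example (hypothetical addresses): with executable areas
 * [0x1000,0x2000) and [0x3000,0x4000), a query of [0x1800,0x3800) returns
 * overlap_start=0x1000 and overlap_end=0x4000.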
* * If frag_flags != 0, the region described above is expanded such that the regions * before and after [overlap_start,overlap_end) do NOT match * [overlap_start,overlap_end) in TESTALL of frag_flags, but only considering * non-contiguous regions if !contig. * For example, we pass in FRAG_COARSE_GRAIN and contig=true; then, if * the overlap_start region is FRAG_COARSE_GRAIN and it has a contiguous region * to its left that is also FRAG_COARSE_GRAIN, but beyond that there is no * contiguous region, we will return the start of the region to the left rather than * the regular overlap_start. */ bool executable_area_overlap_bounds(app_pc start, app_pc end, app_pc *overlap_start/*OUT*/, app_pc *overlap_end/*OUT*/, uint frag_flags, bool contig) { int start_index, end_index; /* must be signed */ int i; /* must be signed */ ASSERT(overlap_start != NULL && overlap_end != NULL); read_lock(&executable_areas->lock); /* Find first overlapping region */ if (!binary_search(executable_areas, start, end, NULL, &start_index, true/*first*/)) return false; ASSERT(start_index >= 0); if (frag_flags != 0) { for (i = start_index - 1; i >= 0; i--) { if ((contig && executable_areas->buf[i].end != executable_areas->buf[i+1].start) || (TESTALL(frag_flags, executable_areas->buf[i].frag_flags) != TESTALL(frag_flags, executable_areas->buf[start_index].frag_flags))) break; } ASSERT(i + 1 >= 0); *overlap_start = executable_areas->buf[i + 1].start; } else *overlap_start = executable_areas->buf[start_index].start; /* Now find region just at or before end */ binary_search(executable_areas, end-1, end, NULL, &end_index, true/*first*/); ASSERT(end_index >= 0); /* else 1st binary search would have failed */ ASSERT(end_index >= start_index); if (end_index < executable_areas->length - 1 && frag_flags != 0) { for (i = end_index + 1; i < executable_areas->length; i++) { if ((contig && executable_areas->buf[i].start != executable_areas->buf[i-1].end) || (TESTALL(frag_flags, executable_areas->buf[i].frag_flags) != TESTALL(frag_flags, executable_areas->buf[end_index].frag_flags))) break; } ASSERT(i - 1 < executable_areas->length); *overlap_end = executable_areas->buf[i - 1].end; } else /* no extension asked for, or nowhere to extend to */ *overlap_end = executable_areas->buf[end_index].end; read_unlock(&executable_areas->lock); return true; } /*************************************************** * Iterator over coarse units in executable_areas that overlap [start,end) */ void vm_area_coarse_iter_start(vmvector_iterator_t *vmvi, app_pc start) { int start_index; /* must be signed */ ASSERT(vmvi != NULL); vmvector_iterator_start(executable_areas, vmvi); ASSERT_OWN_READ_LOCK(true, &executable_areas->lock); /* Find first overlapping region */ if (start != NULL && binary_search(executable_areas, start, start+1, NULL, &start_index, true/*first*/)) { ASSERT(start_index >= 0); vmvi->index = start_index - 1 /*since next is +1*/; } } static bool vm_area_coarse_iter_find_next(vmvector_iterator_t *vmvi, app_pc end, bool mutate, coarse_info_t **info_out/*OUT*/) { int forw; ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE); ASSERT(vmvi->vector == executable_areas); for (forw = 1; vmvi->index + forw < vmvi->vector->length; forw++) { if (end != NULL && executable_areas->buf[vmvi->index+forw].start >= end) break; if (TEST(FRAG_COARSE_GRAIN, executable_areas->buf[vmvi->index+forw].frag_flags)) { coarse_info_t *info = executable_areas->buf[vmvi->index+forw].custom.client; if (mutate) vmvi->index = vmvi->index+forw; ASSERT(info != NULL); /* we always 
allocate up front */ if (info_out != NULL) *info_out = info; return true; } } return false; } bool vm_area_coarse_iter_hasnext(vmvector_iterator_t *vmvi, app_pc end) { return vm_area_coarse_iter_find_next(vmvi, end, false/*no mutate*/, NULL); } /* May want to return region bounds if have callers who care about that. */ coarse_info_t * vm_area_coarse_iter_next(vmvector_iterator_t *vmvi, app_pc end) { coarse_info_t *info = NULL; vm_area_coarse_iter_find_next(vmvi, end, true/*mutate*/, &info); return info; } void vm_area_coarse_iter_stop(vmvector_iterator_t *vmvi) { ASSERT(vmvi->vector == executable_areas); vmvector_iterator_stop(vmvi); } /***************************************************/ /* returns true if addr is on a page that contains at least one selfmod * region and no non-selfmod regions. */ bool is_executable_area_on_all_selfmod_pages(app_pc start, app_pc end) { bool all_selfmod; bool found; read_lock(&executable_areas->lock); all_selfmod = executable_areas_match_flags(start, end, &found, true /* ALL */, 0, FRAG_SELFMOD_SANDBOXED); read_unlock(&executable_areas->lock); /* we require at least one area to be present */ return all_selfmod && found; } /* Meant to be called from a seg fault handler. * Returns true if addr is on a page that was marked writable by the * application but that we marked RO b/c it contains executable code, OR if * addr is on a writable page (since another thread could have removed addr * from exec list before seg fault handler was scheduled). * does NOT check if addr is executable, only that something on its page is! */ bool was_executable_area_writable(app_pc addr) { bool found_area = false, was_writable = false; read_lock(&executable_areas->lock); was_writable = executable_areas_match_flags(addr, addr+1, &found_area, false /* EXISTS */, VM_MADE_READONLY, 0); /* seg fault could have happened, then area was made writable before * thread w/ exception was scheduled. * we assume that area was writable at time of seg fault if it's * exec writable now (above) OR no area was found and it's writable now * and not on DR area list (below). * Need to check DR area list since a write to protected DR area from code * cache can end up here, as DR area may be made writable once in fault handler * due to self-protection un-protection for entering DR! * FIXME: checking for threads_ever_created==1 could further rule out other * causes for some apps. * Keep readlock to avoid races. */ if (!found_area) { uint prot; if (get_memory_info(addr, NULL, NULL, &prot)) was_writable = TEST(MEMPROT_WRITE, prot) && !is_dynamo_address(addr); } read_unlock(&executable_areas->lock); return was_writable; } /* returns true if addr is in an executable area that contains * self-modifying code, and so should be sandboxed */ bool is_executable_area_selfmod(app_pc addr) { uint flags; if (get_executable_area_flags(addr, &flags)) return TEST(FRAG_SELFMOD_SANDBOXED, flags); else return false; } #ifdef DGC_DIAGNOSTICS /* returns false if addr is not in an executable area marked as dyngen */ bool is_executable_area_dyngen(app_pc addr) { uint flags; if (get_executable_area_flags(addr, &flags)) return TEST(FRAG_DYNGEN, flags); else return false; } #endif /* lookup against the per-process addresses map */ bool is_valid_address(app_pc addr) { ASSERT_NOT_IMPLEMENTED(false && "is_valid_address not implemented"); return false; } /* Due to circular dependencies bet vmareas and global heap, we cannot * incrementally keep dynamo_areas up to date. 
* Instead, we wait until people ask about it, when we do a complete * walk through the heap units and add them all (yes, re-adding * ones we've seen). */ static void update_dynamo_vm_areas(bool have_writelock) { if (dynamo_areas_uptodate) return; if (!have_writelock) dynamo_vm_areas_lock(); ASSERT(dynamo_areas != NULL); ASSERT_OWN_WRITE_LOCK(true, &dynamo_areas->lock); /* avoid uptodate asserts from heap needed inside add_vm_area */ DODEBUG({ dynamo_areas_synching = true; }); /* check again with lock, and repeat until done since * could require more memory in the middle for vm area vector */ while (!dynamo_areas_uptodate) { dynamo_areas_uptodate = true; heap_vmareas_synch_units(); LOG(GLOBAL, LOG_VMAREAS, 3, "after updating dynamo vm areas:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(dynamo_areas, GLOBAL); }); } DODEBUG({ dynamo_areas_synching = false; }); if (!have_writelock) dynamo_vm_areas_unlock(); } bool are_dynamo_vm_areas_stale(void) { return !dynamo_areas_uptodate; } /* Used for DR heap area changes as circular dependences prevent * directly adding or removing DR vm areas-> * Must hold the DR areas lock across the combination of calling this and * modifying the heap lists. */ void mark_dynamo_vm_areas_stale() { /* ok to ask for locks or mark stale before dynamo_areas is allocated */ ASSERT((dynamo_areas == NULL && get_num_threads() <= 1 /*must be only DR thread*/) || self_owns_write_lock(&dynamo_areas->lock)); dynamo_areas_uptodate = false; } /* HACK to get recursive write lock for internal and external use */ void dynamo_vm_areas_lock() { all_memory_areas_lock(); /* ok to ask for locks or mark stale before dynamo_areas is allocated, * during heap init and before we can allocate it. no lock needed then. */ ASSERT(dynamo_areas != NULL || get_num_threads() <= 1 /*must be only DR thread*/); if (dynamo_areas == NULL) return; if (self_owns_write_lock(&dynamo_areas->lock)) { dynamo_areas_recursion++; /* we have a 5-deep path: * global_heap_alloc | heap_create_unit | get_guarded_real_memory | * heap_low_on_memory | release_guarded_real_memory */ ASSERT_CURIOSITY(dynamo_areas_recursion <= 4); } else write_lock(&dynamo_areas->lock); } void dynamo_vm_areas_unlock() { /* ok to ask for locks or mark stale before dynamo_areas is allocated, * during heap init and before we can allocate it. no lock needed then. */ ASSERT(dynamo_areas != NULL || get_num_threads() <= 1 /*must be only DR thread*/); if (dynamo_areas == NULL) return; if (dynamo_areas_recursion > 0) { ASSERT_OWN_WRITE_LOCK(true, &dynamo_areas->lock); dynamo_areas_recursion--; } else write_unlock(&dynamo_areas->lock); all_memory_areas_unlock(); } bool self_owns_dynamo_vm_area_lock() { /* heap inits before dynamo_areas (which now needs heap to init) so * we ignore the lock prior to dynamo_areas init, assuming single-DR-thread. 
*/ ASSERT(dynamo_areas != NULL || get_num_threads() <= 1 /*must be only DR thread*/); return dynamo_areas == NULL || self_owns_write_lock(&dynamo_areas->lock); } /* grabs read lock and checks for update -- when it returns it guarantees * to hold read lock with no updates pending */ static void dynamo_vm_areas_start_reading() { read_lock(&dynamo_areas->lock); while (!dynamo_areas_uptodate) { /* switch to write lock * cannot rely on uptodate value prior to a lock so must * grab read and then check it, and back out if necessary * as we have no reader->writer transition */ read_unlock(&dynamo_areas->lock); dynamo_vm_areas_lock(); update_dynamo_vm_areas(true); /* FIXME: more efficient if we could safely drop from write to read * lock -- could simply reverse order here and then while becomes if, * but a little fragile in that properly nested rwlocks may be assumed * elsewhere */ dynamo_vm_areas_unlock(); read_lock(&dynamo_areas->lock); } } static void dynamo_vm_areas_done_reading() { read_unlock(&dynamo_areas->lock); } /* add dynamo-internal area to the dynamo-internal area list * this should be atomic wrt the memory being allocated to avoid races * w/ the app executing from it -- thus caller must hold DR areas write lock! */ bool add_dynamo_vm_area(app_pc start, app_pc end, uint prot, bool unmod_image _IF_DEBUG(const char *comment)) { uint vm_flags = (TEST(MEMPROT_WRITE, prot) ? VM_WRITABLE : 0) | (unmod_image ? VM_UNMOD_IMAGE : 0); /* case 3045: areas inside the vmheap reservation are not added to the list */ ASSERT(!is_vmm_reserved_address(start, end - start)); LOG(GLOBAL, LOG_VMAREAS, 2, "new dynamo vm area: "PFX"-"PFX" %s\n", start, end, comment); ASSERT(dynamo_areas != NULL); ASSERT_OWN_WRITE_LOCK(true, &dynamo_areas->lock); if (!dynamo_areas_uptodate) update_dynamo_vm_areas(true); ASSERT(!vm_area_overlap(dynamo_areas, start, end)); add_vm_area(dynamo_areas, start, end, vm_flags, 0 /* frag_flags */, NULL _IF_DEBUG(comment)); update_all_memory_areas(start, end, prot, unmod_image ? DR_MEMTYPE_IMAGE : DR_MEMTYPE_DATA); return true; } /* remove dynamo-internal area from the dynamo-internal area list * this should be atomic wrt the memory being freed to avoid races * w/ it being re-used and problems w/ the app executing from it -- * thus caller must hold DR areas write lock! */ bool remove_dynamo_vm_area(app_pc start, app_pc end) { bool ok; DEBUG_DECLARE(bool removed); LOG(GLOBAL, LOG_VMAREAS, 2, "removing dynamo vm area: "PFX"-"PFX"\n", start, end); ASSERT(dynamo_areas != NULL); ASSERT_OWN_WRITE_LOCK(true, &dynamo_areas->lock); if (!dynamo_areas_uptodate) update_dynamo_vm_areas(true); ok = remove_vm_area(dynamo_areas, start, end, false); DEBUG_DECLARE(removed = ) remove_from_all_memory_areas(start, end); ASSERT(removed); return ok; } /* adds dynamo-internal area to the dynamo-internal area list, but * doesn't grab the dynamo areas lock. intended to be only used for * heap walk updates, where the lock is grabbed prior to the walk and held * throughout the entire walk. */ bool add_dynamo_heap_vm_area(app_pc start, app_pc end, bool writable, bool unmod_image _IF_DEBUG(const char *comment)) { LOG(GLOBAL, LOG_VMAREAS, 2, "new dynamo vm area: "PFX"-"PFX" %s\n", start, end, comment); ASSERT(!vm_area_overlap(dynamo_areas, start, end)); /* case 3045: areas inside the vmheap reservation are not added to the list */ ASSERT(!is_vmm_reserved_address(start, end - start)); /* add_vm_area will assert that write lock is held */ add_vm_area(dynamo_areas, start, end, VM_DR_HEAP | (writable ? 
VM_WRITABLE : 0) | (unmod_image ? VM_UNMOD_IMAGE : 0), 0 /* frag_flags */, NULL _IF_DEBUG(comment)); return true; } /* breaking most abstractions here we return whether current vmarea * vector starts at given heap_pc. The price of circular dependency * is that abstractions can no longer be safely used. case 4196 */ bool is_dynamo_area_buffer(byte *heap_unit_start_pc) { return (void*)heap_unit_start_pc == dynamo_areas->buf; } /* assumes caller holds dynamo_areas->lock */ void remove_dynamo_heap_areas() { int i; /* remove_vm_area will assert that write lock is held, but let's make * sure we're holding it as we walk the vector, even if make no removals */ ASSERT_VMAREA_VECTOR_PROTECTED(dynamo_areas, WRITE); LOG(GLOBAL, LOG_VMAREAS, 4, "remove_dynamo_heap_areas:\n"); /* walk backwards to avoid O(n^2) */ for (i = dynamo_areas->length - 1; i >= 0; i--) { if (TEST(VM_DR_HEAP, dynamo_areas->buf[i].vm_flags)) { app_pc start = dynamo_areas->buf[i].start; app_pc end = dynamo_areas->buf[i].end; /* ASSUMPTION: remove_vm_area, given exact bounds, simply shifts later * areas down in vector! */ LOG(GLOBAL, LOG_VMAREAS, 4, "Before removing vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(dynamo_areas, GLOBAL); }); remove_vm_area(dynamo_areas, start, end, false); LOG(GLOBAL, LOG_VMAREAS, 4, "After removing vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(dynamo_areas, GLOBAL); }); remove_from_all_memory_areas(start, end); } } } bool is_dynamo_address(app_pc addr) { bool found; /* case 3045: areas inside the vmheap reservation are not added to the list */ if (is_vmm_reserved_address(addr, 1)) return true; dynamo_vm_areas_start_reading(); found = lookup_addr(dynamo_areas, addr, NULL); dynamo_vm_areas_done_reading(); return found; } /* returns true iff address is an address that the app thinks is writable * but really is not, as it overlaps DR memory (or did at the prot time); * or we're preventing function patching in specified application modules. 
 */
bool
is_pretend_writable_address(app_pc addr)
{
    bool found;
    ASSERT(DYNAMO_OPTION(handle_DR_modify) == DR_MODIFY_NOP ||
           DYNAMO_OPTION(handle_ntdll_modify) == DR_MODIFY_NOP ||
           !IS_STRING_OPTION_EMPTY(patch_proof_list) ||
           !IS_STRING_OPTION_EMPTY(patch_proof_default_list));
    read_lock(&pretend_writable_areas->lock);
    found = lookup_addr(pretend_writable_areas, addr, NULL);
    read_unlock(&pretend_writable_areas->lock);
    return found;
}

/* returns true if the passed in area overlaps any known pretend writable areas */
static bool
pretend_writable_vm_area_overlap(app_pc start, app_pc end)
{
    bool overlap;
    read_lock(&pretend_writable_areas->lock);
    overlap = vm_area_overlap(pretend_writable_areas, start, end);
    read_unlock(&pretend_writable_areas->lock);
    return overlap;
}

#ifdef DEBUG
/* returns comment for addr, if there is one, else NULL */
char *
get_address_comment(app_pc addr)
{
    char *res = NULL;
    vm_area_t *area;
    bool ok;
    read_lock(&executable_areas->lock);
    ok = lookup_addr(executable_areas, addr, &area);
    if (ok)
        res = area->comment;
    read_unlock(&executable_areas->lock);
    if (!ok) {
        read_lock(&dynamo_areas->lock);
        ok = lookup_addr(dynamo_areas, addr, &area);
        if (ok)
            res = area->comment;
        read_unlock(&dynamo_areas->lock);
    }
    return res;
}
#endif

/* returns true if the passed in area overlaps any known executable areas
 * if !have_writelock, acquires the executable_areas read lock
 */
bool
executable_vm_area_overlap(app_pc start, app_pc end, bool have_writelock)
{
    bool overlap;
    if (!have_writelock)
        read_lock(&executable_areas->lock);
    overlap = vm_area_overlap(executable_areas, start, end);
    if (!have_writelock)
        read_unlock(&executable_areas->lock);
    return overlap;
}

void
executable_areas_lock()
{
    write_lock(&executable_areas->lock);
}

void
executable_areas_unlock()
{
    ASSERT_OWN_WRITE_LOCK(true, &executable_areas->lock);
    write_unlock(&executable_areas->lock);
}

/* returns true if the passed in area overlaps any dynamo areas */
bool
dynamo_vm_area_overlap(app_pc start, app_pc end)
{
    bool overlap;
    /* case 3045: areas inside the vmheap reservation are not added to the list */
    if (is_vmm_reserved_address(start, end - start))
        return true;
    dynamo_vm_areas_start_reading();
    overlap = vm_area_overlap(dynamo_areas, start, end);
    dynamo_vm_areas_done_reading();
    return overlap;
}

/* Checks to see if pc is on the stack
 * If pc has already been resolved into an area, pass that in.
 */
static bool
is_on_stack(dcontext_t *dcontext, app_pc pc, vm_area_t *area)
{
    byte *stack_base, *stack_top; /* "official" stack */
    byte *esp = (byte *) get_mcontext(dcontext)->xsp;
    byte *esp_base;
    size_t size;
    bool ok, query_esp = true;
    /* First check the area if we're supplied one. */
    if (area != NULL) {
        LOG(THREAD, LOG_VMAREAS, 3, "stack vs "PFX": area "PFX".."PFX", esp "PFX"\n",
            pc, area->start, area->end, esp);
        ASSERT(pc >= area->start && pc < area->end);
        if (esp >= area->start && esp < area->end)
            return true;
    }
    /* Now check the "official" stack bounds.  These are cached so cheap to
     * look up.  Xref case 8180, these might not always be available,
     * get_stack_bounds() takes care of any asserts on availability. */
    ok = get_stack_bounds(dcontext, &stack_base, &stack_top);
    if (ok) {
        LOG(THREAD, LOG_VMAREAS, 3, "stack vs "PFX": official "PFX".."PFX", esp "PFX"\n",
            pc, stack_base, stack_top, esp);
        ASSERT(stack_base < stack_top);
        if (pc >= stack_base && pc < stack_top)
            return true;
        /* We optimize away the expensive query of esp region bounds if esp
         * is within the "official" stack cached allocation bounds.
         */
        if (esp >= stack_base && esp < stack_top)
            query_esp = false;
    }
    if (query_esp) {
        ok = get_memory_info(esp, &esp_base, &size, NULL);
        ASSERT(ok);
        LOG(THREAD, LOG_VMAREAS, 3, "stack vs "PFX": region "PFX".."PFX", esp "PFX"\n",
            pc, esp_base, esp_base+size, esp);
        /* FIXME - stack could be split into multiple os regions by prot
         * differences, could check alloc base equivalence. */
        if (pc >= esp_base && pc < esp_base + size)
            return true;
    }
    return false;
}

bool
is_address_on_stack(dcontext_t *dcontext, app_pc address)
{
    return is_on_stack(dcontext, address, NULL);
}

/* returns true if an executable area exists with VM_DRIVER_ADDRESS,
 * not a strict opposite of is_user_address()
 */
bool
is_driver_address(app_pc addr)
{
    uint vm_flags;
    if (get_executable_area_vm_flags(addr, &vm_flags)) {
        return TEST(VM_DRIVER_ADDRESS, vm_flags);
    }
    return false;
}

#ifdef PROGRAM_SHEPHERDING
/********************************************/

/* forward declaration */
static int
check_origins_bb_pattern(dcontext_t *dcontext, app_pc addr, app_pc *base, size_t *size,
                         uint *vm_flags, uint *frag_flags);

/* The following two arrays need to be in synch with enum action_type_t defined in
 * vmareas.h.
 */
#define MESSAGE_EXEC_VIOLATION "Execution security violation was intercepted!\n"
#define MESSAGE_CONTACT_VENDOR "Contact your vendor for a security vulnerability fix.\n"
const char * const action_message[] = {
    /* no trailing newlines for SYSLOG_INTERNAL */
    MESSAGE_EXEC_VIOLATION MESSAGE_CONTACT_VENDOR "Program terminated.",
    MESSAGE_EXEC_VIOLATION MESSAGE_CONTACT_VENDOR "Program continuing!",
    MESSAGE_EXEC_VIOLATION MESSAGE_CONTACT_VENDOR "Program continuing after terminating thread.",
    MESSAGE_EXEC_VIOLATION MESSAGE_CONTACT_VENDOR "Program continuing after throwing an exception."
};

/* event log message IDs */
#ifdef WINDOWS
const uint action_event_id[] = {
    MSG_SEC_VIOLATION_TERMINATED,
    MSG_SEC_VIOLATION_CONTINUE,
    MSG_SEC_VIOLATION_THREAD,
    MSG_SEC_VIOLATION_EXCEPTION,
# ifdef HOT_PATCHING_INTERFACE
    MSG_HOT_PATCH_VIOLATION,
# endif
};
#endif

/* fills the target component of a threat ID */
static void
fill_security_violation_target(char name[MAXIMUM_VIOLATION_NAME_LENGTH],
                               const byte target_contents[4])
{
    int i;
    for (i = 0; i < 4; i++)
        name[i + 5] = (char) ((target_contents[i] % 10) + '0');
}

static void
get_security_violation_name(dcontext_t *dcontext, app_pc addr, char *name,
                            int name_length, security_violation_t violation_type,
                            const char *threat_id)
{
    ptr_uint_t addr_as_int;
    app_pc name_addr = NULL;
    int i;
    ASSERT(name_length >= MAXIMUM_VIOLATION_NAME_LENGTH);

    /* Hot patches & process_control use their own threat IDs. */
    if (IF_HOTP(violation_type == HOT_PATCH_DETECTOR_VIOLATION ||
                violation_type == HOT_PATCH_PROTECTOR_VIOLATION ||)
        IF_PROC_CTL(violation_type == PROCESS_CONTROL_VIOLATION ||)
        false) {
        ASSERT(threat_id != NULL);
        strncpy(name, threat_id, MAXIMUM_VIOLATION_NAME_LENGTH);
    } else {
        bool unreadable_addr = false;
        byte target_contents[4]; /* 4 instruction bytes read from target */
        ASSERT(threat_id == NULL); /* Supplied only for hot patch violations.*/

        /* First four characters are alphabetics calculated from the address
           of the beginning of the basic block from which the violating
           control transfer instruction originated.  Ideally we would use the
           exact CTI address rather than the beginning of its block, but we
           don't want to translate it back to an app address to reduce
           possible failure points on this critical path.
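           For illustration only (hypothetical byte values): target bytes
           0x12 0x34 0x56 0x78 become the digit characters '8', '2', '6', '0',
           since fill_security_violation_target() reduces each byte modulo 10.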
*/ name_addr = dcontext->last_fragment->tag; #ifdef WINDOWS /* Move PC relative to preferred base for consistent naming */ name_addr += get_module_preferred_base_delta(name_addr); #endif addr_as_int = (ptr_uint_t) name_addr; for (i = 0; i < 4; i++) { name[i] = (char) ((addr_as_int % 26) + 'A'); addr_as_int /= 256; } /* Fifth character is a '.' */ name[4] = '.'; unreadable_addr = !safe_read(addr, sizeof(target_contents), &target_contents); /* if at unreadable memory see if an ASLR preferred address can be used */ if (unreadable_addr) { app_pc likely_target_pc = aslr_possible_preferred_address(addr); if (likely_target_pc != NULL) { unreadable_addr = !safe_read(likely_target_pc, sizeof(target_contents), &target_contents); } else { unreadable_addr = true; } } /* Next four characters are decimal numerics from the target code */ if (unreadable_addr) { for (i = 0; i < 4; i++) name[i + 5] = 'X'; } else { fill_security_violation_target(name, target_contents); } } /* Tenth character is a '.' */ name[9] = '.'; /* Next character indicates the security violation type; * sequential letter choices used rather than semantic ones to * obfuscate meaning. */ switch (violation_type) { case STACK_EXECUTION_VIOLATION: name[10] = 'A'; break; case HEAP_EXECUTION_VIOLATION: name[10] = 'B'; break; case RETURN_TARGET_VIOLATION: name[10] = 'C'; break; case RETURN_DIRECT_RCT_VIOLATION: name[10] = 'D'; ASSERT_NOT_IMPLEMENTED(false); break; case INDIRECT_CALL_RCT_VIOLATION: name[10] = 'E'; break; case INDIRECT_JUMP_RCT_VIOLATION: name[10] = 'F'; break; #ifdef HOT_PATCHING_INTERFACE case HOT_PATCH_DETECTOR_VIOLATION: name[10] = 'H'; break; case HOT_PATCH_PROTECTOR_VIOLATION:name[10] = 'P'; break; #endif #ifdef PROCESS_CONTROL case PROCESS_CONTROL_VIOLATION: name[10] = 'K'; break; #endif #ifdef GBOP case GBOP_SOURCE_VIOLATION: name[10] = 'O'; break; #endif case ASLR_TARGET_VIOLATION: name[10] = 'R'; break; case ATTACK_SIM_NUDGE_VIOLATION: /* share w/ normal attack sim */ case ATTACK_SIMULATION_VIOLATION: name[10] = 'S'; break; case APC_THREAD_SHELLCODE_VIOLATION: /* injected shellcode threat names are custom generated */ ASSERT_NOT_REACHED(); name[10] = 'B'; break; default: name[10] = 'X'; ASSERT_NOT_REACHED(); } /* Null-terminate */ name[11] = '\0'; LOG(GLOBAL, LOG_ALL, 1, "Security violation name: %s\n", name); } bool is_exempt_threat_name(const char *name) { if (DYNAMO_OPTION(exempt_threat) && !IS_STRING_OPTION_EMPTY(exempt_threat_list)) { bool onlist; string_option_read_lock(); onlist = check_filter_with_wildcards(DYNAMO_OPTION(exempt_threat_list), name); string_option_read_unlock(); if (onlist) { LOG(THREAD_GET, LOG_INTERP|LOG_VMAREAS, 1, "WARNING: threat %s is on exempt list, suppressing violation\n", name); SYSLOG_INTERNAL_WARNING_ONCE("threat %s exempt", name); STATS_INC(num_exempt_threat); return true; } } return false; } /*************************************************************************** * Case 8075: we don't want to unprotect .data during violation reporting, so we * place all the local-scope static vars (from DO_THRESHOLD) into .fspdata. 
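 * The START_DATA_SECTION(FREQ_PROTECTED_SECTION, ...) just below and the matching
 * END_DATA_SECTION() at the end of this block are what perform that section switch.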
*/ START_DATA_SECTION(FREQ_PROTECTED_SECTION, "w"); /* Report security violation to all outputs - syslog, diagnostics, and interactive * returns false if violation was not reported */ static bool security_violation_report(app_pc addr, security_violation_t violation_type, const char *name, action_type_t action) { bool dump_forensics = true; /* shouldn't report anything if on silent_block_threat_list */ if (!IS_STRING_OPTION_EMPTY(silent_block_threat_list)) { bool onlist; string_option_read_lock(); onlist = check_filter_with_wildcards(DYNAMO_OPTION(silent_block_threat_list), name); string_option_read_unlock(); if (onlist) { LOG(THREAD_GET, LOG_INTERP|LOG_VMAREAS, 1, "WARNING: threat %s is on silent block list, suppressing reporting\n", name); SYSLOG_INTERNAL_WARNING_ONCE("threat %s silently blocked", name); STATS_INC(num_silently_blocked_threat); return false; } } if (dynamo_options.report_max) { /* need bool since ctr only inc-ed when < threshold, so no way * to tell 1st instance beyond threshold from subsequent */ static bool reached_max = false; /* do not report in any way if report threshold is reached */ DO_THRESHOLD_SAFE(dynamo_options.report_max, FREQ_PROTECTED_SECTION, {/* < report_max */}, { /* >= report_max */ if (!reached_max) { reached_max = true; SYSLOG(SYSLOG_WARNING, WARNING_REPORT_THRESHOLD, 2, get_application_name(), get_application_pid()); } return false; }); } /* options already synchronized by security_violation() */ if ((TEST(DUMPCORE_SECURITY_VIOLATION, DYNAMO_OPTION(dumpcore_mask)) #ifdef HOT_PATCHING_INTERFACE /* Part of fix for 5367. */ && violation_type != HOT_PATCH_DETECTOR_VIOLATION && violation_type != HOT_PATCH_PROTECTOR_VIOLATION #endif ) #ifdef HOT_PATCHING_INTERFACE /* Part of fix for 5367. */ /* Dump core if violation was for hot patch detector/protector and * the corresponding dumpcore_mask flag was set. */ || (TEST(DUMPCORE_HOTP_DETECTION, DYNAMO_OPTION(dumpcore_mask)) && violation_type == HOT_PATCH_DETECTOR_VIOLATION) || (TEST(DUMPCORE_HOTP_PROTECTION, DYNAMO_OPTION(dumpcore_mask)) && violation_type == HOT_PATCH_PROTECTOR_VIOLATION) #endif ) { DO_THRESHOLD_SAFE(DYNAMO_OPTION(dumpcore_violation_threshold), FREQ_PROTECTED_SECTION, os_dump_core(name) /* < threshold */,); } #ifdef HOT_PATCHING_INTERFACE if (violation_type == HOT_PATCH_DETECTOR_VIOLATION || violation_type == HOT_PATCH_PROTECTOR_VIOLATION) { SYSLOG_CUSTOM_NOTIFY(SYSLOG_ERROR, IF_WINDOWS_ELSE_0(MSG_HOT_PATCH_VIOLATION), 3, (char *) action_message[action], get_application_name(), get_application_pid(), name); } else #endif SYSLOG_CUSTOM_NOTIFY(SYSLOG_ERROR, IF_WINDOWS_ELSE_0(action_event_id[action]), 3, (char *) action_message[action], get_application_name(), get_application_pid(), name); #ifdef HOT_PATCHING_INTERFACE /* Part of fix for 5367. For hot patches core dumps and forensics should * be generated only if needed, which is not the case for other violations. */ if (!(DYNAMO_OPTION(hotp_diagnostics)) && (violation_type == HOT_PATCH_DETECTOR_VIOLATION || violation_type == HOT_PATCH_PROTECTOR_VIOLATION)) dump_forensics = false; #endif #ifdef PROCESS_CONTROL if (!DYNAMO_OPTION(pc_diagnostics) && /* Case 11023. 
*/ violation_type == PROCESS_CONTROL_VIOLATION) dump_forensics = false; #endif /* report_max (above) will limit the number of files created */ if (dump_forensics) report_diagnostics(action_message[action], name, violation_type); return true; } /* attack handling - reports violation and decides on action - possibly terminates the process * N.B.: we make assumptions about whether the callers of this routine hold * various locks, so be careful when adding new callers. * * type_handling prescribes per-type handling and is combined with * global options. It can be used to specify whether to take an * action (and may request specific alternative handling with * OPTION_HANDLING), and whether to report. * * The optional out value result_type can differ from the passed-in violation_type * for exemptions. * Returns an action, with the caller responsible for calling * security_violation_action() if action != ACTION_CONTINUE */ static action_type_t security_violation_internal_main(dcontext_t *dcontext, app_pc addr, security_violation_t violation_type, security_option_t type_handling, const char *threat_id, const action_type_t desired_action, read_write_lock_t *lock, security_violation_t *result_type/*OUT*/) { /* All violations except hot patch ones will request the safest solution, i.e., * to terminate the process. Based on the options used, different ones may be * selected in this function. However, hot patches can request specific actions * as specified by the hot patch writer. */ action_type_t action = desired_action; /* probably best to simply use the default TERMINATE_PROCESS */ char name[MAXIMUM_VIOLATION_NAME_LENGTH]; bool action_selected = false; bool found_unsupported = false; #ifdef HOT_PATCHING_INTERFACE /* Passing the hotp lock as an argument is ugly, but it is the cleanest way * to release the hotp lock for case 7988, otherwise, will have to release * it in hotp_event_notify and re-acquire it after reporting - really ugly. * Anyway, cleaning up the interface to security_violation is in plan * for Marlin, a FIXME, case 8079. */ ASSERT((DYNAMO_OPTION(hot_patching) && lock == hotp_get_lock()) || lock == NULL); #else ASSERT(lock == NULL); #endif /* though ASLR handling is currently not using this routine */ ASSERT(violation_type != ASLR_TARGET_VIOLATION); DOLOG(2, LOG_ALL, { SYSLOG_INTERNAL_INFO("security_violation("PFX", %d)", addr, violation_type); LOG(THREAD, LOG_VMAREAS, 2, "executable areas are:\n"); print_executable_areas(THREAD); LOG(THREAD, LOG_VMAREAS, 2, "future executable areas are:\n"); read_lock(&futureexec_areas->lock); print_vm_areas(futureexec_areas, THREAD); read_unlock(&futureexec_areas->lock); }); /* case 8075: we no longer unprot .data on the violation path */ ASSERT(check_should_be_protected(DATASEC_RARELY_PROT)); /* CHECK: all options for attack handling and reporting are dynamic, synchronized only once */ synchronize_dynamic_options(); #ifdef HOT_PATCHING_INTERFACE if (violation_type == HOT_PATCH_DETECTOR_VIOLATION || violation_type == HOT_PATCH_PROTECTOR_VIOLATION) { /* For hot patches, the action is provided by the hot patch writer; * nothing should be selected here. */ action_selected = true; } #endif #ifdef PROCESS_CONTROL /* A process control violation (which can only happen if process control is * turned on) results in the process being killed unless it is running in * detect mode. 
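     * The asserts below only check that the caller already chose the action matching
     * -pc_detect_mode: ACTION_CONTINUE when detecting, ACTION_TERMINATE_PROCESS otherwise.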
*/ if (violation_type == PROCESS_CONTROL_VIOLATION) { ASSERT(IS_PROCESS_CONTROL_ON()); ASSERT((action == ACTION_TERMINATE_PROCESS && !DYNAMO_OPTION(pc_detect_mode)) || (action == ACTION_CONTINUE && DYNAMO_OPTION(pc_detect_mode))); action_selected = true; } #endif /* one last chance to avoid a violation */ get_security_violation_name(dcontext, addr, name, MAXIMUM_VIOLATION_NAME_LENGTH, violation_type, threat_id); if (!IS_STRING_OPTION_EMPTY(exempt_threat_list)) { if (is_exempt_threat_name(name)) { if (result_type != NULL) *result_type = ALLOWING_BAD; mark_module_exempted(addr); return ACTION_CONTINUE; } } /* FIXME: if we reinstate case 6141 where we acquire the thread_initexit_lock * we'll need to release our locks! * See ifdef FORENSICS_ACQUIRES_INITEXIT_LOCK in the Attic. * FIXME: even worse, we'll crash w/ case 9381 if we get a flush * while we're nolinking due to init-extra-vmareas on the frags list! */ /* diagnose_violation_mode says to check if would have allowed if were allowing patterns */ if (dynamo_options.diagnose_violation_mode && !dynamo_options.executable_if_trampoline) { size_t junk; if (check_origins_bb_pattern(dcontext, addr, (app_pc *) &junk, &junk, (uint *) &junk, (uint *) &junk) == ALLOWING_OK) { /* FIXME: change later user-visible message to indicate this may be * a false positive */ SYSLOG_INTERNAL_WARNING_ONCE("would have allowed pattern DGC."); } } #ifdef DGC_DIAGNOSTICS LOG(GLOBAL, LOG_VMAREAS, 1, "violating basic block target:\n"); DOLOG(1, LOG_VMAREAS, { disassemble_app_bb(dcontext, addr, GLOBAL); }); #endif /* for non-debug build, give some info on violating block */ DODEBUG({ if (is_readable_without_exception(addr, 12)) { SYSLOG_INTERNAL_WARNING("violating basic block target @"PFX": " "%x %x %x %x %x %x %x %x %x %x %x %x", addr, *addr, *(addr+1), *(addr+2), *(addr+3), *(addr+4), *(addr+5), *(addr+6), *(addr+7), *(addr+8), *(addr+9), *(addr+10), *(addr+11)); } else SYSLOG_INTERNAL_WARNING("violating basic block target @"PFX": not readable!", addr); }); if (DYNAMO_OPTION(detect_mode) && !TEST(OPTION_BLOCK_IGNORE_DETECT, type_handling) /* As of today, detect mode for hot patches is set using modes files. */ IF_HOTP(&& violation_type != HOT_PATCH_DETECTOR_VIOLATION && violation_type != HOT_PATCH_PROTECTOR_VIOLATION)) { bool allow = true; /* would be nice to keep the count going when no max, so if dynamically impose * one later all the previous ones count toward it, but then have to worry about * overflow of counter, etc. -- so we ignore count while there's no max */ if (DYNAMO_OPTION(detect_mode_max) > 0) { /* global counter for violations in all threads */ DO_THRESHOLD_SAFE(DYNAMO_OPTION(detect_mode_max), FREQ_PROTECTED_SECTION, {/* < max */ LOG(GLOBAL, LOG_ALL, 1, "security_violation: allowing violation #%d [max %d], tid="TIDFMT"\n", do_threshold_cur, DYNAMO_OPTION(detect_mode_max), get_thread_id()); }, {/* >= max */ allow = false; LOG(GLOBAL, LOG_ALL, 1, "security_violation: reached maximum allowed %d, tid="TIDFMT"\n", DYNAMO_OPTION(detect_mode_max), get_thread_id()); }); } else { LOG(GLOBAL, LOG_ALL, 1, "security_violation: allowing violation, no max, tid=%d\n", get_thread_id()); } if (allow) { /* we have priority over other handling options */ action = ACTION_CONTINUE; action_selected = true; mark_module_exempted(addr); } } /* FIXME: case 2144 we need to TEST(OPTION_BLOCK early on so that * we do not impact the counters, in addition we need to * TEST(OPTION_HANDLING to specify an alternative attack handling * (e.g. 
-throw_exception if default is -kill_thread) * FIXME: We may also want a different message to allow 'staging' events to be * considered differently, maybe with a DO_ONCE semantics... */ /* decide on specific attack handling action if not continuing */ if (!action_selected && DYNAMO_OPTION(throw_exception)) { thread_data_t *thread_local = (thread_data_t *) dcontext->vm_areas_field; /* maintain a thread local counter to bail out and avoid infinite exceptions */ if (thread_local->thrown_exceptions < DYNAMO_OPTION(throw_exception_max_per_thread)) { # ifdef WINDOWS /* If can't verify consistent SEH chain should fall through to kill path */ /* UnhandledExceptionFilter is always installed. */ /* There is no point in throwing an exception if no other handlers are installed to unwind. We may still get there when our exception is not handled, but at least cleanup code will be given a chance. */ enum { MIN_SEH_DEPTH = 1 /* doesn't seem to deserve a separate option */ }; int seh_chain_depth = exception_frame_chain_depth(dcontext); if (seh_chain_depth > MIN_SEH_DEPTH) { /* note the check is best effort, e.g. attacked handler can still point to valid RET */ bool global_max_reached = true; /* check global counter as well */ DO_THRESHOLD_SAFE(DYNAMO_OPTION(throw_exception_max), FREQ_PROTECTED_SECTION, {global_max_reached = false;}, {global_max_reached = true;}); if (!global_max_reached) { thread_local->thrown_exceptions++; LOG(GLOBAL, LOG_ALL, 1, "security_violation: throwing exception %d for this thread [max pt %d] [global max %d]\n", thread_local->thrown_exceptions, dynamo_options.throw_exception_max_per_thread, dynamo_options.throw_exception_max); action = ACTION_THROW_EXCEPTION; action_selected = true; } } else { LOG(GLOBAL, LOG_ALL, 1, "security_violation: SEH chain invalid [%d], better kill\n", seh_chain_depth); } # else ASSERT_NOT_IMPLEMENTED(false); # endif /* WINDOWS */ } else { LOG(GLOBAL, LOG_ALL, 1, "security_violation: reached maximum exception count, kill now\n"); } } /* kill process or maybe thread */ if (!action_selected) { ASSERT(action == ACTION_TERMINATE_PROCESS); if (DYNAMO_OPTION(kill_thread)) { /* check global counter as well */ DO_THRESHOLD_SAFE(DYNAMO_OPTION(kill_thread_max), FREQ_PROTECTED_SECTION, {/* < max */ LOG(GLOBAL, LOG_ALL, 1, "security_violation: \t killing thread #%d [max %d], tid=%d\n", do_threshold_cur, DYNAMO_OPTION(kill_thread_max), get_thread_id()); /* FIXME: can't check if get_num_threads()==1 then say we're killing process * because it is possible that another thread has not been scheduled yet * and we wouldn't have seen it. * Still, only our message will be wrong if we end up killing the process, * when we terminate the last thread */ action = ACTION_TERMINATE_THREAD; action_selected = true; }, {/* >= max */ LOG(GLOBAL, LOG_ALL, 1, "security_violation: reached maximum thread kill, kill process now\n"); action = ACTION_TERMINATE_PROCESS; action_selected = true; }); } else { action = ACTION_TERMINATE_PROCESS; action_selected = true; } } ASSERT(action_selected); #ifdef CLIENT_INTERFACE /* Case 9712: Inform the client of the security violation and * give it a chance to modify the action. 
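     * For example (illustrative scenario only), a client running in a monitor-only mode
     * could downgrade a chosen ACTION_TERMINATE_PROCESS to ACTION_CONTINUE through the
     * action pointer passed to its event callback.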
     */
    if (CLIENTS_EXIST()) {
        instrument_security_violation(dcontext, addr, violation_type, &action);
    }
#endif

    /* now that we know the chosen action we can report */
    if (TEST(OPTION_REPORT, type_handling))
        security_violation_report(addr, violation_type, name, action);

    /* FIXME: walking the loader data structures at arbitrary
     * points is dangerous due to data races with other threads
     * -- see is_module_being_initialized and get_module_name
     */
    if (check_for_unsupported_modules()) {
        /* found an unsupported module */
        action = ACTION_TERMINATE_PROCESS;
        found_unsupported = true;
        /* NOTE that because of the violation_threshold this
         * check isn't actually sufficient to ensure we get a dump file
         * (if for instance already got several violations) but it's good
         * enough */
        if (TEST(DUMPCORE_UNSUPPORTED_APP, DYNAMO_OPTION(dumpcore_mask)) &&
            !TEST(DUMPCORE_SECURITY_VIOLATION, DYNAMO_OPTION(dumpcore_mask))) {
            os_dump_core("unsupported module");
        }
    }

#ifdef WINDOWS
    if (ACTION_TERMINATE_PROCESS == action &&
        (TEST(DETACH_UNHANDLED_VIOLATION, DYNAMO_OPTION(internal_detach_mask)) ||
         (found_unsupported &&
          TEST(DETACH_UNSUPPORTED_MODULE, DYNAMO_OPTION(internal_detach_mask))))) {
        /* set pc to right value and detach */
        get_mcontext(dcontext)->pc = addr;
        /* FIXME - currently detach_internal creates a new thread to do the
         * detach (case 3312) and if we hold an app lock used by the init apc
         * such as the loader lock (case 4486) we could livelock the process
         * if we used a synchronous detach.  Instead, we set detach in motion,
         * disable all future violations and continue.
         */
        detach_internal();
        options_make_writable();
        /* make sure synchronizes won't clobber the changes here */
        dynamo_options.dynamic_options = false;
        dynamo_options.detect_mode = true;
        dynamo_options.detect_mode_max = 0; /* no limit on detections */
        dynamo_options.report_max = 1; /* don't report any more */
        options_restore_readonly();
        action = ACTION_CONTINUE;
    }
#endif

    /* FIXME: move this into hotp code like we've done for bb building so we
     * don't need to pass the lock in anymore */
#ifdef HOT_PATCHING_INTERFACE
    /* Fix for case 7988.  Release the hotp lock when the remediation action
     * is to terminate the {thread,process} or to throw an exception, otherwise
     * we will deadlock trying to access the hotp_vul_table in another thread.
     */
    if (lock != NULL &&
        (action == ACTION_TERMINATE_THREAD ||
         action == ACTION_TERMINATE_PROCESS ||
         action == ACTION_THROW_EXCEPTION)) {
#ifdef GBOP
        ASSERT(violation_type == HOT_PATCH_DETECTOR_VIOLATION ||
               violation_type == HOT_PATCH_PROTECTOR_VIOLATION ||
               violation_type == GBOP_SOURCE_VIOLATION);
#else
        ASSERT(violation_type == HOT_PATCH_DETECTOR_VIOLATION ||
               violation_type == HOT_PATCH_PROTECTOR_VIOLATION);
#endif
        ASSERT_OWN_READ_LOCK(true, lock);
        read_unlock(lock);
    }
#endif

    if (result_type != NULL)
        *result_type = violation_type;
    return action;
}

/* Meant to be called after security_violation_internal_main().
 * Caller should only call for action!=ACTION_CONTINUE.
 */
void
security_violation_action(dcontext_t *dcontext, action_type_t action, app_pc addr)
{
    ASSERT(action != ACTION_CONTINUE);
    if (action == ACTION_CONTINUE)
        return;

    /* timeout before we take an action */
    if (dynamo_options.timeout) {
        /* For now assuming only current thread sleeps.
           FIXME: If we are about to kill the process anyway, it may be safer
           to stop_the_world, so attacks in this time window do not get through.
           TODO: On the other hand sleeping in one thread, while the rest are
           preparing for controlled shutdown, sounds better, yet we have no way
           of telling them that process death is pending.
        */
        /* FIXME: shouldn't we suspend all other threads for the messagebox too? */

        /* For services you can get a similar effect to -timeout on kill process
           by settings in Services\service properties\Recovery.
           Restart service after x minutes.
           0 is very useful - then you get your app back immediately.
           1 minute however may be too much in some circumstances; our option is
           then useful for finer control, e.g. -timeout 10s
        */
        os_timeout(dynamo_options.timeout);
    }
    if (ACTION_THROW_EXCEPTION == action) {
        os_forge_exception(addr, UNREADABLE_MEMORY_EXECUTION_EXCEPTION);
        ASSERT_NOT_REACHED();
    }
    if (ACTION_CONTINUE != action) {
        uint terminate_flags_t = TERMINATE_PROCESS;
        if (is_self_couldbelinking()) {
            /* must be nolinking for terminate cleanup to avoid deadlock w/ flush */
            enter_nolinking(dcontext, NULL, false/*not a real cache transition*/);
        }
        if (action == ACTION_TERMINATE_THREAD) {
            terminate_flags_t = TERMINATE_THREAD;
            /* clean up when terminating a thread */
            terminate_flags_t |= TERMINATE_CLEANUP;
        } else {
            ASSERT(action == ACTION_TERMINATE_PROCESS &&
                   terminate_flags_t == TERMINATE_PROCESS);
        }
#ifdef HOT_PATCHING_INTERFACE
        ASSERT(!DYNAMO_OPTION(hot_patching) ||
               !READ_LOCK_HELD(hotp_get_lock())); /* See case 7998. */
#endif
        os_terminate(dcontext, terminate_flags_t);
        ASSERT_NOT_REACHED();
    }
    ASSERT_NOT_REACHED();
}

/* Caller must call security_violation_action() if return != ACTION_CONTINUE */
static action_type_t
security_violation_main(dcontext_t *dcontext, app_pc addr,
                        security_violation_t violation_type,
                        security_option_t type_handling)
{
    return security_violation_internal_main(dcontext, addr, violation_type,
                                            type_handling, NULL,
                                            ACTION_TERMINATE_PROCESS, NULL, NULL);
}

/* See security_violation_internal_main() for further comments.
 *
 * Returns ALLOWING_BAD if on exempt_threat_list, or if in detect mode
 * returns the passed violation_type (a negative value)
 * Does not return if protection action is taken.
 */
security_violation_t
security_violation_internal(dcontext_t *dcontext, app_pc addr,
                            security_violation_t violation_type,
                            security_option_t type_handling,
                            const char *threat_id,
                            const action_type_t desired_action,
                            read_write_lock_t *lock)
{
    security_violation_t result_type;
    action_type_t action =
        security_violation_internal_main(dcontext, addr, violation_type,
                                         type_handling, threat_id,
                                         desired_action, lock, &result_type);
    DOKSTATS(if (ACTION_CONTINUE != action) {
        KSTOP_REWIND_UNTIL(dispatch_num_exits);
    });
    if (action != ACTION_CONTINUE)
        security_violation_action(dcontext, action, addr);
    return result_type;
}

/* security_violation_internal() is the real function.  This wrapper exists
 * for two reasons: one, hot patching needs to send extra arguments for event
 * notification and two, existing calls to security_violation() in the code
 * shouldn't have to change the interface.
 */
security_violation_t
security_violation(dcontext_t *dcontext, app_pc addr,
                   security_violation_t violation_type,
                   security_option_t type_handling)
{
    return security_violation_internal(dcontext, addr, violation_type,
                                       type_handling, NULL,
                                       ACTION_TERMINATE_PROCESS, NULL);
}

/* back to normal section */
END_DATA_SECTION()

/****************************************************************************/

bool
is_dyngen_vsyscall(app_pc addr)
{
    /* FIXME: on win32, should we only allow portion of page?
*/ /* CHECK: likely to be true on all Linux versions by the time we ship */ /* if vsyscall_page_start == 0, then this exception doesn't apply */ /* Note vsyscall_page_start is a global defined in the corresponding os.c files */ if (vsyscall_page_start == 0) return false; return (addr >= (app_pc) vsyscall_page_start && addr < (app_pc) (vsyscall_page_start+PAGE_SIZE)); } bool is_in_futureexec_area(app_pc addr) { bool future; read_lock(&futureexec_areas->lock); future = lookup_addr(futureexec_areas, addr, NULL); read_unlock(&futureexec_areas->lock); return future; } bool is_dyngen_code(app_pc addr) { uint flags; if (get_executable_area_flags(addr, &flags)) { /* assuming only true DGC is marked DYNGEN */ return TEST(FRAG_DYNGEN, flags); } return is_in_futureexec_area(addr); } /* Returns true if in is a direct jmp targeting a known piece of non-DGC code */ static bool is_direct_jmp_to_image(dcontext_t *dcontext, instr_t *in) { bool ok = false; if (instr_get_opcode(in) == OP_jmp && /* no short jmps */ opnd_is_near_pc(instr_get_target(in))) { app_pc target = opnd_get_pc(instr_get_target(in)); uint flags; if (get_executable_area_flags(target, &flags)) { /* we could test for UNMOD_IMAGE but that would ruin windows * loader touch-ups, which can happen for any dll! * so we test FRAG_DYNGEN instead */ ok = !TEST(FRAG_DYNGEN, flags); } } return ok; } /* allow original code displaced by a hook, seen for Citrix 4.0 (case 6615): * <zero or more non-cti and non-syscall instrs whose length < 5> * <one more such instr, making length sum X> * jmp <dll:Y>, where <dll:Y-X> contains a jmp to this page */ static bool check_trampoline_displaced_code(dcontext_t *dcontext, app_pc addr, bool on_stack, instrlist_t *ilist, size_t *len) { uint size = 0; bool match = false; instr_t *in, *last = instrlist_last(ilist); ASSERT(DYNAMO_OPTION(trampoline_displaced_code)); if (on_stack || !is_direct_jmp_to_image(dcontext, last)) return false; ASSERT(instr_length(dcontext, last) == JMP_LONG_LENGTH); for (in = instrlist_first(ilist); in != NULL/*sanity*/ && in != last; in = instr_get_next(in)) { /* build_app_bb_ilist should fully decode everything */ ASSERT(instr_opcode_valid(in)); if (instr_is_cti(in) || instr_is_syscall(in) || instr_is_interrupt(in)) break; size += instr_length(dcontext, in); if (instr_get_next(in) == last) { if (size < JMP_LONG_LENGTH) break; } else { if (size >= JMP_LONG_LENGTH) break; } } ASSERT(in != NULL); if (in == last) { app_pc target; LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "check_trampoline_displaced_code @"PFX": size=%d\n", addr, size); DOLOG(3, LOG_INTERP|LOG_VMAREAS, { instrlist_disassemble(dcontext, addr, ilist, THREAD); }); /* is_direct_jmp_to_image should have checked for us */ ASSERT(opnd_is_near_pc(instr_get_target(last))); target = opnd_get_pc(instr_get_target(last)); if (is_readable_without_exception(target - size, JMP_LONG_LENGTH)) { instr_t *tramp = instr_create(dcontext); /* Ensure a racy unmap causing a decode crash is passed to the app */ set_thread_decode_page_start(dcontext, (app_pc) PAGE_START(target - size)); target = decode_cti(dcontext, target - size, tramp); if (target != NULL && instr_opcode_valid(tramp) && instr_is_ubr(tramp) && opnd_is_near_pc(instr_get_target(tramp))) { app_pc hook = opnd_get_pc(instr_get_target(tramp)); /* FIXME: could be tighter by ensuring that hook targets a jmp * or call right before addr but that may be too specific. * FIXME: if the pattern crosses a page we could fail to match. * we could check for being inside region instead. 
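 * For now the match below only requires the hook target to lie on the same page as addr (see the PAGE_START comparison).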
*/ if (PAGE_START(hook) == PAGE_START(addr)) { *len = size + JMP_LONG_LENGTH; LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: allowing hook-displaced code "PFX" -> "PFX" -> "PFX"\n", addr, target, hook); SYSLOG_INTERNAL_WARNING_ONCE("hook-displaced code allowed."); STATS_INC(trampolines_displaced_code); match = true; } } instr_destroy(dcontext, tramp); } } return match; } /* other than JITed code, we allow a small set of specific patterns of DGC such * as function closure trampolines, which this routine checks for. * returns ALLOWING_OK if bb matches, else returns ALLOWING_BAD */ static int check_origins_bb_pattern(dcontext_t *dcontext, app_pc addr, app_pc *base, size_t *size, uint *vm_flags, uint *frag_flags) { /* we assume this is not a cti target (flag diffs will prevent direct cti here) * we only check for the bb beginning at addr */ instrlist_t *ilist; instr_t *in, *first; opnd_t op; size_t len = 0; int res = ALLOWING_BAD; /* signal to caller not a match */ bool on_stack = is_on_stack(dcontext, addr, NULL); /* FIXME: verify bb memory is readable prior to decoding it * we shouldn't get here if addr is unreadable, but rest of bb could be * note that may end up looking at win32 GUARD page -- don't need to do * anything special since that will look unreadable */ /* FIXME bug 9376: if unreadable check_thread_vm_area() will * assert vmlist!=NULL and throw an exception, which is ok */ ilist = build_app_bb_ilist(dcontext, addr, INVALID_FILE); first = instrlist_first(ilist); if (first == NULL) /* empty bb: perhaps invalid instr */ goto check_origins_bb_pattern_exit; LOG(GLOBAL, LOG_VMAREAS, 3, "check_origins_bb_pattern:\n"); DOLOG(3, LOG_VMAREAS, { instrlist_disassemble(dcontext, addr, ilist, GLOBAL); }); #ifndef X86 /* FIXME: move the x86-specific analysis to an arch/ file! */ ASSERT_NOT_IMPLEMENTED(); #endif #ifdef UNIX /* is this a sigreturn pattern placed by kernel on the stack or vsyscall page? 
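 * (is_signal_restorer_code() below performs the actual pattern match and returns the matched length in len)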
*/ if (is_signal_restorer_code(addr, &len)) { LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "found signal restorer code @"PFX", allowing it\n", addr); SYSLOG_INTERNAL_WARNING_ONCE("signal restorer code allowed."); res = ALLOWING_OK; goto check_origins_bb_pattern_exit; } #endif /* is this a closure trampoline that looks like this: * mov immed -> 0x4(esp) (put frame ptr directly in slot) * jmp known-non-DGC-address * or like this (gcc-style, also seen in dfrgui): * mov immed -> %ecx (put frame ptr in ecx, callee puts in slot) * jmp known-non-DGC-address * OR, is this some sort of C++ exception chaining (seen in soffice): * mov immed -> %eax (put try index in eax) * jmp known-non-DGC-address * these can be on the stack or on the heap, except the soffice one, which must * be on the heap (simply b/c we've never seen it on the stack) * all of these must be targeted by a call */ if (instr_get_opcode(first) == OP_mov_imm || /* funny case where store of immed is mov_st -- see arch/decode_table.c */ (instr_get_opcode(first) == OP_mov_st && opnd_is_immed(instr_get_src(first, 0)))) { bool ok = false; LOG(GLOBAL, LOG_VMAREAS, 3, "testing for mov immed pattern\n"); /* mov_imm always has immed src, just check dst */ op = instr_get_dst(first, 0); ok = (opnd_is_near_base_disp(op) && opnd_get_base(op) == REG_XSP && opnd_get_disp(op) == 4 && opnd_get_scale(op) == REG_NULL); if (!ok && opnd_is_reg(op) && opnd_get_size(instr_get_src(first, 0)) == OPSZ_4) { uint immed = (uint) opnd_get_immed_int(instr_get_src(first, 0)); /* require immed be addr for ecx, non-addr plus on heap for eax */ /* FIXME: PAGE_SIZE is arbitrary restriction, assuming eax values * are small indices, and it's a nice way to distinguish pointers */ IF_X64(ASSERT_NOT_TESTED()); /* on x64 will these become rcx & rax? */ ok = (opnd_get_reg(op) == REG_ECX && immed > PAGE_SIZE) || (opnd_get_reg(op) == REG_EAX && immed < PAGE_SIZE && !on_stack); } if (ok) { /* check 2nd instr */ ok = false; len += instr_length(dcontext, first); in = instr_get_next(first); if (instr_get_next(in) == NULL && /* only 2 instrs in this bb */ is_direct_jmp_to_image(dcontext, in)) { len += instr_length(dcontext, in); ok = true; } else LOG(GLOBAL, LOG_VMAREAS, 3, "2nd instr not jmp to good code!\n"); } else LOG(GLOBAL, LOG_VMAREAS, 3, "immed bad!\n"); if (ok) { /* require source to be known and to be a call * cases where source is unknown are fairly pathological * (another thread flushing and deleting the fragment, etc.) 
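 * so we simply require the last exit to be flagged as a call (EXIT_IS_CALL below).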
*/ ok = EXIT_IS_CALL(dcontext->last_exit->flags); } if (ok) { LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: found trampoline block @"PFX", allowing it\n", addr); SYSLOG_INTERNAL_WARNING_ONCE("trampoline DGC allowed."); res = ALLOWING_OK; goto check_origins_bb_pattern_exit; } } /* is this a PLT-type push/jmp, where the push uses its own address * (this is seen in soffice): * push own-address * jmp known-non-DGC-address */ if (instr_get_opcode(first) == OP_push_imm && opnd_get_size(instr_get_src(first, 0)) == OPSZ_4) { ptr_uint_t immed = opnd_get_immed_int(instr_get_src(first, 0)); LOG(GLOBAL, LOG_VMAREAS, 3, "testing for push immed pattern\n"); if ((app_pc)immed == addr) { len += instr_length(dcontext, first); in = instr_get_next(first); if (instr_get_next(in) == NULL && /* only 2 instrs in this bb */ is_direct_jmp_to_image(dcontext, in)) { len += instr_length(dcontext, in); LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: found push/jmp block @"PFX", allowing it\n", addr); SYSLOG_INTERNAL_WARNING_ONCE("push/jmp DGC allowed."); res = ALLOWING_OK; goto check_origins_bb_pattern_exit; } } } /* look for the DGC ret on the stack that office xp uses, beyond TOS! * it varies between having no arg or having an immed arg -- my guess * is they use it to handle varargs with stdcall: callee must * clean up args but has to deal w/ dynamically varying #args, so * they use DGC ret, only alternative is jmp* and no ret */ if (instr_is_return(first) && on_stack && addr < (app_pc) get_mcontext(dcontext)->xsp) { /* beyond TOS */ ASSERT(instr_get_next(first) == NULL); /* bb should have only ret in it */ len = instr_length(dcontext, first); LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: found ret-beyond-TOS @"PFX", allowing it\n", addr); SYSLOG_INTERNAL_WARNING_ONCE("ret-beyond-TOS DGC allowed."); res = ALLOWING_OK; goto check_origins_bb_pattern_exit; } if (DYNAMO_OPTION(trampoline_dirjmp) && !on_stack && is_direct_jmp_to_image(dcontext, first)) { /* should be a lone jmp */ ASSERT(instr_get_next(first) == NULL); len = instr_length(dcontext, first); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: allowing targeted direct jmp @"PFX"\n", addr); SYSLOG_INTERNAL_WARNING_ONCE("trampoline direct jmp allowed."); STATS_INC(trampolines_direct_jmps); res = ALLOWING_OK; goto check_origins_bb_pattern_exit; } /* allow a .NET COM method table: a lone direct call on the heap, and a * ret immediately preceding it (see case 3558 and case 3564) */ if (DYNAMO_OPTION(trampoline_dircall) && !on_stack && instr_is_call_direct(first)) { len = instr_length(dcontext, first); /* ignore rest of ilist -- may or may not follow call for real bb, as * will have separate calls to check_thread_vm_area() and thus * separate code origins checks being applied to the target, making this * not really a security hole at all as attack could have sent control * directly to target */ LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: allowing targeted direct call @"PFX"\n", addr); SYSLOG_INTERNAL_WARNING_ONCE("trampoline direct call allowed."); STATS_INC(trampolines_direct_calls); res = ALLOWING_OK; goto check_origins_bb_pattern_exit; } if (DYNAMO_OPTION(trampoline_com_ret) && !on_stack && instr_is_return(first)) { app_pc nxt_pc = addr + instr_length(dcontext, first); if (is_readable_without_exception(nxt_pc, MAX_INSTR_LENGTH)) { instr_t *nxt = instr_create(dcontext); /* WARNING: until our decoding is more robust, as this is AFTER a * ret this could fire a decode assert if not actually code there, * so we avoid any more decoding than we have to do w/ 
decode_cti. */ /* A racy unmap could cause a fault here so we track the page * that's being decoded. */ set_thread_decode_page_start(dcontext, (app_pc) PAGE_START(nxt_pc)); nxt_pc = decode_cti(dcontext, nxt_pc, nxt); if (nxt_pc != NULL && instr_opcode_valid(nxt) && instr_is_call_direct(nxt)) { /* actually we don't get here w/ current native_exec early-gateway * design since we go native at the PREVIOUS call to this ret's call */ ASSERT_NOT_TESTED(); instr_destroy(dcontext, nxt); len = instr_length(dcontext, first); LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: allowing .NET COM ret in method table @"PFX"\n", addr); SYSLOG_INTERNAL_WARNING_ONCE(".NET COM method table ret allowed."); STATS_INC(trampolines_com_rets); res = ALLOWING_OK; goto check_origins_bb_pattern_exit; } instr_destroy(dcontext, nxt); } } if (DYNAMO_OPTION(trampoline_displaced_code) && check_trampoline_displaced_code(dcontext, addr, on_stack, ilist, &len)) { res = ALLOWING_OK; goto check_origins_bb_pattern_exit; } check_origins_bb_pattern_exit: if (res == ALLOWING_OK) { /* bb matches pattern, let's allow it, but only this block, not entire region! */ LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "Trimming exec area "PFX"-"PFX" to match pattern bb "PFX"-"PFX"\n", *base, *base+*size, addr, addr+len); *base = addr; ASSERT(len > 0); *size = len; /* Since this is a sub-page region that shouldn't be frequently * executed, it's best to use sandboxing. */ *frag_flags |= SANDBOX_FLAG(); /* ensure another thread is not able to use this memory region for * a non-pattern-matching code sequence */ *vm_flags |= VM_PATTERN_REVERIFY; STATS_INC(num_selfmod_vm_areas); } instrlist_clear_and_destroy(dcontext, ilist); return res; } /* trims [base, base+size) to its intersection with [start, end) * NOTE - regions are required to intersect */ static void check_origins_trim_region_helper(app_pc *base /*INOUT*/, size_t *size /*INOUT*/, app_pc start, app_pc end) { app_pc original_base = *base; ASSERT(!POINTER_OVERFLOW_ON_ADD(*base, *size)); /* shouldn't overflow */ ASSERT(start < end); /* [start, end) should be an actual region */ ASSERT(*base + *size > start && *base < end); /* region must intersect */ LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "Trimming exec area "PFX"-"PFX" to intersect area "PFX"-"PFX"\n", *base, *base+*size, start, end); *base = MAX(*base, start); /* don't use new base here! (case 8152) */ *size = MIN(original_base+*size, end) - *base; } /* Checks if the given PC is trusted and to what level * if execution for the referenced area is not allowed program execution * should be aborted */ static INLINE_ONCE security_violation_t check_origins_helper(dcontext_t *dcontext, app_pc addr, app_pc *base, size_t *size, uint prot, uint *vm_flags, uint *frag_flags, const char *modname) { vm_area_t *fut_area; if (is_dyngen_vsyscall(addr) && *size == PAGE_SIZE && (prot & MEMPROT_WRITE) == 0) { /* FIXME: don't allow anyone to make this region writable? */ LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, PFX" is the vsyscall page, ok to execute\n", addr); return ALLOWING_OK; } #if 0 /* this syslog causes services.exe to hang (ref case 666) once case 666 * is fixed re-enable if desired FIXME */ SYSLOG_INTERNAL_WARNING_ONCE("executing region at "PFX" not on executable list.", addr); #else LOG(GLOBAL, LOG_VMAREAS, 1, "executing region at "PFX" not on executable list. 
Thread %d\n", addr, dcontext->owning_thread); #endif if (USING_FUTURE_EXEC_LIST) { bool ok; bool once_only; read_lock(&futureexec_areas->lock); ok = lookup_addr(futureexec_areas, addr, &fut_area); if (!ok) read_unlock(&futureexec_areas->lock); else { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: pc = "PFX" is future executable, allowing\n", addr); LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: pc = "PFX" is future executable, allowing\n", addr); #if 0 /* this syslog causes services.exe to hang (ref case 666) * once case 666 is fixed re-enable if desired FIXME */ SYSLOG_INTERNAL_WARNING_ONCE("future executable region allowed."); #else DODEBUG_ONCE(LOG(GLOBAL, LOG_ALL, 1, "future executable region allowed.")); #endif if (*base < fut_area->start || *base+*size > fut_area->end) { check_origins_trim_region_helper(base, size, fut_area->start, fut_area->end); } once_only = TEST(VM_ONCE_ONLY, fut_area->vm_flags); /* now done w/ fut_area */ read_unlock(&futureexec_areas->lock); fut_area = NULL; if (is_on_stack(dcontext, addr, NULL)) { /* normally futureexec regions are persistent, to allow app to * repeatedly write and then execute (yes this happens a lot). * we don't want to do that for the stack, b/c it amounts to * permanently allowing a certain piece of stack to be executed! * besides, we don't see the write-exec iter scheme for the stack. */ STATS_INC(num_exec_future_stack); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "future exec "PFX"-"PFX" is on stack, removing from future list\n", *base, *base+*size); ok = remove_futureexec_vm_area(*base, *base+*size); ASSERT(ok); } else { STATS_INC(num_exec_future_heap); if (!DYNAMO_OPTION(selfmod_futureexec)) { /* if on all-selfmod pages, then we shouldn't need to keep it on * the futureexec list */ if (is_executable_area_on_all_selfmod_pages(*base, *base+*size)) once_only = true; } if (once_only) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "future exec "PFX"-"PFX" is once-only, removing from future list\n", *base, *base+*size); ok = remove_futureexec_vm_area(*base, *base+*size); ASSERT(ok); STATS_INC(num_exec_future_once); } } *vm_flags |= VM_WAS_FUTURE; return ALLOWING_OK; } } if (DYNAMO_OPTION(executable_if_text) || DYNAMO_OPTION(executable_if_rx_text) || (DYNAMO_OPTION(exempt_text) || !IS_STRING_OPTION_EMPTY(exempt_text_list))) { app_pc modbase = get_module_base(addr); if (modbase != NULL) { /* PE, and is readable */ /* note that it could still be a PRIVATE mapping */ /* don't expand region to match actual text section bounds -- if we split * let's keep this region smaller. */ app_pc sec_start = NULL, sec_end = NULL; if (is_in_code_section(modbase, addr, &sec_start, &sec_end)) { bool allow = false; if (DYNAMO_OPTION(executable_if_text)) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "exec region is in code section of module @"PFX" (%s), allowing\n", modbase, modname == NULL? "<invalid name>" : modname); STATS_INC(num_text); mark_module_exempted(addr); allow = true; } else { uint prot = 0; list_default_or_append_t deflist = LIST_NO_MATCH; /* Xref case 10526, in the common case app_mem_prot_change() adds * this region, however it can miss -> rx transitions if they * overlapped more then one section (fixing it to do so would * require signifigant restructuring of that routine, see comments * there) so we also check here. 
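 * The check below simply verifies that the region is currently mapped r-x (executable and not writable).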
*/ if (DYNAMO_OPTION(executable_if_rx_text) && get_memory_info(addr, NULL, NULL, &prot) && (TEST(MEMPROT_EXEC, prot) && !TEST(MEMPROT_WRITE, prot))) { /* matches -executable_if_rx_text */ /* case 9799: we don't mark exempted for default-on options */ allow = true; SYSLOG_INTERNAL_WARNING_ONCE("allowable rx text section not " "found till check_origins"); } if (!allow && modname != NULL) { bool onlist; string_option_read_lock(); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "exec region is in code section of module %s, vs list %s\n", modname, DYNAMO_OPTION(exempt_text_list)); onlist = check_filter(DYNAMO_OPTION(exempt_text_list), modname); string_option_read_unlock(); if (onlist) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "module %s is on text list, allowing execution\n", modname); STATS_INC(num_text_list); SYSLOG_INTERNAL_WARNING_ONCE("code origins: module %s text " "section exempt", modname); mark_module_exempted(addr); allow = true; } } if (!allow && modname != NULL) { deflist = check_list_default_and_append(dynamo_options. exempt_mapped_image_text_default_list, dynamo_options. exempt_mapped_image_text_list, modname); } if (deflist != LIST_NO_MATCH) { bool image_mapping = is_mapped_as_image(modbase); if (image_mapping) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "module %s is on text list, of a mapped IMAGE" " allowing execution\n", modname); STATS_INC(num_image_text_list); SYSLOG_INTERNAL_WARNING_ONCE("code origins: module %s IMAGE text " "section exempt", modname); if (deflist == LIST_ON_APPEND) /* case 9799: not default */ mark_module_exempted(addr); allow = true; } else { ASSERT_NOT_TESTED(); SYSLOG_INTERNAL_WARNING_ONCE("code origins: module %s text " "not IMAGE, attack!", modname); } } } if (allow) { /* trim exec area to allowed bounds */ check_origins_trim_region_helper(base, size, sec_start, sec_end); return ALLOWING_OK; } } } } if (DYNAMO_OPTION(executable_if_dot_data) || DYNAMO_OPTION(executable_if_dot_data_x) || (DYNAMO_OPTION(exempt_dot_data) && !IS_STRING_OPTION_EMPTY(exempt_dot_data_list)) || (DYNAMO_OPTION(exempt_dot_data_x) && !IS_STRING_OPTION_EMPTY(exempt_dot_data_x_list))) { /* FIXME: get_module_base() is called all over in this function. * This function could do with some refactoring. */ app_pc modbase = get_module_base(addr); if (modbase != NULL) { /* A loaded module exists for addr; now see if addr is in .data. */ app_pc sec_start = NULL, sec_end = NULL; if (is_in_dot_data_section(modbase, addr, &sec_start, &sec_end)) { bool allow = false; bool onlist = false; uint prot = 0; if (!DYNAMO_OPTION(executable_if_dot_data) && DYNAMO_OPTION(exempt_dot_data) && !IS_STRING_OPTION_EMPTY(exempt_dot_data_list)) { if (modname != NULL) { string_option_read_lock(); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "exec region is in data of module %s, vs list %s\n", modname, DYNAMO_OPTION(exempt_dot_data_list)); onlist = check_filter(DYNAMO_OPTION(exempt_dot_data_list), modname); string_option_read_unlock(); DOSTATS({ if (onlist) STATS_INC(num_dot_data_list); }); } } DOSTATS({ if (DYNAMO_OPTION(executable_if_dot_data)) STATS_INC(num_dot_data); }); if (onlist || DYNAMO_OPTION(executable_if_dot_data)) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "exec region is in .data section of module %s\n", modname == NULL? "<invalid name>" : modname); SYSLOG_INTERNAL_WARNING_ONCE( "code origins: .data section of module %s exempt", modname == NULL? 
"<invalid name>" : modname); /* case 9799: FIXME: we don't want to mark as exempted for the * default modules on the list: should split into a separate * default list so we can tell! Those modules will have private * pcaches if in a process w/ ANY exemption options */ mark_module_exempted(addr); allow = true;; } if (!allow && get_memory_info(addr, NULL, NULL, &prot) && TEST(MEMPROT_EXEC, prot)) { /* check the _x versions */ if (!DYNAMO_OPTION(executable_if_dot_data_x) && DYNAMO_OPTION(exempt_dot_data_x) && !IS_STRING_OPTION_EMPTY(exempt_dot_data_x_list)) { if (modname != NULL) { string_option_read_lock(); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "exec region is in x data of module %s, vs list %s\n", modname, DYNAMO_OPTION(exempt_dot_data_x_list)); onlist = check_filter_with_wildcards(DYNAMO_OPTION(exempt_dot_data_x_list), modname); string_option_read_unlock(); DOSTATS({ if (onlist) STATS_INC(num_dot_data_x_list); }); } DOSTATS({ if (DYNAMO_OPTION(executable_if_dot_data_x)) STATS_INC(num_dot_data_x); }); } if (DYNAMO_OPTION(executable_if_dot_data_x) || onlist) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "exec region is in x .data section of module %s\n", modname == NULL? "<invalid name>" : modname); SYSLOG_INTERNAL_WARNING_ONCE( "code origins: .data section of module %s exempt", modname == NULL? "<invalid name>" : modname); /* case 9799: FIXME: we don't want to mark as exempted for * the default modules on the list: should split into a * separate default list so we can tell! Those modules will * have private pcaches if in a process w/ ANY exemption * options */ mark_module_exempted(addr); allow = true; } } if (allow) { /* trim exec area to allowed bounds */ check_origins_trim_region_helper(base, size, sec_start, sec_end); return ALLOWING_OK; } } } } if (DYNAMO_OPTION(executable_if_image) || (DYNAMO_OPTION(exempt_image) && !IS_STRING_OPTION_EMPTY(exempt_image_list)) || !moduledb_exempt_list_empty(MODULEDB_EXEMPT_IMAGE)) { app_pc modbase = get_module_base(addr); if (modbase != NULL) { /* A loaded module exists for addr; we allow the module (xref 10526 we * used to limit to just certain sections). FIXME - we could use the * relaxed is_in_any_section here, but other relaxations (such as dll2heap) * exclude the entire module so need to match that to prevent there being * non exemptable areas. */ bool onlist = false; bool mark_exempted = true; if (!DYNAMO_OPTION(executable_if_image)) { if (modname != NULL) { string_option_read_lock(); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "exec region is in image of module %s, vs list %s\n", modname, DYNAMO_OPTION(exempt_image_list)); onlist = check_filter(DYNAMO_OPTION(exempt_image_list), modname); string_option_read_unlock(); DOSTATS({ if (onlist) STATS_INC(num_exempt_image_list); }); if (!onlist && !moduledb_exempt_list_empty(MODULEDB_EXEMPT_IMAGE)) { onlist = moduledb_check_exempt_list(MODULEDB_EXEMPT_IMAGE, modname); DOSTATS({ if (onlist) STATS_INC(num_moduledb_exempt_image); }); /* FIXME - could be that a later policy would * allow this in which case we shouldn't report, * however from layout this is should be the last * place that could allow this target. */ if (onlist) { /* Case 9799: We don't want to set this for * default-on options like moduledb to avoid * non-shared pcaches when other exemption options * are turned on in the process. 
*/ mark_exempted = false; moduledb_report_exemption("Moduledb image exemption" " "PFX" to "PFX" from " "module %s", *base, *base + *size, modname); } } } } else { STATS_INC(num_exempt_image); } if (onlist || DYNAMO_OPTION(executable_if_image)) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "exec region is in the loaded image of module %s\n", modname == NULL ? "<invalid name>" : modname); SYSLOG_INTERNAL_WARNING_ONCE("code origins: loaded image of module %s" "exempt", modname == NULL ? "<invalid name>" : modname); if (mark_exempted) mark_module_exempted(addr); return ALLOWING_OK; } } } if (((DYNAMO_OPTION(exempt_dll2heap) && !IS_STRING_OPTION_EMPTY(exempt_dll2heap_list)) || !moduledb_exempt_list_empty(MODULEDB_EXEMPT_DLL2HEAP) || (DYNAMO_OPTION(exempt_dll2stack) && !IS_STRING_OPTION_EMPTY(exempt_dll2stack_list)) || !moduledb_exempt_list_empty(MODULEDB_EXEMPT_DLL2STACK)) && /* FIXME: any way to find module info for deleted source? */ !LINKSTUB_FAKE(dcontext->last_exit)) { /* no cutting corners here -- find exact module that exit cti is from */ app_pc modbase; app_pc translated_pc = recreate_app_pc(dcontext, EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit), dcontext->last_fragment); ASSERT(translated_pc != NULL); modbase = get_module_base(translated_pc); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "check_origins: dll2heap and dll2stack for "PFX": cache "PFX" => app "PFX" == mod "PFX"\n", addr, EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit), translated_pc, modbase); if (modbase != NULL) { /* PE, and is readable */ if (modname != NULL) { bool onheaplist = false, onstacklist = false; bool on_moddb_heaplist = false, on_moddb_stacklist = false; string_option_read_lock(); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "source region is in module %s\n", modname); if (DYNAMO_OPTION(exempt_dll2heap)) { onheaplist = check_filter(DYNAMO_OPTION(exempt_dll2heap_list), modname); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "exempt heap list: %s\n", DYNAMO_OPTION(exempt_dll2heap_list)); } if (DYNAMO_OPTION(exempt_dll2stack)) { onstacklist = check_filter(DYNAMO_OPTION(exempt_dll2stack_list), modname); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "exempt stack list: %s\n", DYNAMO_OPTION(exempt_dll2stack_list)); } string_option_read_unlock(); if (!onheaplist) { on_moddb_heaplist = moduledb_check_exempt_list(MODULEDB_EXEMPT_DLL2HEAP, modname); } if (!onstacklist) { on_moddb_stacklist = moduledb_check_exempt_list(MODULEDB_EXEMPT_DLL2STACK, modname); } /* make sure targeting non-stack, non-module memory */ if ((onheaplist || on_moddb_heaplist) && !is_on_stack(dcontext, addr, NULL) && get_module_base(addr) == NULL) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "source module %s is on exempt list, target is heap => allowing " "execution\n", modname); if (on_moddb_heaplist) { STATS_INC(num_moduledb_exempt_dll2heap); moduledb_report_exemption("Moduledb dll2heap exemption "PFX" to" " "PFX" from module %s", translated_pc, addr, modname); } else { STATS_INC(num_exempt_dll2heap); SYSLOG_INTERNAL_WARNING_ONCE("code origins: dll2heap from %s " "exempt", modname); } return ALLOWING_OK; } if ((onstacklist || on_moddb_stacklist) && is_on_stack(dcontext, addr, NULL)) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "source module %s is on exempt list, target is stack => allowing" "execution\n", modname); if (on_moddb_stacklist) { STATS_INC(num_moduledb_exempt_dll2stack); moduledb_report_exemption("Moduledb dll2stack exemption "PFX" " "to "PFX" from module %s", translated_pc, addr, modname); } else { SYSLOG_INTERNAL_WARNING_ONCE("code origins: 
dll2stack from %s is" " exempt", modname); STATS_INC(num_exempt_dll2stack); } return ALLOWING_OK; } } } } if (dynamo_options.executable_if_trampoline) { /* check for specific bb patterns we allow */ if (check_origins_bb_pattern(dcontext, addr, base, size, vm_flags, frag_flags) == ALLOWING_OK) { DOSTATS({ if (is_on_stack(dcontext, addr, NULL)) { STATS_INC(num_trampolines_stack); } else { STATS_INC(num_trampolines_heap); } }); return ALLOWING_OK; } } if (DYNAMO_OPTION(executable_if_driver)) { if (TEST(VM_DRIVER_ADDRESS, *vm_flags)) { ASSERT(*size == PAGE_SIZE); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "check origins: pc = "PFX" is in a new driver area\n", addr); STATS_INC(num_driver_areas); return ALLOWING_OK; } } if (is_on_stack(dcontext, addr, NULL)) { /* WARNING: stack check not bulletproof since attackers control esp */ LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "check origins: pc = "PFX" is on the stack\n", addr); STATS_INC(num_stack_violations); if (!dynamo_options.executable_stack) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 1, "ERROR: Address "PFX" on the stack is not executable!\n", addr); return STACK_EXECUTION_VIOLATION; } else { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 1, "WARNING: Execution violation @ stack address "PFX" detected. " "Continuing...\n", addr); return ALLOWING_BAD; } } else { STATS_INC(num_heap_violations); if (!dynamo_options.executable_heap) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 1, "ERROR: Address "PFX" on the heap is not executable!\n", addr); SYSLOG_INTERNAL_WARNING_ONCE("Address "PFX" on the heap is not executable", addr); return HEAP_EXECUTION_VIOLATION; } else { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 1, "WARNING: Execution violation @ heap address "PFX" detected. " "Continuing...\n", addr); return ALLOWING_BAD; } } /* CHECK: why did we get here? */ ASSERT_NOT_REACHED(); } /* It is up to the caller to raise a violation if return value is < 0 */ static INLINE_ONCE int check_origins(dcontext_t *dcontext, app_pc addr, app_pc *base, size_t *size, uint prot, uint *vm_flags, uint *frag_flags, bool xfer) { security_violation_t res; /* Many exemptions need to know the module name, so we obtain here */ char modname_buf[MAX_MODNAME_INTERNAL]; const char *modname = os_get_module_name_buf_strdup(addr, modname_buf, BUFFER_SIZE_ELEMENTS(modname_buf) HEAPACCT(ACCT_VMAREAS)); ASSERT(DYNAMO_OPTION(code_origins)); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "check origins: pc = "PFX"\n", addr); res = check_origins_helper(dcontext, addr, base, size, prot, vm_flags, frag_flags, modname); # ifdef DGC_DIAGNOSTICS if (res != ALLOWING_OK) { /* set flag so we can call this area BAD in the future */ *frag_flags |= FRAG_DYNGEN_RESTRICTED; } # endif if (res < 0) { /* if_x shouldn't have to check here, should catch all regions marked x * at DR init time or app allocation time */ /* FIXME: turn these into a SYSLOG_INTERNAL_WARNING_ONCE(in case an * external agent has added that code) * and then we'd need to add them now. 
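 * The asserts below verify that, with -executable_if_x or -executable_if_rx on, a region with the matching page protections should already have been allowed and so should not reach this failure path.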
* FIXME: xref case 3742 */ ASSERT_BUG_NUM(3742, !DYNAMO_OPTION(executable_if_x) || !TEST(MEMPROT_EXEC, prot)); ASSERT(!DYNAMO_OPTION(executable_if_rx) || !TEST(MEMPROT_EXEC, prot) || TEST(MEMPROT_WRITE, prot)); } if (modname != NULL && modname != modname_buf) dr_strfree(modname HEAPACCT(ACCT_VMAREAS)); return res; } /* returns whether it ended up deleting the self-writing fragment * by flushing the region */ bool vm_area_fragment_self_write(dcontext_t *dcontext, app_pc tag) { if (!dynamo_options.executable_stack && is_on_stack(dcontext, tag, NULL)) { /* stack code is NOT persistently executable, nor is it allowed to be * written, period! however, in keeping with our philosophy of only * interfering with the program when it executes, we don't stop it * at the write here, we simply remove the code from the executable * list and remove its sandboxing. after all, the code on the stack * may be finished with, and now the stack is just being used as data! * * FIXME: there is a hole here due to selfmod fragments * being private: a second thread can write to a stack region and then * execute from the changed region w/o kicking it off the executable * list. case 4020 fixed this for pattern-matched regions. */ bool ok; vm_area_t *area = NULL; app_pc start, end; read_lock(&executable_areas->lock); ok = lookup_addr(executable_areas, tag, &area); ASSERT(ok); /* grab fields since can't hold lock entire time */ start = area->start; end = area->end; read_unlock(&executable_areas->lock); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 1, "WARNING: code on stack "PFX"-"PFX" @tag "PFX" written to\n", start, end, tag); SYSLOG_INTERNAL_WARNING_ONCE("executable code on stack written to."); /* FIXME: fragment could extend into multiple areas, we should flush * them all to cover the written-to region (which we don't know) */ flush_fragments_and_remove_region(dcontext, start, end - start, false /* don't own initexit_lock */, false /* keep futures */); return true; } return false; } #endif /* PROGRAM_SHEPHERDING ******************************************/ #ifdef SIMULATE_ATTACK enum { SIMULATE_INIT = 0, SIMULATE_GENERIC = 1, SIMULATE_AT_ADDR = 2, SIMULATE_AT_FRAGNUM = 4, SIMULATE_WIPE_STACK = 8, SIMULATE_OVER = 0x1000, }; /* attack simulation list */ /* comma separated list of simulate points. 
@fragnum fragment number available only in DEBUG builds 0xfragpc will test addr only whenever check_thread_vm_area is called start of bb, pc at end of direct cti instr, target of direct cti, pc at end of final instr in bb s: prefix wipes the stack Ex: -simulate_at @100,s:@150,0x77e9e8d6,s:0x77e9e8f0,@777,@2000,s:@19999,@29999 */ /* simulate_at is modified in place, hence caller needs to synchronize and should be 0 after the first call just like strtok */ int next_simulate_at_fragment(char **tokpos /* OUT */, int *action /* OUT */) { char *fragnum; // assumes sscanf won't get confused with the ,s for(fragnum = *tokpos; fragnum; fragnum = *tokpos) { int num; *tokpos = strchr(fragnum, ','); /* next ptr */ if (*tokpos) (*tokpos)++; if (sscanf(fragnum, PIFX, &num) == 1) { LOG(GLOBAL, LOG_VMAREAS, 1, "next_simulate_at_fragment: %s="PIFX" addr\n", fragnum, num); *action = SIMULATE_AT_ADDR; return num; } else if (sscanf(fragnum, "s:"PIFX, &num) == 1) { LOG(GLOBAL, LOG_VMAREAS, 1, "next_simulate_at_fragment: wipe stack %s="PIFX"\n", fragnum, num); *action = SIMULATE_WIPE_STACK | SIMULATE_AT_ADDR; return num; } #ifdef DEBUG /* for fragment count */ else if (sscanf(fragnum, "s:@%d", &num) == 1) { LOG(GLOBAL, LOG_VMAREAS, 1, "next_simulate_at_fragment: wipe stack %s=%d\n", fragnum, num); *action = SIMULATE_WIPE_STACK | SIMULATE_AT_FRAGNUM; return num; } else if (sscanf(fragnum, "@%d", &num) == 1) { LOG(GLOBAL, LOG_VMAREAS, 1, "next_simulate_at_fragment: %s=%d num\n", fragnum, num); *action = SIMULATE_AT_FRAGNUM; return num; } #endif else { LOG(GLOBAL, LOG_VMAREAS, 1, "next_simulate_at_fragment: frg=%s ignored\n", fragnum); } } *action = SIMULATE_OVER; LOG(GLOBAL, LOG_VMAREAS, 1, "next_simulate_at_fragment: simulate attack over\n"); return 0; } void simulate_attack(dcontext_t *dcontext, app_pc pc) { static char *tokpos; static int next_frag = 0; /* number or address */ static int action = SIMULATE_INIT; bool attack = false; if (TEST(SIMULATE_AT_FRAGNUM, action)) { attack = GLOBAL_STAT(num_fragments) > next_frag; } if (TEST(SIMULATE_AT_ADDR, action)) { if (pc == (app_pc)next_frag) attack = true; } if (attack) { LOG(GLOBAL, LOG_VMAREAS, 1, "SIMULATE ATTACK for "PFX" @%d frags\n", pc, GLOBAL_STAT(num_fragments)); if (TEST(SIMULATE_WIPE_STACK, action)) { reg_t esp = get_mcontext(dcontext)->xsp; uint overflow_size = 1024; LOG(THREAD_GET, LOG_VMAREAS, 1, "simulate_attack: wipe stack "PFX"-"PFX"\n", esp, esp + overflow_size-1); /* wipe out a good portion of the app stack */ memset((void*)esp, 0xbf, overflow_size); /* LOOK for 0xbf in the log */ LOG(THREAD_GET, LOG_VMAREAS, 1, "simulate_attack: wiped stack "PFX"-"PFX"\n", esp, esp + overflow_size-1); /* FIXME: we may want to just wipe the stack and return to app */ } } /* prepare for what to do next */ if (attack || action == SIMULATE_INIT) { mutex_lock(&simulate_lock); string_option_read_lock(); tokpos = dynamo_options.simulate_at; if (action == SIMULATE_INIT) { if ('\0' == *tokpos) tokpos = NULL; } next_frag = next_simulate_at_fragment(&tokpos, &action); /* dynamic changes to the string may have truncated it in front of original */ ASSERT(tokpos < strchr(dynamo_options.simulate_at, '\0')); string_option_read_unlock(); /* FIXME: tokpos ptr is kept beyond release of lock! 
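 * (a dynamic change to the option string after the unlock could leave tokpos pointing at stale data)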
*/ mutex_unlock(&simulate_lock); } if (attack) { security_violation(dcontext, pc, ATTACK_SIMULATION_VIOLATION, OPTION_BLOCK|OPTION_REPORT); } } #endif /* SIMULATE_ATTACK */ #if defined(DEBUG) && defined(INTERNAL) static void print_entry(dcontext_t *dcontext, fragment_t *entry, const char *prefix) { if (entry == NULL) LOG(THREAD, LOG_VMAREAS, 1, "%s<NULL>\n", prefix); else if (FRAG_MULTI(entry)) { if (FRAG_MULTI_INIT(entry)) { LOG(THREAD, LOG_VMAREAS, 1, "%s"PFX" <init: tag="PFX"> pc="PFX"\n", prefix, entry, FRAG_FRAG(entry), FRAG_PC(entry)); } else { LOG(THREAD, LOG_VMAREAS, 1, "%s"PFX" F="PFX" pc="PFX"\n", prefix, entry, FRAG_FRAG(entry), FRAG_PC(entry)); } } else { fragment_t *f = (fragment_t *) entry; LOG(THREAD, LOG_VMAREAS, 1, "%s"PFX" F%d tag="PFX"\n", prefix, f, f->id, f->tag); } } static void print_fraglist(dcontext_t *dcontext, vm_area_t *area, const char *prefix) { fragment_t *entry, *last; LOG(THREAD, LOG_VMAREAS, 1, "%sFragments for area ("PFX") "PFX".."PFX"\n", prefix, area, area->start, area->end); for (entry = area->custom.frags, last = NULL; entry != NULL; last = entry, entry = FRAG_NEXT(entry)) { print_entry(dcontext, entry, "\t"); DOLOG(7, LOG_VMAREAS, { print_entry(dcontext, FRAG_PREV(entry), "\t <="); print_entry(dcontext, FRAG_NEXT(entry), "\t =>"); }); if (FRAG_ALSO(entry) != NULL) { fragment_t *also = FRAG_ALSO(entry); print_entry(dcontext, FRAG_ALSO(entry), "\t also =>"); /* check for also in same area == inconsistency in data structs */ if (FRAG_PC(also) >= area->start && FRAG_PC(also) < area->end) { if (FRAG_MULTI_INIT(also)) { LOG(THREAD, LOG_VMAREAS, 1, "WARNING: self-also frag tag "PFX"\n", FRAG_FRAG(also)); } else { fragment_t *f = FRAG_FRAG(also); LOG(THREAD, LOG_VMAREAS, 1, "WARNING: self-also frag F%d("PFX")%s\n", f->id, f->tag, TEST(FRAG_IS_TRACE, f->flags) ? " trace" : ""); } /* not an assertion b/c we sometimes print prior to cleaning */ } } ASSERT(last == NULL || last == FRAG_PREV(entry)); } ASSERT(area->custom.frags == NULL || FRAG_PREV(area->custom.frags) == last); } static void print_fraglists(dcontext_t *dcontext) { thread_data_t *data = GET_DATA(dcontext, 0); int i; ASSERT_VMAREA_DATA_PROTECTED(data, READWRITE); LOG(THREAD, LOG_VMAREAS, 1, "\nFragment lists for ALL AREAS:\n"); for (i = 0; i < data->areas.length; i++) { print_fraglist(dcontext, &(data->areas.buf[i]), ""); } LOG(THREAD, LOG_VMAREAS, 1, "\n"); } static void print_frag_arealist(dcontext_t *dcontext, fragment_t *f) { fragment_t *entry; if (FRAG_MULTI(f)) { LOG(THREAD, LOG_VMAREAS, 1, "Areas for F="PFX" ("PFX")\n", FRAG_FRAG(f), FRAG_PC(f)); } else LOG(THREAD, LOG_VMAREAS, 1, "Areas for F%d ("PFX")\n", f->id, f->tag); for (entry = f; entry != NULL; entry = FRAG_ALSO(entry)) { print_entry(dcontext, entry, "\t"); } } #endif /* DEBUG && INTERNAL */ #ifdef DEBUG static bool area_contains_frag_pc(vm_area_t *area, fragment_t *f) { app_pc pc = FRAG_PC(f); if (area == NULL) return true; return (pc >= area->start && pc < area->end); } #endif /* DEBUG */ /* adds entry to front of area's frags list * caller must synchronize modification of area * FIXME: how assert that caller has done that w/o asking for whole vector * to be passed in, or having backpointer from area? * See general FIXME of same flavor at top of file. */ static void prepend_entry_to_fraglist(vm_area_t *area, fragment_t *entry) { /* Can't assert area_contains_frag_pc() because vm_area_unlink_fragments * moves all also entries onto the area fraglist that's being flushed. 
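 * (so an entry's pc may legitimately lie outside this area's bounds at this point)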
*/ LOG(THREAD_GET, LOG_VMAREAS, 4, "%s: putting fragment @"PFX" (%s) on vmarea "PFX"-"PFX"\n", /* i#1215: FRAG_ID(entry) can crash if entry->f hold tag temporarily */ __FUNCTION__, FRAG_PC(entry), TEST(FRAG_SHARED, entry->flags) ? "shared" : "private", area->start, area->end); FRAG_NEXT_ASSIGN(entry, area->custom.frags); /* prev wraps around, but not next */ if (area->custom.frags != NULL) { FRAG_PREV_ASSIGN(entry, FRAG_PREV(area->custom.frags)); FRAG_PREV_ASSIGN(area->custom.frags, entry); } else FRAG_PREV_ASSIGN(entry, entry); area->custom.frags = entry; } /* adds a multi_entry_t to the list of fragments for area. * cross-links with prev if prev != NULL. * sticks tag in for f (will be fixed in vm_area_add_fragment, once f is created) */ static fragment_t * prepend_fraglist(dcontext_t *dcontext, vm_area_t *area, app_pc entry_pc, app_pc tag, fragment_t *prev) { multi_entry_t *e = (multi_entry_t *) nonpersistent_heap_alloc(dcontext, sizeof(multi_entry_t) HEAPACCT(ACCT_VMAREA_MULTI)); fragment_t * entry = (fragment_t *) e; e->flags = FRAG_FAKE | FRAG_IS_EXTRA_VMAREA | /* distinguish from fragment_t */ FRAG_IS_EXTRA_VMAREA_INIT; /* indicate f field is a tag, not a fragment_t yet */ if (dcontext == GLOBAL_DCONTEXT) /* shared */ e->flags |= FRAG_SHARED; e->f = (fragment_t *) tag; /* placeholder */ e->pc = entry_pc; if (prev != NULL) FRAG_ALSO_ASSIGN(prev, entry); FRAG_ALSO_ASSIGN(entry, NULL); ASSERT(area_contains_frag_pc(area, entry)); prepend_entry_to_fraglist(area, entry); DOLOG(7, LOG_VMAREAS, { print_fraglist(dcontext, area, "after prepend_fraglist, "); }); return entry; } #ifdef DGC_DIAGNOSTICS void dyngen_diagnostics(dcontext_t *dcontext, app_pc pc, app_pc base_pc, size_t size, uint prot) { bool future, stack; char buf[MAXIMUM_SYMBOL_LENGTH]; app_pc translated_pc; read_lock(&futureexec_areas->lock); future = lookup_addr(futureexec_areas, pc, NULL); read_unlock(&futureexec_areas->lock); stack = is_on_stack(dcontext, pc, NULL); if (!future) future = is_dyngen_vsyscall(pc); print_symbolic_address(pc, buf, sizeof(buf), false); LOG(GLOBAL, LOG_VMAREAS, 1, "DYNGEN in %d: target="PFX" => "PFX"-"PFX" %s%s%s%s%s %s\n", dcontext->owning_thread, pc, base_pc, base_pc+size, ((prot & MEMPROT_READ) != 0) ? "R":"", ((prot & MEMPROT_WRITE) != 0)? "W":"", ((prot & MEMPROT_EXEC) != 0) ? "E":"", future ? " future":" BAD", stack ? " stack":"", buf); if (LINKSTUB_FAKE(dcontext->last_exit)) { LOG(GLOBAL, LOG_VMAREAS, 1, "source=!!! fake last_exit, must have been flushed?\n"); return; } /* FIXME: risky if last fragment is deleted -- should check for that * here and instead just print type from last_exit, since recreate * may fail */ translated_pc = recreate_app_pc(dcontext, EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit), dcontext->last_fragment); if (translated_pc != NULL) { print_symbolic_address(translated_pc, buf, sizeof(buf), false); LOG(GLOBAL, LOG_VMAREAS, 1, "source=F%d("PFX") @"PFX" \"%s\"\n", dcontext->last_fragment->id, dcontext->last_fragment->tag, EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit), buf); disassemble_with_bytes(dcontext, translated_pc, main_logfile); } DOLOG(4, LOG_VMAREAS, { disassemble_fragment(dcontext, dcontext->last_fragment, false); }); } #endif /*************************************************************************** * APPLICATION MEMORY STATE TRACKING */ /* Checks whether a requested allocation at a particular base will change * the protection bits of any code. Returns whether or not to allow * the operation to go through. 
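 * (returning true lets the allocation proceed; returning false means the request should be refused)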
*/ bool app_memory_pre_alloc(dcontext_t *dcontext, byte *base, size_t size, uint prot, bool hint) { byte *pb = base; dr_mem_info_t info; while (pb < base + size && /* i#1462: getting the true bounds on Windows is expensive so we get just * the cur base first. This can result in an extra syscall in some cases, * but in large-region cases it saves huge number of syscalls. */ query_memory_cur_base(pb, &info)) { if (info.type != DR_MEMTYPE_FREE && info.type != DR_MEMTYPE_RESERVED) { size_t change_sz; uint subset_memprot; uint res; /* We need the real base */ if (!query_memory_ex(pb, &info)) break; change_sz = MIN(info.base_pc + info.size - pb, base + size - pb); if (hint) { /* Just have caller remove the hint, before we go through * -handle_dr_modify handling. */ return false; } res = app_memory_protection_change(dcontext, pb, change_sz, prot, &subset_memprot, NULL); if (res != DO_APP_MEM_PROT_CHANGE) { if (res == FAIL_APP_MEM_PROT_CHANGE) { return false; } else if (res == PRETEND_APP_MEM_PROT_CHANGE || res == SUBSET_APP_MEM_PROT_CHANGE) { /* This gets complicated to handle. If the syscall is * changing a few existing pages and then allocating new * pages beyond them, we could adjust the base: but there * are many corner cases. Thus we fail the syscall, which * is the right thing for cases we've seen like i#1178 * where the app tries to commit to a random address! */ SYSLOG_INTERNAL_WARNING_ONCE("Failing app alloc w/ suspect overlap"); return false; } } } if (POINTER_OVERFLOW_ON_ADD(info.base_pc, info.size)) break; pb = info.base_pc + info.size; } return true; } /* newly allocated or mapped in memory region, returns true if added to exec list * ok to pass in NULL for dcontext -- in fact, assumes dcontext is NULL at initialization * * It's up to the caller to handle any changes in protection in a new alloc that * overlaps an existing alloc, by calling app_memory_protection_change(). */ bool app_memory_allocation(dcontext_t *dcontext, app_pc base, size_t size, uint prot, bool image _IF_DEBUG(const char *comment)) { /* FIXME (case 68): to guard against external agents freeing memory, we * could remove this region from the executable list here -- is it worth the * performance hit? DR itself could allocate memory that was freed * externally -- but our DR overlap checks would catch that. */ ASSERT_CURIOSITY(!executable_vm_area_overlap(base, base + size, false/*have no lock*/) || /* This happens during module loading if we don't flush on mprot */ (!INTERNAL_OPTION(hw_cache_consistency) && /* .bss has !image so we just check for existing module overlap */ pc_is_in_module(base))); #ifdef PROGRAM_SHEPHERDING DODEBUG({ /* case 4175 - reallocations will overlap with no easy way to * enforce this */ if (futureexec_vm_area_overlap(base, base + size)) { SYSLOG_INTERNAL_WARNING_ONCE("existing future area overlapping ["PFX", " PFX")", base, base + size); } }); #endif /* no current policies allow non-x code at allocation time onto exec list */ if (!TEST(MEMPROT_EXEC, prot)) return false; /* Do not add our own code cache and other data structures * to executable list -- but do add our code segment * FIXME: checking base only is good enough? 
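 * (the is_in_dynamo_dll()/is_in_client_lib() checks below are passed base only, not the full range)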
*/ if (dynamo_vm_area_overlap(base, base + size)) { LOG(GLOBAL, LOG_VMAREAS, 2, "\t<dynamorio region>\n"); /* assumption: preload/preinject library is not on DR area list since unloaded */ if (!is_in_dynamo_dll(base) /* our own text section is ok */ /* client lib text section is ok (xref i#487) */ IF_CLIENT_INTERFACE(&& !is_in_client_lib(base))) return false; } LOG(GLOBAL, LOG_VMAREAS, 1, "New +x app memory region: "PFX"-"PFX" %s\n", base, base+size, memprot_string(prot)); if (!TEST(MEMPROT_WRITE, prot)) { uint frag_flags = 0; if (DYNAMO_OPTION(coarse_units) && image && !RUNNING_WITHOUT_CODE_CACHE()) { /* all images start out with coarse-grain management */ frag_flags |= FRAG_COARSE_GRAIN; } add_executable_vm_area(base, base + size, image ? VM_UNMOD_IMAGE : 0, frag_flags, false/*no lock*/ _IF_DEBUG(comment)); return true; } else if (dcontext==NULL || /* i#626: we skip is_no_stack because of no mcontext at init time, * we also assume that no alloc overlaps w/ stack at init time. */ (IF_CLIENT_INTERFACE(dynamo_initialized &&) !is_on_stack(dcontext, base, NULL))) { LOG(GLOBAL, LOG_VMAREAS, 1, "WARNING: "PFX"-"PFX" is writable, NOT adding to executable list\n", base, base+size); #ifdef PROGRAM_SHEPHERDING if (DYNAMO_OPTION(executable_if_x)) { LOG(GLOBAL, LOG_VMAREAS, 1, "app_memory_allocation: New future exec region b/c x: "PFX"-"PFX" %s\n", base, base+size, memprot_string(prot)); STATS_INC(num_mark_if_x); add_futureexec_vm_area(base, base+size, false/*permanent*/ _IF_DEBUG("alloc executable_if_x")); mark_module_exempted(base); } else if (DYNAMO_OPTION(executable_if_alloc)) { bool future = false; /* rwx regions are not added at init time unless in images */ # ifdef WINDOWS if (image) { /* anything marked rwx in an image is added to future list * otherwise it is not added -- must be separately allocated, * not just be present at init or in a mapped non-image file */ future = true; LOG(GLOBAL, LOG_VMAREAS, 1, "New future exec region b/c x from image: "PFX"-"PFX" %s\n", base, base+size, memprot_string(prot)); } else if (dcontext != NULL && dcontext->alloc_no_reserve) { /* we only add a region marked rwx at allocation time to the * future list if it is allocated and reserved at the same time * (to distinguish from the rwx heap on 2003) */ future = true; LOG(GLOBAL, LOG_VMAREAS, 1, "New future exec region b/c x @alloc & no reserve: "PFX"-"PFX" %s\n", base, base+size, memprot_string(prot)); } # else if (dcontext != NULL || image) { /* can't distinguish stack -- saved at init time since we don't add rwx then, * but what about stacks whose creation we see? 
FIXME */ future = true; LOG(GLOBAL, LOG_VMAREAS, 1, "New future exec region b/c x @alloc: "PFX"-"PFX" %s\n", base, base+size, memprot_string(prot)); } # endif if (future) { STATS_INC(num_alloc_exec); add_futureexec_vm_area(base, base+size, false/*permanent*/ _IF_DEBUG("alloc x")); } } #endif /* PROGRAM_SHEPHERDING */ } return false; } /* de-allocated or un-mapped memory region */ void app_memory_deallocation(dcontext_t *dcontext, app_pc base, size_t size, bool own_initexit_lock, bool image) { ASSERT(!dynamo_vm_area_overlap(base, base + size)); /* we check for overlap regardless of memory protections, to allow flexible * policies that are independent of rwx bits -- if any overlap we remove, * no shortcuts */ if (executable_vm_area_overlap(base, base + size, false/*have no lock*/)) { /* ok for overlap to have changed in between, flush checks again */ flush_fragments_and_remove_region(dcontext, base, size, own_initexit_lock, true/*free futures*/); #ifdef RETURN_AFTER_CALL if (DYNAMO_OPTION(ret_after_call) && !image && !DYNAMO_OPTION(rac_dgc_sticky)) { /* we can have after call targets in DGC in addition to DLLs */ /* Note IMAGE mappings are handled in process_image() on * Windows, so that they can be handled more efficiently * as a single region. FIXME: case 4983 on Linux */ /* only freeing if we have ever interp/executed from this area */ /* FIXME: note that on app_memory_protection_change() we * do NOT want to free these entries, therefore we'd have * a leak if a portion gets marked writable and is thus no * longer on our list. Note we can't flush the areas on * memory protection because the likelihood of introducing * a false positives in doing so is vastly greater than * the security risk of not flushing. (Many valid after * call locations may still be active, and our vmarea * boundaries can not precisely capture the application * intent.) Note that we would not leak on DLLs even if * they are made writable, because we treat separately. */ /* FIXME: see proposal in case 2236 about using a * heuristic that removes only when too numerous, if that * works well as a heuristic that DGC is being reused, and * unlikely that it will be so densely filled. */ /* FIXME: [perf] case 9331 this is not so good on all * deallocations, if we can't tell whether we have * executed from it. On every module LOAD, before mapping * it as MEM_IMAGE the loader first maps a DLL as * MEM_MAPPED, and on each of the corresponding unmaps * during LoadLibrary(), we'd be walking the cumulative * hashtable. Although there shouldn't be that many valid * AC entries at process startup, maybe best to leave the * DGC leak for now if this will potentially hurt startup * time in say svchost.exe. Currently rac_dgc_sticky is * on by default so we don't reach this code. */ /* case 9331: should find out if there was any true * execution in any thread here before we go through a * linear walk of the hashtable. More directly we need a * vmvector matching all vmareas that had a .C added for * them, considering the common case should be that this * is an app memory deallocation that has nothing to do * with us. * * FIXME: for now just checking if base is declared DGC, * and ignoring any others possible vm_areas for the same * OS region, so we may still have a leak. 
*/ if (is_dyngen_code(base)) { ASSERT_NOT_TESTED(); invalidate_after_call_target_range(dcontext, base, base+size); } } #endif /* RETURN_AFTER_CALL */ } #ifdef PROGRAM_SHEPHERDING if (USING_FUTURE_EXEC_LIST && futureexec_vm_area_overlap(base, base + size)) { remove_futureexec_vm_area(base, base + size); LOG(GLOBAL, LOG_VMAREAS, 2, "removing future exec "PFX"-"PFX" since now freed\n", base, base+size); } #endif } /* A convenience routine that starts the two-phase flushing protocol */ /* Note this is not flush_fragments_and_remove_region */ static bool flush_and_remove_executable_vm_area(dcontext_t *dcontext, app_pc base, size_t size) { DEBUG_DECLARE(bool res;) flush_fragments_in_region_start(dcontext, base, size, false /* don't own initexit_lock */, false /* case 2236: keep futures */, true /* exec invalid */, false /* don't force synchall */ _IF_DGCDIAG(NULL)); DEBUG_DECLARE(res = ) remove_executable_vm_area(base, base + size, true/*have lock*/); DODEBUG(if (!res) { /* area doesn't have to be executable in fact when called * on executable_if_hook path */ LOG(THREAD, LOG_VMAREAS, 2, "\tregion was in fact not on executable_areas, so nothing to remove\n"); }); /* making sure there is no overlap now */ ASSERT(!executable_vm_area_overlap(base, base+size, true /* holding lock */)); return true; } void tamper_resistant_region_add(app_pc start, app_pc end) { /* For now assuming a single area for specially protected areas * that is looked up in addition to dynamo_vm_areas. Assuming * modifications to any location is ntdll.dll is always * interesting to us, instead of only those pieces we trampoline * this should be sufficient. * * FIXME: we could add a new vm_area_vector_t for protected possibly * subpage regions that we later turn into pretend_writable_areas * * Note that ntdll doesn't have an IAT section so we only worry * about function patching */ ASSERT(tamper_resistant_region_start == NULL); tamper_resistant_region_start = start; tamper_resistant_region_end = end; } /* returns true if [start, end) overlaps with a tamper_resistant region * as needed for DYNAMO_OPTION(handle_ntdll_modify) */ bool tamper_resistant_region_overlap(app_pc start, app_pc end) { return (end > tamper_resistant_region_start && start < tamper_resistant_region_end); } bool is_jit_managed_area(app_pc addr) { uint vm_flags; if (get_executable_area_vm_flags(addr, &vm_flags)) return TEST(VM_JIT_MANAGED, vm_flags); else return false; } void set_region_jit_managed(app_pc start, size_t len) { vm_area_t *region; ASSERT(DYNAMO_OPTION(opt_jit)); write_lock(&executable_areas->lock); if (lookup_addr(executable_areas, start, &region)) { LOG(GLOBAL, LOG_VMAREAS, 1, "set_region_jit_managed("PFX" +0x%x)\n", start, len); ASSERT(region->start == start && region->end == (start+len)); if (!TEST(VM_JIT_MANAGED, region->vm_flags)) { if (TEST(VM_MADE_READONLY, region->vm_flags)) vm_make_writable(region->start, region->end - region->start); region->vm_flags |= VM_JIT_MANAGED; region->vm_flags &= ~(VM_MADE_READONLY | VM_DELAY_READONLY); LOG(GLOBAL, LOG_VMAREAS, 1, "Region ("PFX" +0x%x) no longer 'made readonly'\n", start, len); } } else { LOG(GLOBAL, LOG_VMAREAS, 1, "Generating new jit-managed vmarea: "PFX"-"PFX"\n", start, start+len); add_vm_area(executable_areas, start, start+len, VM_JIT_MANAGED, 0, NULL _IF_DEBUG("jit-managed")); } write_unlock(&executable_areas->lock); } /* memory region base:base+size now has privileges prot * returns a value from the enum in vmareas->h about whether to perform the * system call or not and if not what the 
return code to the app should be */ /* FIXME : This is called before the system call that will change * the memory permission which could be race condition prone! If another * thread executes from a region added by this function before the system call * goes through we could get a disconnect on what the memory premissions of the * region really are vs what vmareas expects for consistency, see bug 2833 */ /* N.B.: be careful about leaving code read-only and returning * PRETEND_APP_MEM_PROT_CHANGE or SUBSET_APP_MEM_PROT_CHANGE, or other * cases where mixed with native execution we may have incorrect page settings - * e.g. make sure all pages that need to be executable are executable! * * Note new_memprot is set only for SUBSET_APP_MEM_PROT_CHANGE, * and old_memprot is set for PRETEND_APP_MEM_PROT_CHANGE or SUBSET_APP_MEM_PROT_CHANGE. */ /* Note: hotp_only_mem_prot_change() relies on executable_areas to find out * previous state, so eliminating it should be carefully; see case 6669. */ uint app_memory_protection_change(dcontext_t *dcontext, app_pc base, size_t size, uint prot, /* platform independent MEMPROT_ */ uint *new_memprot, /* OUT */ uint *old_memprot /* OPTIONAL OUT*/) { /* FIXME: look up whether image, etc. here? * but could overlap multiple regions! */ bool is_executable; bool should_finish_flushing = false; bool dr_overlap = DYNAMO_OPTION(handle_DR_modify) != DR_MODIFY_OFF /* we don't care */ && dynamo_vm_area_overlap(base, base + size); bool system_overlap = DYNAMO_OPTION(handle_ntdll_modify) != DR_MODIFY_OFF /* we don't care */ && tamper_resistant_region_overlap(base, base + size); bool patch_proof_overlap = false; #ifdef WINDOWS uint frag_flags; #endif ASSERT(new_memprot != NULL); /* old_memprot is optional */ #if defined(PROGRAM_SHEPHERDING) && defined(WINDOWS) patch_proof_overlap = (!IS_STRING_OPTION_EMPTY(patch_proof_default_list) || !IS_STRING_OPTION_EMPTY(patch_proof_list)) && vmvector_overlap(patch_proof_areas, base, base + size); /* FIXME: [minor perf] all the above tests can be combined into a * single vmarea lookup when this feature default on, case 6632 */ ASSERT(base != NULL); if (patch_proof_overlap) { app_pc modbase = get_module_base(base); bool loader = is_module_patch_region(dcontext, base, base+size, false/*be liberal: don't miss loader*/); bool patching_code = is_range_in_code_section(modbase, base, base+size, NULL, NULL); bool patching_IAT = is_IAT(base, base+size, true/*page-align*/, NULL, NULL); /* FIXME: [perf] could have added CODE sections instead of modules to patch_proof_areas */ /* FIXME: [minor perf] is_module_patch_region already collected these */ /* FIXME: [minor perf] same check is done later for all IATs for emulate_IAT_writes */ bool patch_proof_IAT = false; /* NYI - case 6622 */ /* FIXME: case 6622 IAT hooker protection for some modules is * expected to conflict with emulate_IAT_writes, need to make * sure emulate_write_areas will not overlap with this */ ASSERT_NOT_IMPLEMENTED(!patch_proof_IAT); patch_proof_overlap = !loader && patching_code && /* even if it is not the loader we protect IAT sections only */ (!patching_IAT || patch_proof_IAT); LOG(THREAD, LOG_VMAREAS, 1, "patch proof module "PFX"-"PFX" modified %s, by %s,%s=>%s\n", base, base+size, patching_code ? "code!" : "data --ok", loader ? "loader --ok" : patching_code ? "hooker!" : "loader or hooker", patching_IAT ? "IAT hooker" : "patching!", patch_proof_overlap ? 
"SQUASH" : "allow"); /* curiosly the loader modifies the .reloc section of Dell\QuickSet\dadkeyb.dll */ } #endif /* defined(PROGRAM_SHEPHERDING) && defined(WINDOWS) */ /* FIXME: case 6622 IAT hooking should be controlled separately, * note that when it is not protecting all IAT areas - exemptions * tracked by module name there may have to handle two different * cases. If making sure a particular DLL is always using the * real exports current implementation above will work. Yet in * the use case of avoiding a particular IAT hooker replacing * imports from kernel32, _all_ modules will have to be pretend * writable. xref case 1948 for tracking read/written values */ if (dr_overlap || system_overlap || patch_proof_overlap) { uint how_handle; const char *target_area_name; /* FIXME: separate this in a function */ if (dr_overlap) { how_handle = DYNAMO_OPTION(handle_DR_modify); STATS_INC(app_modify_DR_prot); target_area_name = PRODUCT_NAME; } else if (system_overlap) { ASSERT(system_overlap); how_handle = DYNAMO_OPTION(handle_ntdll_modify); STATS_INC(app_modify_ntdll_prot); target_area_name = "system"; } else { ASSERT(patch_proof_overlap); target_area_name = "module"; how_handle = DR_MODIFY_NOP; /* use pretend writable */ STATS_INC(app_modify_module_prot); } /* we can't be both pretend writable and emulate write */ ASSERT(!vmvector_overlap(emulate_write_areas, base, base+size)); if (how_handle == DR_MODIFY_HALT) { /* Until we've fixed our DR area list problems and gotten shim.dll to work, * we will issue an unrecoverable error */ report_dynamorio_problem(dcontext, DUMPCORE_SECURITY_VIOLATION, NULL, NULL, "Application changing protections of " "%s memory @"PFX"-"PFX, target_area_name, base, base+size); /* FIXME: walking the loader data structures at arbitrary * points is dangerous due to data races with other threads * -- see is_module_being_initialized and get_module_name */ check_for_unsupported_modules(); os_terminate(dcontext, TERMINATE_PROCESS); ASSERT_NOT_REACHED(); } else { SYSLOG_INTERNAL_WARNING_ONCE("Application changing protections of " "%s memory at least once ("PFX"-"PFX")", target_area_name, base, base+size); if (how_handle == DR_MODIFY_NOP) { /* we use a separate list, rather than a flag on DR areas, as the * affected region could include non-DR memory */ /* FIXME: note that we do not intersect with a concrete * region that we want to protect - considering Win32 * protection changes allowed only separately * allocated regions this may be ok. If we want to * have subpage regions then it becomes an issue: * we'd have to be able to emulate a write on a * page that has pretend writable regions. * For now we ensure pretend_writable_areas is always page-aligned. 
*/ app_pc page_base; size_t page_size; ASSERT_CURIOSITY(ALIGNED(base, PAGE_SIZE)); ASSERT_CURIOSITY(ALIGNED(size, PAGE_SIZE)); page_base = (app_pc) PAGE_START(base); page_size = ALIGN_FORWARD(base + size, PAGE_SIZE) - (size_t)page_base; write_lock(&pretend_writable_areas->lock); if (TEST(MEMPROT_WRITE, prot)) { LOG(THREAD, LOG_VMAREAS, 2, "adding pretend-writable region "PFX"-"PFX"\n", page_base, page_base+page_size); add_vm_area(pretend_writable_areas, page_base, page_base+page_size, true, 0, NULL _IF_DEBUG("DR_MODIFY_NOP")); } else { LOG(THREAD, LOG_VMAREAS, 2, "removing pretend-writable region "PFX"-"PFX"\n", page_base, page_base+page_size); remove_vm_area(pretend_writable_areas, page_base, page_base+page_size, false); } write_unlock(&pretend_writable_areas->lock); LOG(THREAD, LOG_VMAREAS, 2, "turning system call into a nop\n"); if (old_memprot != NULL) { /* FIXME: case 10437 we should keep track of any previous values */ if (!get_memory_info(base, NULL, NULL, old_memprot)) { /* FIXME: should we fail instead of feigning success? */ ASSERT_CURIOSITY(false && "prot change nop should fail"); *old_memprot = MEMPROT_NONE; } } return PRETEND_APP_MEM_PROT_CHANGE; /* have syscall be a nop! */ } else if (how_handle == DR_MODIFY_FAIL) { /* not the default b/c hooks that target our DLL often ignore the return * code of the syscall and blindly write, failing on the write fault. */ LOG(THREAD, LOG_VMAREAS, 2, "turning system call into a failure\n"); return FAIL_APP_MEM_PROT_CHANGE; /* have syscall fail! */ } else if (how_handle == DR_MODIFY_ALLOW) { LOG(THREAD, LOG_VMAREAS, 2, "ALLOWING system call!\n"); /* continue down below */ } } ASSERT(how_handle == DR_MODIFY_ALLOW); } /* DR areas may have changed, but we still have to remove from pretend list */ if (USING_PRETEND_WRITABLE() && !TEST(MEMPROT_WRITE, prot) && pretend_writable_vm_area_overlap(base, base+size)) { ASSERT_NOT_TESTED(); /* FIXME: again we have the race -- if we could go from read to write * it would be a simple fix, else have to grab write up front, or check again */ write_lock(&pretend_writable_areas->lock); LOG(THREAD, LOG_VMAREAS, 2, "removing pretend-writable region "PFX"-"PFX"\n", base, base+size); remove_vm_area(pretend_writable_areas, base, base+size, false); write_unlock(&pretend_writable_areas->lock); } #ifdef PROGRAM_SHEPHERDING if (USING_FUTURE_EXEC_LIST && futureexec_vm_area_overlap(base, base + size)) { /* something changed */ if (!TEST(MEMPROT_EXEC, prot)) { /* we DO remove future regions just b/c they're now marked non-x * but we may want to re-consider this -- some hooks briefly go to rw, e.g. * although we MUST do this for executable_if_exec * we should add flags to future areas indicating which policy put it here * (have to not merge different policies, I guess -- problematic for * sub-page flush combined w/ other policies?) */ DEBUG_DECLARE(bool ok =) remove_futureexec_vm_area(base, base + size); ASSERT(ok); LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "future region "PFX"-"PFX" is being made non-x, removing\n", base, base + size); } else { /* Maybe nothing is changed in fact. */ /* In fact this happens when a protection size larger than * necessary for a hook leaves some pages on the * futureexec_vm_area_overlap region (case 2871 for a two * page hooker). There is nothing to do here, * executable_if_hook should re-add the pages. 
             */
            /* case 3279 - probably similar behaviour -- when a second
             * NOP memory protection change happens to a region
             * already on the future list - we'd need to power it up
             * again
             */
            /* xref case 3102 - where we don't care about VM_WRITABLE */
#if 0
            /* this syslog may cause services.exe to hang (ref case 666) */
            SYSLOG_INTERNAL_WARNING("future executable area overlapping with "PFX"-"PFX" made %s",
                                    base, base + size, memprot_string(prot));
#endif
        }
    }
#endif
#if defined(PROGRAM_SHEPHERDING) && defined(WINDOWS)
    /* Just remove up front if changing anything about an emulation region.
     * Should certainly remove if becoming -w, but should also remove if
     * being added to exec list -- current usage expects to be removed on
     * next protection change (hooker restoring IAT privileges).
     * FIXME: should make the ->rx restoration syscall a NOP for performance */
    if (DYNAMO_OPTION(emulate_IAT_writes) && !vmvector_empty(emulate_write_areas) &&
        vmvector_overlap(emulate_write_areas, base, base+size)) {
        LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 2,
            "removing emulation region "PFX"-"PFX"\n", base, base+size);
        vmvector_remove(emulate_write_areas, base, base+size);
    }
#endif
#ifndef PROGRAM_SHEPHERDING
    if (!INTERNAL_OPTION(hw_cache_consistency))
        return DO_APP_MEM_PROT_CHANGE; /* let syscall go through */
#endif
    /* look for calls making code writable!
     * cache is_executable here w/o holding lock -- if decide to perform state
     * change via flushing, we'll re-check overlap there and all will be atomic
     * at that point, no reason to try and make atomic from here, will hit
     * deadlock issues w/ thread_initexit_lock */
    is_executable = executable_vm_area_overlap(base, base + size, false/*have no lock*/);
    if (is_executable && TEST(MEMPROT_WRITE, prot) && !TEST(MEMPROT_EXEC, prot) &&
        INTERNAL_OPTION(hw_cache_consistency)) {
#ifdef WINDOWS
        app_pc IAT_start, IAT_end;
        /* Could not page-align and ask for original params but some hookers
         * page-align even when targeting only IAT */
        bool is_iat = is_IAT(base, base+size, true/*page-align*/, &IAT_start, &IAT_end);
        bool is_patch = is_module_patch_region(dcontext, base, base+size,
                                               true/*be conservative*/);
        DOSTATS({
            if (is_iat && is_patch)
                STATS_INC(num_app_rebinds);
        });
#ifdef PROGRAM_SHEPHERDING
        /* This potentially unsafe option is superseded by -coarse_merge_iat
         * FIXME: this should be available for !PROGRAM_SHEPHERDING */
        if (DYNAMO_OPTION(unsafe_ignore_IAT_writes) && is_iat && is_patch) {
            /* do nothing: let go writable and then come back */
            LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
                "WARNING: letting IAT be written w/o flushing: potentially unsafe\n");
            return DO_APP_MEM_PROT_CHANGE; /* let syscall go through */
        }
#endif
        /* Case 11072: must match these conditions w/ the assert on freeing */
        if (DYNAMO_OPTION(coarse_units) && DYNAMO_OPTION(coarse_merge_iat) &&
# ifdef PROGRAM_SHEPHERDING
            /* Ensure we'll re-mark as valid */
            (DYNAMO_OPTION(executable_if_rx_text) || DYNAMO_OPTION(executable_after_load)) &&
# endif
            is_iat && is_patch &&
            !executable_vm_area_executed_from(IAT_start, IAT_end) &&
            /* case 10830/11072: ensure currently marked coarse-grain to avoid
             * blessing the IAT region as coarse when it was in fact made non-coarse
             * due to a rebase (or anything else) prior to a rebind. check the end,
             * since we may have adjusted the exec area bounds to be post-IAT.
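             * (For illustration: a hypothetical DLL rebased at load time has
             * already lost its coarse-grain flag, so a later rebind of its IAT
             * must not re-bless the region as coarse here.)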
*/ get_executable_area_flags(base+size-1, &frag_flags) && TEST(FRAG_COARSE_GRAIN, frag_flags)) { coarse_info_t *info = get_coarse_info_internal(IAT_end, false/*no init*/, false/*no lock*/); /* loader rebinding * We cmp and free the stored code at +rx time; if that doesn't happen, * we free at module unload time. */ DEBUG_DECLARE(bool success =) os_module_store_IAT_code(base); ASSERT(success); ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); /* FRAG_COARSE_GRAIN excludes */ LOG(GLOBAL, LOG_VMAREAS, 2, "storing IAT code for "PFX"-"PFX"\n", IAT_start, IAT_end); if (info != NULL) { /* Only expect to do this for empty or persisted units */ ASSERT(info->cache == NULL || (info->persisted && info->non_frozen != NULL && info->non_frozen->cache == NULL)); /* Do not reset/free during flush as we hope to see a validating * event soon. */ ASSERT(!TEST(PERSCACHE_CODE_INVALID, info->flags)); info->flags |= PERSCACHE_CODE_INVALID; STATS_INC(coarse_marked_invalid); } } # ifdef PROGRAM_SHEPHERDING if (DYNAMO_OPTION(emulate_IAT_writes) && is_iat && /* We do NOT want to emulate hundreds of writes by the loader -- we * assume no other thread will execute in the module until it's * initialized. We only need our emulation for hookers who come in * after initialization when another thread may be in there. */ !is_patch) { /* To avoid having the IAT page (which often includes the start of the * text section) off the exec areas list, we only remove the IAT itself, * and emulate writes to it. * FIXME: perhaps this should become an IAT-only vector, and be used * for when we have the IAT read-only to protect it security-wise. */ /* unfortunately we have to flush to be conservative */ should_finish_flushing = flush_and_remove_executable_vm_area(dcontext, IAT_start, IAT_end - IAT_start); /* a write to IAT gets emulated, but to elsewhere on page is a code mod */ vmvector_add(emulate_write_areas, IAT_start, IAT_end, NULL); /* must release the exec areas lock, even if expect no flush */ if (should_finish_flushing) { flush_fragments_in_region_finish(dcontext, false /*don't keep initexit_lock*/); } LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "executable region == IAT so not marking %s, emulating writes\n", memprot_string(prot)); /* now leave as read-only. * we do not record what other flags they're using here -- we assume * they're going to restore IAT back to what it was */ /* FIXME: case 10437 we should keep track of any previous values */ if (old_memprot != NULL) { if (!get_memory_info(base, NULL, NULL, old_memprot)) { /* FIXME: should we fail instead of feigning success? */ ASSERT_CURIOSITY(false && "prot change nop should fail"); *old_memprot = MEMPROT_NONE; } } return PRETEND_APP_MEM_PROT_CHANGE; } # endif /* PROGRAM_SHEPHERDING */ #endif /* WINDOWS */ /* being made writable but non-executable! 
* kill all current fragments in the region (since a * non-executable region is ignored by flush routine) */ LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "WARNING: executable region being made writable and non-executable\n"); flush_fragments_and_remove_region(dcontext, base, size, false /* don't own initexit_lock */, false /* case 2236: keep futures */); #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hotp_only)) hotp_only_mem_prot_change(base, size, true, false); #endif } else if (is_executable && TESTALL(MEMPROT_WRITE | MEMPROT_EXEC, prot) && INTERNAL_OPTION(hw_cache_consistency)) { /* Need to flush all fragments in [base, base+size), unless * they are ALL already writable */ DOSTATS({ /* If all the overlapping executable areas are VM_WRITABLE| * VM_DELAY_READONLY then we could optimize away the flush since * we haven't made any portion of this region read only for * consistency purposes. We haven't implemented this optimization * as it's quite rare (though does happen xref case 8104) and * previous implementations of this optimization proved buggy. */ if (is_executable_area_writable_overlap(base, base + size, true /* ALL regions are: */, VM_WRITABLE|VM_DELAY_READONLY)) { STATS_INC(num_possible_app_to_rwx_skip_flush); } }); /* executable region being made writable * flush all current fragments, and mark as non-executable */ LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "WARNING: executable region "PFX"-"PFX" is being made writable!\n" "\tRemoving from executable list\n", base, base + size); /* use two-part flush to make futureexec & exec changes atomic w/ flush */ should_finish_flushing = flush_and_remove_executable_vm_area(dcontext, base, size); /* we flush_fragments_finish after security checks to keep them atomic */ } else if (is_executable && is_executable_area_writable(base) && !TEST(MEMPROT_WRITE, prot) && TEST(MEMPROT_EXEC, prot) && INTERNAL_OPTION(hw_cache_consistency)) { /* executable & writable region being made read-only * make sure any future write faults are given to app, not us */ LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "executable writable region "PFX"-"PFX" => read-only!\n", base, base + size); /* remove writable exec area, then add read-only exec area */ /* use two-part flush to make futureexec & exec changes atomic w/ flush */ should_finish_flushing = flush_and_remove_executable_vm_area(dcontext, base, size); /* FIXME: this is wrong -- this will make all pieces in the middle executable, * which is not what we want -- we want all pieces ON THE EXEC LIST to * change from rw to r. thus this should be like the change-to-selfmod case * in handle_modified_code => add new vector routine? (case 3570) */ add_executable_vm_area(base, base + size, 0 /* not image? 
FIXME */, 0, should_finish_flushing/* own lock if flushed */ _IF_DEBUG("protection change")); } /* also look for calls making data executable * FIXME: perhaps should do a write_keep for this is_executable, to bind * to the subsequent exec areas changes -- though case 2833 would still be there */ else if (!is_executable && TEST(MEMPROT_EXEC, prot) && INTERNAL_OPTION(hw_cache_consistency)) { if (TEST(MEMPROT_WRITE, prot)) { /* do NOT add to executable list if writable */ LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "WARNING: data region "PFX"-"PFX" made executable and " "writable, not adding to exec list\n", base, base + size); } else { bool add_to_exec_list = false; #ifdef WINDOWS bool check_iat = false; bool free_iat = false; #endif uint frag_flags = 0; DEBUG_DECLARE(const char *comment = "";) LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "WARNING: data region "PFX"-"PFX" is being made executable\n", base, base+size); #ifdef PROGRAM_SHEPHERDING /* if on future, no reason to add to exec list now * if once-only, no reason to add to exec list and remove from future * wait until actually executed! */ /* none of our policies allow this on the stack */ if (is_address_on_stack(dcontext, base)) { LOG(THREAD, LOG_VMAREAS, 2, "not allowing data->x for stack region\n"); # ifdef WINDOWS } else if (DYNAMO_OPTION(executable_after_load) && is_module_patch_region(dcontext, base, base+size, false/*be liberal: can't miss loader*/)) { STATS_INC(num_mark_after_load); add_to_exec_list = true; check_iat = true; DODEBUG({ comment = "if_after_load"; }); LOG(THREAD, LOG_VMAREAS, 2, "module is being initialized, adding region to executable list\n"); # endif } else if (DYNAMO_OPTION(executable_if_rx_text)) { /* FIXME: this should be moved out of the if (!executable) branch? * to where executable_if_x is handled */ /* NOTE - xref case 10526, the check here is insufficient to implement * this policy because [*base, *base+*size) could overlap multiple * sections (some of which might not be code) which would cause this * check to fail. Fixing this here would require us to find the * intersection of this region and any code section(s) and add the * resulting region(s) (there could be more then one). Instead we leave * this check here to catch the common case but extend * check_origins_helper to catch anything unusual. 
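                 * (For illustration: if [base, base+size) happened to span both
                 * .text and .data of a module, is_range_in_code_section() below
                 * would return false even though the .text portion alone could
                 * qualify; check_origins_helper is what catches that layout.)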
*/ app_pc modbase = get_module_base(base); if (modbase != NULL && is_range_in_code_section(modbase, base, base+size, NULL, NULL)) { STATS_INC(num_2rx_text); add_to_exec_list = true; IF_WINDOWS(check_iat = true;) DODEBUG({ comment = "if_rx_text"; }); LOG(THREAD, LOG_VMAREAS, 2, "adding code region being marked rx to executable list\n"); } } /* Don't use an else if here, the else if for * -executable_if_rx_text if doesn't check all its * conditionals in the first if */ if (DYNAMO_OPTION(executable_if_rx)) { STATS_INC(num_mark_if_rx); add_to_exec_list = true; mark_module_exempted(base); DODEBUG({ comment = "if_rx"; }); LOG(THREAD, LOG_VMAREAS, 2, "adding region marked only rx " "to executable list\n"); } #else add_to_exec_list = true; IF_WINDOWS(check_iat = true;) #endif #ifdef WINDOWS if (check_iat) { if (DYNAMO_OPTION(coarse_units) && DYNAMO_OPTION(coarse_merge_iat) && is_IAT(base, base+size, true/*page-align*/, NULL, NULL)) free_iat = true; LOG(THREAD, LOG_VMAREAS, 2, ".text or IAT is being made rx again "PFX"-"PFX"\n", base, base+size); if (!RUNNING_WITHOUT_CODE_CACHE()) { /* case 8640: let add_executable_vm_area() decide whether to * keep the coarse-grain flag */ frag_flags |= FRAG_COARSE_GRAIN; } else { free_iat = false; ASSERT(!os_module_free_IAT_code(base)); } } #endif if (add_to_exec_list) { /* FIXME : see note at top of function about bug 2833 */ ASSERT(!TEST(MEMPROT_WRITE, prot)); /* sanity check */ add_executable_vm_area(base, base + size, 0 /* not an unmodified image */, frag_flags, false/*no lock*/ _IF_DEBUG(comment)); } #ifdef WINDOWS if (free_iat) { DEBUG_DECLARE(bool had_iat =) os_module_free_IAT_code(base); DEBUG_DECLARE(app_pc text_start;) DEBUG_DECLARE(app_pc text_end;) DEBUG_DECLARE(app_pc iat_start = NULL;) DEBUG_DECLARE(app_pc iat_end = NULL;) /* calculate IAT bounds */ ASSERT(is_IAT(base, base+size, true/*page-align*/, &iat_start, &iat_end)); ASSERT(had_iat || /* duplicate the reasons we wouldn't have stored the IAT: */ !is_module_patch_region(dcontext, base, base+size, true/*be conservative*/) || executable_vm_area_executed_from(iat_start, iat_end) || /* case 11072: rebase prior to rebind prevents IAT storage */ (get_module_preferred_base_delta(base) != 0 && is_in_code_section(get_module_base(base), base, &text_start, &text_end) && iat_start >= text_start && iat_end <= text_end)); } #endif #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hotp_only)) hotp_only_mem_prot_change(base, size, false, true); #endif } } #ifdef PROGRAM_SHEPHERDING /* These policies do not depend on a transition taking place. */ /* Make sure weaker policies are considered first, so that * the region is kept on the futureexec list with the least restrictions */ if (DYNAMO_OPTION(executable_if_x) && TEST(MEMPROT_EXEC, prot)) { /* The executable_if_x policy considers all code marked ..x to be executable */ /* Note that executable_if_rx may have added a region directly * to the executable_areas, while here we only add to the futureexec_areas * FIXME: move executable_if_rx checks as an 'else if' following this if. */ LOG(GLOBAL, LOG_VMAREAS, 1, "New future region b/c x, "PFX"-"PFX" %s, was %sexecutable\n", base, base+size, memprot_string(prot), is_executable ? "" : "not "); STATS_INC(num_mark_if_x); add_futureexec_vm_area(base, base+size, false/*permanent*/ _IF_DEBUG(TEST(MEMPROT_WRITE, prot) ? 
"executable_if_x protect exec .wx" : "executable_if_x protect exec .-x" )); mark_module_exempted(base); } else if (DYNAMO_OPTION(executable_if_hook) && TESTALL(MEMPROT_WRITE | MEMPROT_EXEC, prot)) { /* Note here we're strict in requesting a .WX setting by the * hooker, won't be surprising if some don't do even this */ /* FIXME: could restrict to sub-page piece of text section, * since should only be targeting 4 or 5 byte area */ app_pc modbase = get_module_base(base); if (modbase != NULL) { /* PE, and is readable */ /* FIXME - xref case 10526, if the base - base+size overlaps more than * one section then this policy won't apply, though not clear if we'd want * it to for such an unusual hooker. */ if (is_range_in_code_section(modbase, base, base+size, NULL, NULL)) { uint vm_flags; DOLOG(2, LOG_INTERP|LOG_VMAREAS, { char modname[MAX_MODNAME_INTERNAL]; os_get_module_name_buf(modbase, modname, BUFFER_SIZE_ELEMENTS(modname)); LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "adding hook to future list: "PFX"-"PFX" in code of " "module @"PFX" == %s made rwx\n", base, base+size, modbase, modname == NULL? "<invalid name>" : modname); }); STATS_INC(num_hook); /* add as a once-only future area */ add_futureexec_vm_area(base, base + size, true/*once-only*/ _IF_DEBUG(memprot_string(prot))); /* This is text section, leave area on executable list * so app can execute here, write, and then execute * again (via future list) to handle cases of hooking * kernel32 functions ref case 2803 and case 3097 */ if (!should_finish_flushing) { /* FIXME: as a quick fix we flush the existing area * just in case anyways, so that we don't think about * merging properly the FRAG_DYNGEN */ should_finish_flushing = flush_and_remove_executable_vm_area(dcontext, base, size); } /* FIXME: we could optimize away the VM_DELAY_READONLY * path if we actually knew that the current real * protection flag is not writable. Yet we've removed * any internal data about it, so we need * restructuring or an extra system call here vs the * safe one at make_unwritable(). * * case 8308: Don't mark as DELAY_READONLY if -sandbox_writable * is on. We don't need to check for -sandbox_non_text here * since we know we're in a text region here. */ vm_flags = VM_WRITABLE; if (!DYNAMO_OPTION(sandbox_writable)) vm_flags |= VM_DELAY_READONLY; add_executable_vm_area(base, base + size, vm_flags, 0, should_finish_flushing/* own the lock if we have flushed */ _IF_DEBUG("prot chg text rx->rwx not yet written")); /* leave read only since we are leaving on exec list */ if (should_finish_flushing) { flush_fragments_in_region_finish(dcontext, false /*don't keep initexit_lock*/); } if (old_memprot != NULL) { /* FIXME: case 10437 we should keep track of any previous values */ if (!get_memory_info(base, NULL, NULL, old_memprot)) { /* FIXME: should we fail instead of feigning success? */ ASSERT_CURIOSITY(false && "prot change nop should fail"); *old_memprot = MEMPROT_NONE; } } /* case 10387 initial fix - on a DEP machine to * support properly native execution we must set the X * bit: most needed for -hotp_only when we provide our * code origins policies for GBOP enforcement, but * similar need in native_exec or other possible mixed * modes. */ /* We really should be setting everything according to * app request except for writability. Hopefully we * don't have sophisticated hookers using PAGE_GUARD * so ok to use only the memprot supported flags. 
*/ prot &= ~MEMPROT_WRITE; ASSERT_CURIOSITY(TESTALL(MEMPROT_READ|MEMPROT_EXEC, prot)); *new_memprot = prot; return SUBSET_APP_MEM_PROT_CHANGE; } } } #endif /* PROGRAM_SHEPHERDING */ if (should_finish_flushing) { flush_fragments_in_region_finish(dcontext, false /*don't keep initexit_lock*/); if (DYNAMO_OPTION(opt_jit) && is_jit_managed_area(base)) jitopt_clear_span(base, base+size); } return DO_APP_MEM_PROT_CHANGE; /* let syscall go through */ } #ifdef WINDOWS /* memory region base:base+size was flushed from hardware icache by app */ void app_memory_flush(dcontext_t *dcontext, app_pc base, size_t size, uint prot) { # ifdef PROGRAM_SHEPHERDING if (DYNAMO_OPTION(executable_if_flush)) { /* We want to ignore the loader calling flush, since our current * impl makes a flush region permanently executable. * The loader always follows the order "rw, rx, flush", but we have * seen real DGC marking rx before flushing as well, so we use * our module-being-loaded test: */ if (!is_module_patch_region(dcontext, base, base+size, false/*be liberal: don't miss loader*/)) { /* FIXME case 280: we'd like to always be once-only, but writes * to data on the same page make it hard to do that. */ bool onceonly = false; /* we do NOT go to page boundaries, instead we put sub-page * regions on our future list */ LOG(GLOBAL, LOG_VMAREAS, 1, "New future exec region b/c flushed: "PFX"-"PFX" %s\n", base, base+size, memprot_string(prot)); if (!DYNAMO_OPTION(selfmod_futureexec) && is_executable_area_on_all_selfmod_pages(base, base+size)) { /* for selfmod we can be onceonly, as writes to data on the * same page won't kick us off the executable list */ onceonly = true; } add_futureexec_vm_area(base, base + size, onceonly _IF_DEBUG("NtFlushInstructionCache")); if (DYNAMO_OPTION(xdata_rct)) { /* FIXME: for now we only care about start pc */ vmvector_add(app_flushed_areas, base, base+1, NULL); /* FIXME: remove when region de-allocated? */ } DOSTATS({ if (is_executable_area_writable(base)) STATS_INC(num_NT_flush_w2r); /* pretend writable (we made RO) */ if (TEST(MEMPROT_WRITE, prot)) STATS_INC(num_NT_flush_w); else STATS_INC(num_NT_flush_r); if (is_address_on_stack(dcontext, base)) { STATS_INC(num_NT_flush_stack); } else { STATS_INC(num_NT_flush_heap); } }); } else { LOG(THREAD, LOG_VMAREAS, 1, "module is being loaded, ignoring flush\n"); STATS_INC(num_NT_flush_loader); } } # else /* NOP */ # endif /* PROGRAM_SHEPHERDING */ } # ifdef PROGRAM_SHEPHERDING bool was_address_flush_start(dcontext_t *dcontext, app_pc pc) { ASSERT(DYNAMO_OPTION(xdata_rct)); /* FIXME: once we have flags marking where each futureexec region * came from we can distinguish NtFlush, but for now we need our own list, * which as FIXME above says could be simply htable since we only care about * start_pc (for now). * We assume we only add start pcs to the vector. 
     */
    return vmvector_overlap(app_flushed_areas, pc, pc + 1);
}
# endif
#endif

/****************************************************************************/

/* a helper function for check_thread_vm_area
 * assumes caller owns executable_areas write lock */
static void
handle_delay_readonly(dcontext_t *dcontext, app_pc pc, vm_area_t *area)
{
    ASSERT_OWN_WRITE_LOCK(true, &executable_areas->lock);
    ASSERT(TESTALL(VM_DELAY_READONLY|VM_WRITABLE, area->vm_flags));
    /* should never get a selfmod region here, to be marked selfmod
     * would already have had to execute (to get faulting write)
     * so region would already have had to go through here */
    ASSERT(!TEST(FRAG_SELFMOD_SANDBOXED, area->frag_flags));
    if (!is_on_stack(dcontext, pc, NULL) && INTERNAL_OPTION(hw_cache_consistency)) {
        vm_make_unwritable(area->start, area->end - area->start);
        area->vm_flags |= VM_MADE_READONLY;
    } else {
        /* this could happen if app changed mem protection on its
         * stack that triggered us adding a delay_readonly writable
         * region to the executable list in
         * app_memory_protection_change() */
        ASSERT_CURIOSITY(false);
        area->frag_flags |= FRAG_SELFMOD_SANDBOXED;
    }
    area->vm_flags &= ~VM_DELAY_READONLY;
    LOG(GLOBAL, LOG_VMAREAS, 2,
        "\tMarking existing wx vm_area_t ro for consistency, "
        "area "PFX" - "PFX", target pc "PFX"\n", area->start, area->end, pc);
    STATS_INC(num_delayed_rw2r);
}

/* Frees resources acquired in check_thread_vm_area().
 * data and vmlist need to match those used in check_thread_vm_area().
 * abort indicates that we are forging an exception or killing a thread
 * or some other drastic action that will not return to the caller
 * of check_thread_vm_area.
 * own_execareas_writelock indicates whether the executable_areas
 * write lock is currently held, while caller_execareas_writelock
 * indicates whether the caller held that lock and thus we should not
 * free it unless we're aborting.
 * If both clean_bb and abort are true, calls bb_build_abort.
 */
static void
check_thread_vm_area_cleanup(dcontext_t *dcontext, bool abort, bool clean_bb,
                             thread_data_t *data, void **vmlist,
                             bool own_execareas_writelock,
                             bool caller_execareas_writelock)
{
    if (own_execareas_writelock && (!caller_execareas_writelock || abort)) {
        ASSERT(self_owns_write_lock(&executable_areas->lock));
        write_unlock(&executable_areas->lock);
#ifdef HOT_PATCHING_INTERFACE
        if (DYNAMO_OPTION(hot_patching)) {
            ASSERT(self_owns_write_lock(hotp_get_lock()));
            write_unlock(hotp_get_lock());
        }
#endif
    }
    ASSERT(!caller_execareas_writelock ||
           self_owns_write_lock(&executable_areas->lock));
    /* FIXME: could we have multiply-nested vmlist==NULL where we'd need to
     * release read lock more than once? */
    if (vmlist == NULL)
        SHARED_VECTOR_RWLOCK(&data->areas, read, unlock);
    if (self_owns_write_lock(&data->areas.lock) && (vmlist != NULL || abort)) {
        /* Case 9376: we can forge an exception for vmlist==NULL, in which case
         * we must release the write lock from the prior layer;
         * we can also have a decode fault with vmlist!=NULL but w/o holding
         * the vm areas lock. */
        SHARED_VECTOR_RWLOCK(&data->areas, write, unlock);
    }
    /* we must not unlock vmareas for a nested check_thread_vm_area() call */
    if (abort) {
        if (vmlist != NULL && *vmlist != NULL) {
            vm_area_destroy_list(dcontext, *vmlist);
        }
        if (clean_bb) {
            /* clean up bb_building_lock and IR */
            bb_build_abort(dcontext, false/*don't call back*/, true/*unlock*/);
        }
    }
}

/* Releases any held locks. Up to caller to free vmlist.
* Flags are reverse logic, just like for check_thread_vm_area() */ void check_thread_vm_area_abort(dcontext_t *dcontext, void **vmlist, uint flags) { thread_data_t *data; if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_SHARED, flags)) { /* yes, reverse logic, see comment above */ data = shared_data; } else { data = (thread_data_t *) dcontext->vm_areas_field; } check_thread_vm_area_cleanup(dcontext, true, false/*caller takes care of bb*/, data, vmlist, self_owns_write_lock(&executable_areas->lock), self_owns_write_lock(&data->areas.lock)); } static bool allow_xfer_for_frag_flags(dcontext_t *dcontext, app_pc pc, uint src_flags, uint tgt_flags) { /* the flags we don't allow a direct cti to bridge if different */ const uint frag_flags_cmp = FRAG_SELFMOD_SANDBOXED | FRAG_COARSE_GRAIN #ifdef PROGRAM_SHEPHERDING | FRAG_DYNGEN #endif ; uint src_cmp = src_flags & frag_flags_cmp; uint tgt_cmp = tgt_flags & frag_flags_cmp; bool allow = (src_cmp == tgt_cmp) || /* Case 8917: hack to allow elision of call* to vsyscall-in-ntdll, * while still ruling out fine fragments coming in to coarse regions * (where we'd rather stop the fine and build a (cheaper) coarse bb). * Use == instead of TEST to rule out any other funny flags. */ (src_cmp == 0 /* we removed FRAG_COARSE_GRAIN to make this fine */ && tgt_cmp == FRAG_COARSE_GRAIN /* still in coarse region though */ && TEST(FRAG_HAS_SYSCALL, src_flags)); if (TEST(FRAG_COARSE_GRAIN, src_flags)) { /* FIXME case 8606: we can allow intra-module xfers but we have no * way of checking here -- would have to check in * interp.c:check_new_page_jmp(). So for now we disallow all xfers. * If our regions match modules exactly we shouldn't see any * intra-module direct xfers anyway. */ /* N.B.: ibl entry removal (case 9636) assumes coarse fragments * stay bounded within contiguous FRAG_COARSE_GRAIN regions */ allow = false; } if (!allow) { LOG(THREAD, LOG_VMAREAS, 3, "change in vm area flags (0x%08x vs. 0x%08x %d): " "stopping at "PFX"\n", src_flags, tgt_flags, TEST(FRAG_COARSE_GRAIN, src_flags), pc); DOSTATS({ if (TEST(FRAG_COARSE_GRAIN, tgt_flags)) STATS_INC(elisions_prevented_for_coarse); }); } return allow; } /* check origins of code for several purposes: * 1) we need list of areas where this thread's fragments come * from, for faster flushing on munmaps * 2) also for faster flushing, each vmarea has a list of fragments * 3) we need to mark as read-only any writable region that * has a fragment come from it, to handle self-modifying code * 4) for PROGRAM_SHEPHERDING for security * * We keep a list of vm areas per thread, to make flushing fragments * due to memory unmaps faster * This routine adds the page containing start to the thread's list. * Adds any FRAG_ flags relevant for a fragment overlapping start's page. * If xfer and encounters change in vmareas flags, returns false and does NOT * add the new page to the list for this fragment -- assumes caller will NOT add * it to the current bb. This allows for selectively not following direct ctis. * Assumes only building a real app bb if vmlist!=NULL -- assumes that otherwise * caller is reconstructing an app bb or some other secondary bb walk. * If returns true, returns in the optional stop OUT parameter the final pc of * this region (open-ended). 
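 * A rough sketch of one caller pattern (hypothetical, for illustration):
 *   if (!check_thread_vm_area(dcontext, cti_target, tag, &vmlist,
 *                             &flags, &stop, true/*xfer*/))
 *       stop the bb at the cti instead of eliding the direct transfer.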
*/ bool check_thread_vm_area(dcontext_t *dcontext, app_pc pc, app_pc tag, void **vmlist, uint *flags, app_pc *stop, bool xfer) { bool result; thread_data_t *data; bool in_last = false; uint frag_flags = 0; uint vm_flags = 0; bool ok; bool shared_to_private = false; /* used for new area */ app_pc base_pc = 0; size_t size = 0; /* set only for unknown areas */ uint prot = 0; /* set only for unknown areas */ /* both area and local_area either point to thread-local vector, for which * we do not need a lock, or to a shared area, for which we hold * a read or a write lock (either is sufficient) the entire time */ vm_area_t *area = NULL; vm_area_t *local_area = NULL; /* entry for this thread */ vm_area_t area_copy; /* local copy, so can let go of lock */ /* we can be recursively called (check_origins() calling build_app_bb_ilist()) * so make sure we don't re-try to get a lock we already hold */ bool caller_execareas_writelock = self_owns_write_lock(&executable_areas->lock); bool own_execareas_writelock = caller_execareas_writelock; DEBUG_DECLARE(const char *new_area_prefix;) /* deadlock issues if write lock is held already for vmlist!=NULL case */ ASSERT(vmlist == NULL || !caller_execareas_writelock); #ifdef HOT_PATCHING_INTERFACE /* hotp_vul_table_lock goes hand in hand w/ executable_areas lock here */ ASSERT(!DYNAMO_OPTION(hot_patching) || (own_execareas_writelock && self_owns_write_lock(hotp_get_lock())) || (!own_execareas_writelock && !self_owns_write_lock(hotp_get_lock()))); #endif ASSERT(flags != NULL); /* don't know yet whether this bb will be shared, but a good chance, * so we guess shared and will rectify later. * later, to add to local instead, we call again, and to tell the difference * we perversely pass FRAG_SHARED */ if (DYNAMO_OPTION(shared_bbs) && /* for TEMP_PRIVATE we make private up front */ !TEST(FRAG_TEMP_PRIVATE, *flags) && !TEST(FRAG_SHARED, *flags)) { /* yes, reverse logic, see comment above */ data = shared_data; DODEBUG({new_area_prefix = "new shared vm area: ";}); if (vmlist == NULL) { /* not making any state changes to vm lists */ /* need read access only, for lookup and holding ptr into vector */ SHARED_VECTOR_RWLOCK(&data->areas, read, lock); } else { /* building a bb */ /* need write access later, and want our lookup to be bundled * with our writes so we don't rely on the bb building lock, * so we grab the write lock for the whole routine */ SHARED_VECTOR_RWLOCK(&data->areas, write, lock); } } else { DODEBUG({new_area_prefix = "new vm area for thread: ";}); data = (thread_data_t *) dcontext->vm_areas_field; if (DYNAMO_OPTION(shared_bbs) && TEST(FRAG_SHARED, *flags)) shared_to_private = true; } LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 4, "check_thread_vm_area: pc = "PFX"\n", pc); /* no lock on data->areas needed if thread-local, * if shared we grabbed either read or write lock above */ /* check cached last area first to avoid lookup cost */ if (data->last_area != NULL) in_last = (pc < data->last_area->end && data->last_area->start <= pc); DOSTATS({ STATS_INC(checked_addresses); if (in_last) STATS_INC(looked_up_in_last_area); }); if (in_last) { local_area = data->last_area; area = local_area; } else if (lookup_addr(&data->areas, pc, &local_area)) { /* ok to hold onto pointer since it's this thread's */ area = local_area; } else { /* not in this thread's current executable list * try the global executable area list */ #ifdef LINUX /* i#1760: an app module loaded by custom loader (e.g., bionic libc) * might not be detected by DynamoRIO in process_mmap, so we check * whether 
it is an unseen module here. */ os_check_new_app_module(dcontext, pc); #endif #ifdef CLIENT_INTERFACE /* i#884: module load event is now on first execution */ instrument_module_load_trigger(pc); #endif if (!own_execareas_writelock) read_lock(&executable_areas->lock); ok = lookup_addr(executable_areas, pc, &area); if (ok && TEST(VM_DELAY_READONLY, area->vm_flags)) { /* need to mark region read only for consistency * need to upgrade to write lock, have to release lock first * then recheck conditions after grabbing hotp + write lock */ if (!own_execareas_writelock) { read_unlock(&executable_areas->lock); #ifdef HOT_PATCHING_INTERFACE /* Case 8780: due to lock rank issues we must grab the hotp lock * prior to the exec areas lock, as the hotp lock may be needed * for pc recreation in check_origins(). We assume this will * not cause noticeable lock contention. */ if (DYNAMO_OPTION(hot_patching)) write_lock(hotp_get_lock()); #endif write_lock(&executable_areas->lock); own_execareas_writelock = true; ok = lookup_addr(executable_areas, pc, &area); } if (ok && TEST(VM_DELAY_READONLY, area->vm_flags)) handle_delay_readonly(dcontext, pc, area); } if ((!ok || (ok && vmlist != NULL && !TEST(VM_EXECUTED_FROM, area->vm_flags))) && !own_execareas_writelock) { /* we must hold the write lock until we add the new region, as we * may want to give it selfmod or other properties that will not mix * well if we have a race and another thread adds an overlapping * region with different properties! * or if never executed from, we need to mark the area as such * (if we didn't support thread-private, we would just grab write * lock up front and not bother with read lock). */ read_unlock(&executable_areas->lock); #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) write_lock(hotp_get_lock()); /* case 8780 -- see comments above */ #endif write_lock(&executable_areas->lock); own_execareas_writelock = true; ok = lookup_addr(executable_areas, pc, &area); } if (ok) { if (vmlist != NULL && !TEST(VM_EXECUTED_FROM, area->vm_flags)) { ASSERT(self_owns_write_lock(&executable_areas->lock)); area->vm_flags |= VM_EXECUTED_FROM; } area_copy = *area; area = &area_copy; /* if we already have an area, we do not need to hold an execareas * lock, as there is no race within this routine. any removal of * the area must go through the flush synch and so cannot be * concurrent to this routine. 
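             * (area_copy above is a stack copy, so the area pointer used from
             * here on remains valid even if the global vector changes later.)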
*/ if (own_execareas_writelock) { if (!caller_execareas_writelock) { write_unlock(&executable_areas->lock); #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) write_unlock(hotp_get_lock()); /* case 8780 -- see above */ #endif own_execareas_writelock = false; } } else read_unlock(&executable_areas->lock); } /* if ok we should not own the readlock but we can't assert on that */ ASSERT(ok || (self_owns_write_lock(&executable_areas->lock) && own_execareas_writelock IF_HOTP(&& (!DYNAMO_OPTION(hot_patching) || self_owns_write_lock(hotp_get_lock()))))); ASSERT(!ok || area != NULL); if (!ok) { /* we no longer allow execution from arbitrary dr mem, our dll is * on the executable list and we specifically add the callback * interception code */ bool is_in_dr = is_dynamo_address(pc); /* this is an unknown or dr area * we may need to return false, if flags change or if pc is * unreadable (and so we don't want to follow a direct cti there * until the app actually does) */ bool is_allocated_mem = get_memory_info(pc, &base_pc, &size, &prot); bool is_being_unloaded = false; #ifdef CLIENT_INTERFACE /* Clients are allowed to use DR-allocated memory as app code: * we give up some robustness by allowing any DR-allocated memory * outside of the code cache that is marked as +x (we do not allow * -x to avoid a wild jump targeting our own heap and our own cache * cons policy making the heap read-only and causing a DR crash: * xref DrM#1820). * XXX i#852: should we instead have some dr_appcode_alloc() or * dr_appcode_mark() API? */ if (is_in_dr && INTERNAL_OPTION(code_api) && TEST(MEMPROT_EXEC, prot) && !in_fcache(pc)) is_in_dr = false; /* allow it */ #endif if (!is_allocated_mem) { /* case 9022 - Kaspersky sports JMPs to a driver in * kernel address space e.g. jmp f7ab7d67 * and system call queries refuse to provide any information. * We need to just try reading from that address. */ /* we first compare to * SYSTEM_BASIC_INFORMATION.HighestUserAddress (2GB or * 3GB) to know for sure we're testing a kernel * address, and not dealing with a race instead. */ if (!is_user_address(pc) && is_readable_without_exception_try(pc, 1)) { SYSLOG_INTERNAL_WARNING_ONCE( "Readable kernel address space memory at "PFX".\n" "case 9022 seen with Kaspersky AV", pc); /* FIXME: we're constructing these flags with the * intent to allow this region, any other * characteristics are hard to validate */ is_allocated_mem = true; base_pc = (app_pc)ALIGN_BACKWARD(pc, PAGE_SIZE); size = PAGE_SIZE; prot = MEMPROT_READ | MEMPROT_EXEC; /* FIXME: note we could also test for * MEMPROT_WRITE, note that explicitly turn on * SANDBOX_FLAG() anyways. Luckily, the one known * case where this is needed doesn't leave its * driver space writable. */ vm_flags |= VM_DRIVER_ADDRESS; /* we mark so that we can add to executable_areas * list later, and as in the only current example * so that we can allow execution. FIXME: Note * we'll never remove this area. We could check * on a future access whether such an address is * still readable, and then we can remove it if * the address stops being readable. Note that we * can never tell if this area has disappeared - * since we won't get notified on memory changes. * So we may be more likely to get a decode fault * if these ever happen. 
*/ /* FIXME: we don't support this on Linux where * we'd have to also add to all_memory_areas */ /* Note it is better to ALWAYS turn on * SANDBOX_FLAG for these fragments since it is * not clear that we can control any writes to * them from kernel space FIXME: may be * unnecessary in the case of Kaspersky. * insert_selfmod_sandbox() will suppress * sandbox2ro_threshold for VM_DRIVER_ADDRESS areas */ frag_flags |= SANDBOX_FLAG(); /* FIXME: could do this under an option */ } else { /* just a bad address in kernel space - like 0xdeadbeef */ } } else { /* check for race where DLL is still present, but no * longer on our list. */ is_being_unloaded = is_unreadable_or_currently_unloaded_region(pc); /* note here we'll forge an exception to the app, * even if the address is practically still readable */ if (is_being_unloaded) { STATS_INC(num_unloaded_race_code_origins); SYSLOG_INTERNAL_WARNING_ONCE("Application executing from unloaded " "address "PFX"\n", pc); } } /* if target unreadable, app will die, so make sure we don't die * instead, NOTE we treat dr memory as unreadable because of app * races (see bug 2574) and the fact that we don't yet expect * targeted attacks against dr */ /* case 9330 tracks a violation while we are unloading, * but address shouldn't be on a new futureexec_area (case 9371) */ #ifdef WINDOWS if (in_private_library(pc)) { /* Privately-loaded libs are put on the DR list, and if the app * ends up executing from them they can come here. We assert * in debug build but let it go in release. But, we first * have to swap to native execution of FLS callbacks, which * we cannot use our do-not-inline on b/c they're call* targets. */ if (private_lib_handle_cb(dcontext, pc)) { /* Did the native call and set up to interpret at retaddr */ check_thread_vm_area_cleanup(dcontext, true/*redirecting*/, true/*clean bb*/, data, vmlist, own_execareas_writelock, caller_execareas_writelock); /* avoid assert in dispatch_enter_dynamorio() */ dcontext->whereami = WHERE_TRAMPOLINE; set_last_exit(dcontext, (linkstub_t *) get_ibl_sourceless_linkstub(LINK_RETURN, 0)); if (is_couldbelinking(dcontext)) enter_nolinking(dcontext, NULL, false); KSTART(fcache_default); transfer_to_dispatch(dcontext, get_mcontext(dcontext), true/*full_DR_state*/); ASSERT_NOT_REACHED(); } CLIENT_ASSERT(false, "privately-loaded library executed by app: " "please report this transparency violation"); } #endif if ((is_in_dr IF_WINDOWS(&& !in_private_library(pc))) || !is_allocated_mem || prot == 0/*no access flags*/ || is_being_unloaded) { if (xfer) { /* don't follow cti, wait for app to get there and then * handle this (might be pathological case where cti is * never really followed) */ /* Note for case 9330 that for direct xfer we want * to be able to recreate the scenario after we * stop. Even though is_being_unloaded is a * transient property, since we treat unreadable * the same way, next time we get here we'll be * ok. We already have to make sure we don't * missclassify futureexec_areas so can't really * get here. Normal module unloads would have * flushed all other bb's. */ LOG(THREAD, LOG_VMAREAS, 3, "cti targets %s "PFX", stopping bb here\n", is_in_dr ? 
"dr" : "unreadable", pc); result = false; goto check_thread_return; } else { /* generate sigsegv as though target application * instruction being decoded generated it */ /* FIXME : might be pathalogical selfmod case where * app in fact jumps out of block before reaching the * unreadable memory */ if (vmlist == NULL) { /* Case 9376: check_origins_bb_pattern() can get here * w/ vmlist==NULL. We have to be careful to free * resources of the prior vmlist and the vmarea write lock. */ SYSLOG_INTERNAL_INFO("non-bb-build app decode found " "unreadable memory"); } LOG(GLOBAL, LOG_VMAREAS, 1, "application tried to execute from %s "PFX " is_allocated_mem=%d prot=0x%x\n", is_in_dr ? "dr" : "unreadable", pc, is_allocated_mem, prot); LOG(THREAD, LOG_VMAREAS, 1, "application tried to execute from %s "PFX " is_allocated_mem=%d prot=0x%x\n", is_in_dr ? "dr" : "unreadable", pc, is_allocated_mem, prot); DOLOG(1, LOG_VMAREAS, { dump_callstack (pc, (app_pc)get_mcontext_frame_ptr(dcontext, get_mcontext(dcontext)), THREAD, DUMP_NOT_XML); }); /* FIXME: what if the app masks it with an exception * handler? */ SYSLOG_INTERNAL_WARNING_ONCE( "Application tried to execute from %s memory "PFX".\n" "This may be a result of an unsuccessful attack or a potential " "application vulnerability.", is_in_dr ? "dr" : "unreadable", pc); /* Not logged as a security violation, but still an * external warning, We don't want to take blame for all * program bugs that overwrite EIP with invalid addresses, * yet it may help discovering new security holes. * [Although, watching for crashes of 0x41414141 can't read * 0x41414141 helps.] *It may also be a failing attack.. */ check_thread_vm_area_cleanup(dcontext, true/*abort*/, true/*clean bb*/, data, vmlist, own_execareas_writelock, caller_execareas_writelock); /* Create an exception record for this failure */ if (TEST(DUMPCORE_FORGE_UNREAD_EXEC, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Warning: App trying to execute from unreadable memory"); os_forge_exception(pc, UNREADABLE_MEMORY_EXECUTION_EXCEPTION); ASSERT_NOT_REACHED(); } } /* set all flags that don't intermix now */ #ifdef PROGRAM_SHEPHERDING # ifdef WINDOWS /* Don't classify the vsyscall code page as DGC for our purposes, * since we permit execution from that region. This is needed * for Windows XP/2003 pre-SP2 on which the code page is not * part of ntdll. * FIXME What about SP1? * FIXME A better soln is to add the region to the exec list * during os init and remove this specialized check. */ if (!is_dyngen_vsyscall(pc)) # endif frag_flags |= FRAG_DYNGEN; #endif #ifdef WINDOWS if ((prot & MEMPROT_WRITE) != 0 && is_on_stack(dcontext, pc, NULL)) { /* On win32, kernel kills process if esp is bad, * doesn't even call KiUserExceptionDispatcher entry point! * Thus we cannot make this region read-only. * We must treat it as self-modifying code, and sandbox * the whole thing, to guarantee cache consistency. * FIXME: esp can point anywhere, so other regions we make * read-only may end up becoming "stack", and then we'll * just silently fail on a write there!!! */ frag_flags |= SANDBOX_FLAG(); STATS_INC(num_selfmod_vm_areas); } #endif } } if (area != NULL) { ASSERT_CURIOSITY(vmlist == NULL || !TEST(VM_DELETE_ME, area->vm_flags)); if (vmlist != NULL && TEST(FRAG_COARSE_GRAIN, area->frag_flags)) { /* We assume get_executable_area_coarse_info() is called prior to * execution in a coarse region. We go ahead and initialize here * though we could wait if a xfer since the bb will not cross. 
*/ DEBUG_DECLARE(coarse_info_t *info =) get_coarse_info_internal(pc, true/*init*/, true/*have shvm lock*/); ASSERT(info != NULL); } ASSERT(!TEST(FRAG_COARSE_GRAIN, area->frag_flags) || get_coarse_info_internal(pc, false/*no init*/, false/*no lock*/) != NULL); frag_flags |= area->frag_flags; #ifdef PROGRAM_SHEPHERDING if (vmlist != NULL && /* only for bb building */ TEST(VM_PATTERN_REVERIFY, area->vm_flags) && !shared_to_private /* ignore shared-to-private conversion */) { /* case 8168: sandbox2ro_threshold can turn into a non-sandboxed region, * and our re-verify won't change that as the region is already on the * executable list. It will all work fine though. */ ASSERT(DYNAMO_OPTION(sandbox2ro_threshold) > 0 || TEST(FRAG_SELFMOD_SANDBOXED, area->frag_flags)); /* Re-verify the code origins policies, unless we are ensuring that * the end of the pattern is ok. This fixes case 4020 where another * thread can use a pattern region for non-pattern code. */ area = NULL; /* clear to force a re-verify */ /* Ensure we have prot */ get_memory_info(pc, &base_pc, &size, &prot); /* satisfy lock asumptions when area == NULL */ if (!own_execareas_writelock) { # ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) write_lock(hotp_get_lock()); /* case 8780 -- see comments above */ # endif write_lock(&executable_areas->lock); own_execareas_writelock = true; } } #endif } /* Ensure we looked up the mem attributes, if a new area */ ASSERT(area != NULL || size > 0); /* FIXME: fits nicely down below as alternative to marking read-only, * but must be here for vm==NULL so will stop bb at cti -- although * here it gets executed multiple times until actually switch to sandboxing */ if (area == NULL && DYNAMO_OPTION(ro2sandbox_threshold) > 0 && TEST(MEMPROT_WRITE, prot) && !TEST(FRAG_SELFMOD_SANDBOXED, frag_flags)) { vm_area_t *w_area; /* can't clobber area here */ ro_vs_sandbox_data_t *ro2s = NULL; /* even though area==NULL this can still be an exec-writable area * if area is sub-page! we can't change to sandboxing w/ sub-page * regions on the same page, so we wait until come here the 1st time * after a flush (which will flush the whole os region). thus, the * threshold is really just a lower bound. FIXME: add stats on this case! */ ASSERT(own_execareas_writelock); #ifdef HOT_PATCHING_INTERFACE ASSERT(!DYNAMO_OPTION(hot_patching) || self_owns_write_lock(hotp_get_lock())); #endif ASSERT(self_owns_write_lock(&executable_areas->lock)); if (!is_executable_area_writable(pc)) { /* ok to read as a writer */ /* see whether this region has been cycling on and off the list due * to being written to -- if so, switch to sandboxing */ read_lock(&written_areas->lock); ok = lookup_addr(written_areas, pc, &w_area); if (ok) ro2s = (ro_vs_sandbox_data_t *) w_area->custom.client; if (ok && ro2s->written_count >= DYNAMO_OPTION(ro2sandbox_threshold)) { LOG(GLOBAL, LOG_VMAREAS, 1, "new executable area "PFX"-"PFX" written >= %dX => " "switch to sandboxing\n", base_pc, base_pc+size, DYNAMO_OPTION(ro2sandbox_threshold)); DOSTATS({ if (vmlist != NULL) /* don't count non-build calls */ STATS_INC(num_ro2sandbox); }); /* TODO FOR PERFORMANCE: * -- if app appending to area of jitted code, make threshold big enough * so will get off page * -- modern jit shouldn't really have data on same page: all jitted * code should be combined * -- we're using OS regions b/c we merge ours, but if writer and writee * are on sep pages but in same OS region, we'll keep in cycle when we * could simply split region! 
even if peel off written-to pages here, * (can't at flush time as must flush whole vm region) * if exec even once from target page, will add entire since we * merge, and will flush entire since flush bounds suggested by * OS regions (and must flush entire merged vmarea since that's * granularity of frags list). still, worth splitting, even if * will merge back, to not lose perf if writee is on * never-executed page! to impl, want another vm vector in * which, at flush time, we store bounds for next exec. */ frag_flags |= SANDBOX_FLAG(); /* for sandboxing best to stay at single-page regions */ base_pc = (app_pc) PAGE_START(pc); size = PAGE_SIZE; /* We do not clear the written count as we're only doing one page * here. We want the next exec in the same region to also be * over the threshold. */ DODEBUG({ ro2s->ro2s_xfers++; }); LOG(GLOBAL, LOG_VMAREAS, 2, "\tsandboxing just the page "PFX"-"PFX"\n", base_pc, base_pc+size); } read_unlock(&written_areas->lock); } else STATS_INC(num_ro2sandbox_other_sub); } /* now that we know about new area, decide whether it's compatible to be * in the same bb as previous areas, as dictated by old flags * N.B.: we only care about FRAG_ flags here, not VM_ flags */ if (xfer && !allow_xfer_for_frag_flags(dcontext, pc, *flags, frag_flags)) { result = false; goto check_thread_return; } /* Normally we return the union of flags from all vmarea regions touched. * But if one region is coarse and another fine, we do NOT want the union, * but rather we want the whole thing to be fine. FIXME: We could also try * to put in functionality to truncate at the region boundary. * Case 9932: in fact we cannot allow touching two adjacent coarse regions. */ /* N.B.: ibl entry removal (case 9636) assumes coarse fragments * stay bounded within a single FRAG_COARSE_GRAIN region */ if (TEST(FRAG_COARSE_GRAIN, frag_flags) && pc != tag/*don't cmp to nothing*/ && ((*flags & FRAG_COARSE_GRAIN) != (frag_flags & FRAG_COARSE_GRAIN) || area == NULL || area->start > tag)) { *flags &= ~FRAG_COARSE_GRAIN; frag_flags &= ~FRAG_COARSE_GRAIN; /* else we'll re-add below */ DOSTATS({ if (vmlist != NULL) STATS_INC(coarse_overlap_with_fine); }); } if (vmlist == NULL) { /* caller only cared about whether to follow direct cti, so exit now, don't * make any persistent state changes */ *flags |= frag_flags; if (stop != NULL) { if (area == NULL) *stop = base_pc + size; else *stop = area->end; } ASSERT(*stop != NULL); result = true; goto check_thread_return; } /* once reach this point we're building a real bb */ #ifdef SIMULATE_ATTACK simulate_attack(dcontext, pc); #endif /* SIMULATE_ATTACK */ if (area == NULL /* unknown area */) { LOG(GLOBAL, LOG_VMAREAS, 2, "WARNING: "PFX" -> "PFX"-"PFX" %s%s is not on executable list (thread "TIDFMT")\n", pc, base_pc, base_pc+size, ((prot & MEMPROT_WRITE) != 0)?"W":"", ((prot & MEMPROT_EXEC) != 0)?"E":"", dcontext->owning_thread); DOLOG(3, LOG_VMAREAS, { print_executable_areas(GLOBAL); }); DODEBUG({ if (is_on_stack(dcontext, pc, NULL)) SYSLOG_INTERNAL_WARNING_ONCE("executing region with pc "PFX" on " "the stack.", pc); }); #ifdef DGC_DIAGNOSTICS dyngen_diagnostics(dcontext, pc, base_pc, size, prot); #endif #ifdef PROGRAM_SHEPHERDING /* give origins checker a chance to change region * N.B.: security violation reports in detect_mode assume that at * this point we aren't holding pointers into vectors, since the * shared vm write lock is released briefly for the diagnostic report. 
*/ if (DYNAMO_OPTION(code_origins) && !shared_to_private) { /* don't check for shared-to-private conversion */ int res = check_origins(dcontext, pc, &base_pc, &size, prot, &vm_flags, &frag_flags, xfer); if (res < 0) { if (!xfer) { action_type_t action = security_violation_main(dcontext, pc, res, OPTION_BLOCK|OPTION_REPORT); if (action != ACTION_CONTINUE) { check_thread_vm_area_cleanup(dcontext, true/*abort*/, true/*clean bb*/, data, vmlist, own_execareas_writelock, caller_execareas_writelock); security_violation_action(dcontext, action, pc); ASSERT_NOT_REACHED(); } } else { /* if xfer, we simply don't follow the xfer */ LOG(THREAD, LOG_VMAREAS, 3, "xfer to "PFX" => violation, so stopping at "PFX"\n", base_pc, pc); result = false; goto check_thread_return; } } } #endif /* make sure code is either read-only or selfmod sandboxed */ /* making unwritable and adding to exec areas must be atomic * (another thread could get what would look like app seg fault in between!) * and selfmod flag additions, etc. have restrictions, so we must have * held the write lock the whole time */ ASSERT(own_execareas_writelock); ok = lookup_addr(executable_areas, pc, &area); if (ok) { LOG(GLOBAL, LOG_VMAREAS, 1, "\tNew executable region is on page already added!\n"); #ifdef FORENSICS_ACQUIRES_INITEXIT_LOCK /* disabled until case 6141 is resolved: no lock release needed for now */ /* if we release the exec areas lock to emit forensic info, then * someone else could have added the region since we checked above. * see if we need to handle the DELAY_READONLY flag. */ if (TEST(VM_DELAY_READONLY, area->vm_flags)) handle_delay_readonly(dcontext, pc, area); else { #endif #ifdef PROGRAM_SHEPHERDING /* else, this can only happen for pattern reverification: no races! */ ASSERT(TEST(VM_PATTERN_REVERIFY, area->vm_flags) && TEST(FRAG_SELFMOD_SANDBOXED, area->frag_flags)); #else ASSERT_NOT_REACHED(); #endif #ifdef FORENSICS_ACQUIRES_INITEXIT_LOCK } #endif } else { /* need to add the region */ if (TEST(MEMPROT_WRITE, prot)) { vm_flags |= VM_WRITABLE; STATS_INC(num_writable_code_regions); /* Now that new area bounds are finalized, see if it should be * selfmod. Mainly this is a problem with a subpage region on the * same page as an existing subpage selfmod region. We want the new * region to be selfmod to avoid forcing the old to switch to page * protection. We won't have to do this once we separate the * consistency region list from the code origins list (case 3744): * then we'd have the whole page as selfmod on the consistency list, * with only the valid subpage on the origins list. We don't mark * pieces of a large region, for simplicity. */ if (is_executable_area_on_all_selfmod_pages(base_pc, base_pc+size)) { frag_flags |= SANDBOX_FLAG(); } /* case 8308: We've added options to force certain regions to * use selfmod instead of RO. -sandbox_writable causes all writable * regions to be selfmod. -sandbox_non_text causes all non-text * writable regions to be selfmod. 
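                 * (e.g., under -sandbox_non_text, a writable region in a module's
                 * .data section, or one outside any module, gets SANDBOX_FLAG()
                 * below instead of being made read-only.)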
*/ else if (DYNAMO_OPTION(sandbox_writable)) { frag_flags |= SANDBOX_FLAG(); } else if (DYNAMO_OPTION(sandbox_non_text)) { app_pc modbase = get_module_base(base_pc); if (modbase == NULL || !is_range_in_code_section (modbase, base_pc, base_pc + size, NULL, NULL)) { frag_flags |= SANDBOX_FLAG(); } } if (TEST(FRAG_SELFMOD_SANDBOXED, frag_flags)) { LOG(GLOBAL, LOG_VMAREAS, 2, "\tNew executable region "PFX"-"PFX" is writable, but selfmod, " "so leaving as writable\n", base_pc, base_pc+size); } else if (INTERNAL_OPTION(hw_cache_consistency)) { /* Make entire region read-only * If that's too big, i.e., it contains some data, the * region size will be corrected when we get a write * fault in the region */ LOG(GLOBAL, LOG_VMAREAS, 2, "\tNew executable region "PFX"-"PFX" is writable, " "making it read-only\n", base_pc, base_pc+size); #if 0 /* this syslog causes services.exe to hang * (ref case 666) once case 666 is fixed re-enable if * desired FIXME */ SYSLOG_INTERNAL_WARNING_ONCE("new executable vm area is writable."); #endif vm_make_unwritable(base_pc, size); vm_flags |= VM_MADE_READONLY; STATS_INC(num_rw2r_code_regions); } } /* now add the new region to the global list */ ASSERT(!TEST(FRAG_COARSE_GRAIN, frag_flags)); /* else no pre-exec query */ add_executable_vm_area(base_pc, base_pc+size, vm_flags | VM_EXECUTED_FROM, frag_flags, true/*own lock*/ _IF_DEBUG("unexpected vm area")); ok = lookup_addr(executable_areas, pc, &area); ASSERT(ok); DOLOG(2, LOG_VMAREAS, { /* new area could have been split into multiple */ print_contig_vm_areas(executable_areas, base_pc, base_pc+size, GLOBAL, "new executable vm area: "); }); } ASSERT(area != NULL); area_copy = *area; area = &area_copy; if (xfer && !allow_xfer_for_frag_flags(dcontext, pc, *flags, frag_flags)) { result = false; goto check_thread_return; } } if (local_area == NULL) { /* new area for this thread */ ASSERT(TEST(VM_EXECUTED_FROM, area->vm_flags)); /* marked above */ #ifdef DGC_DIAGNOSTICS if (!TESTANY(VM_UNMOD_IMAGE|VM_WAS_FUTURE, area->vm_flags)) { LOG(GLOBAL, LOG_VMAREAS, 1, "DYNGEN in %d: non-unmod-image exec area "PFX"-"PFX" %s\n", get_thread_id(), area->start, area->end, area->comment); } #endif #ifdef PROGRAM_SHEPHERDING DOSTATS({ if (!TEST(VM_UNMOD_IMAGE, area->vm_flags) && TEST(VM_WAS_FUTURE, area->vm_flags)) { /* increment for other threads (1st thread will be inc-ed in check_origins_helper) */ if (is_on_stack(dcontext, area->start, area)) { STATS_INC(num_exec_future_stack); } else { STATS_INC(num_exec_future_heap); } } }); # ifdef WINDOWS DOSTATS({ if (!TEST(VM_UNMOD_IMAGE, area->vm_flags) && !TEST(VM_WAS_FUTURE, area->vm_flags)) STATS_INC(num_exec_after_load); }); # endif #endif add_vm_area(&data->areas, area->start, area->end, area->vm_flags, area->frag_flags, NULL _IF_DEBUG(area->comment)); /* get area for actual pc (new area could have been split up) */ ok = lookup_addr(&data->areas, pc, &local_area); ASSERT(ok); DOLOG(2, LOG_VMAREAS, { print_vm_area(&data->areas, local_area, THREAD, new_area_prefix); }); DOLOG(5, LOG_VMAREAS, { print_vm_areas(&data->areas, THREAD); }); DOCHECK(CHKLVL_ASSERTS, { LOG(THREAD, 1, LOG_VMAREAS, "checking thread vmareas against executable_areas\n"); exec_area_bounds_match(dcontext, data); }); } ASSERT(local_area != NULL); data->last_area = local_area; /* for adding new bbs to frag lists */ if (tag != NULL) { bool already = false; fragment_t *entry, *prev; /* see if this frag is already on this area's list. 
* prev entry may not be first on list due to area merging or due to * trace building that requires bb creation in middle. */ /* vmlist has to point to front, so must walk every time * along the way check to see if existing entry points to this area */ for (entry = (fragment_t *) *vmlist, prev = NULL; entry != NULL; prev = entry, entry = FRAG_ALSO(entry)) { if (FRAG_PC(entry) >= local_area->start && FRAG_PC(entry) < local_area->end) { already = true; break; } } if (!already) { /* always allocate global, will re-allocate later if not shared */ prev = prepend_fraglist(MULTI_ALLOC_DC(dcontext, (data == shared_data) ? FRAG_SHARED : 0), local_area, pc, tag, prev); ASSERT(FRAG_PREV(prev) != NULL); if (*vmlist == NULL) { /* write back first */ *vmlist = (void *) prev; } } DOLOG(6, LOG_VMAREAS, { print_fraglist(dcontext, local_area, "after check_thread_vm_area, "); }); DOLOG(7, LOG_VMAREAS, { print_fraglists(dcontext); }); } *flags |= frag_flags; if (stop != NULL) { *stop = area->end; ASSERT(*stop != NULL); } result = true; /* we are building a real bb, assert consistency checks */ DOCHECK(1, { uint prot2; ok = get_memory_info(pc, NULL, NULL, &prot2); ASSERT(!ok || !TEST(MEMPROT_WRITE, prot2) || TEST(FRAG_SELFMOD_SANDBOXED, *flags) || !INTERNAL_OPTION(hw_cache_consistency)); ASSERT(is_readable_without_exception_try(pc, 1)); }); check_thread_return: check_thread_vm_area_cleanup(dcontext, false/*not aborting*/, false/*leave bb*/, data, vmlist, own_execareas_writelock, caller_execareas_writelock); return result; } static void remove_fraglist_entry(dcontext_t *dcontext, fragment_t *entry, vm_area_t *area); /* page_pc must be aligned to the start of a page */ void set_thread_decode_page_start(dcontext_t *dcontext, app_pc page_pc) { thread_data_t *data; /* Regardless of the dcontext that's passed in, we want to track the * page_pc for the thread so get a real dcontext. */ #ifdef UNIX /* FIXME On Linux, fetching a context requires a syscall, which is a * relatively costly operation, so we don't even try. Note that this can * be misleading when the dcontext that's passed in isn't the one for * the executing thread (such as in case 5388 on Windows). */ if (dcontext == GLOBAL_DCONTEXT) { ASSERT_CURIOSITY(dynamo_exited); return; } #else dcontext = get_thread_private_dcontext(); if (dcontext == NULL) { ASSERT_CURIOSITY(dynamo_exited); return; } #endif data = (thread_data_t *) dcontext->vm_areas_field; ASSERT(page_pc == (app_pc) PAGE_START(page_pc)); data->last_decode_area_page_pc = page_pc; data->last_decode_area_valid = true; } /* Check if address is in the last area that passed the check_thread_vm_area tests. * Used for testing for an application race condition (case 845), * where code executed by one thread is unmapped by another. * The last decoded application pc should always be in the thread's last area. 
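 * To tolerate an instruction that straddles a page boundary, the checks below
 * extend each candidate area by MAX_INSTR_LENGTH and also consult the page
 * after the last decode page.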
*/ bool check_in_last_thread_vm_area(dcontext_t *dcontext, app_pc pc) { thread_data_t *data = NULL; bool in_last = false; app_pc last_decode_area_page_pc; /* extra paranoia since called by intercept_exception */ if (is_readable_without_exception((app_pc)&dcontext->vm_areas_field, 4)) data = (thread_data_t *) dcontext->vm_areas_field; /* note that if data is NULL &data->last_area will not be readable either */ if (is_readable_without_exception((app_pc)&data->last_area, 4) && is_readable_without_exception((app_pc)&data->last_area->end, 4) && is_readable_without_exception((app_pc)&data->last_area->start, 4)) /* we can walk off to the next page */ in_last = (pc < data->last_area->end + MAX_INSTR_LENGTH && data->last_area->start <= pc); /* last decoded app pc may be in last shared area instead */ if (!in_last && DYNAMO_OPTION(shared_bbs)) { /* FIXME: bad to grab on failure path... * can we assume only grabbed then, and not synch? */ SHARED_VECTOR_RWLOCK(&shared_data->areas, read, lock); if (is_readable_without_exception((app_pc)&shared_data->last_area->end, 4) && is_readable_without_exception((app_pc)&shared_data->last_area->start, 4)) /* we can walk off to the next page */ in_last = (pc < shared_data->last_area->end + MAX_INSTR_LENGTH && shared_data->last_area->start <= pc); SHARED_VECTOR_RWLOCK(&shared_data->areas, read, unlock); } /* the last decoded app pc may be in the last decoded page or the page after * if the instr crosses a page boundary. This can help us more gracefully * handle a race during the origins pattern check between a thread unmapping * a region and another thread decoding in that region (xref case 7103). */ if (!in_last && data != NULL && safe_read(&data->last_decode_area_page_pc, sizeof(last_decode_area_page_pc), &last_decode_area_page_pc) && /* I think the above "safety" checks are ridiculous so not doing them here */ data->last_decode_area_valid) { /* Check the last decoded pc's current page and the page after. */ app_pc last_decode_page_end = last_decode_area_page_pc + 2*PAGE_SIZE; in_last = ((POINTER_OVERFLOW_ON_ADD(last_decode_area_page_pc, 2*PAGE_SIZE) || pc < last_decode_page_end) && last_decode_area_page_pc <= pc); } return in_last; } /* Removes vmlist entries added to the global vmarea list for f. * If new_vmlist != NULL, adds locally in addition to removing globally, and * removes the global area itself if empty. */ static void remove_shared_vmlist(dcontext_t *dcontext, void *vmlist, fragment_t *f, void **local_vmlist) { vm_area_t *area = NULL; fragment_t *entry = (fragment_t *) vmlist; fragment_t *next; bool remove; bool ok; uint check_flags = 0; app_pc pc; LOG(THREAD, LOG_VMAREAS, 4, "\tremoving shared vm data for F%d("PFX")\n", f->id, f->tag); SHARED_VECTOR_RWLOCK(&shared_data->areas, write, lock); while (entry != NULL) { ASSERT(FRAG_MULTI_INIT(entry)); ASSERT(FRAG_FRAG(entry) == (fragment_t *) f->tag); /* for this frag */ /* If area will become empty, remove it, since it was only added for * this bb that is not actually shared. * Case 8906: do NOT remove the area for coarse fragments, as they are * still shared! We need the area, just not the fragment on the frags * list(s). */ remove = (local_vmlist != NULL && FRAG_PREV(entry) == entry && !TEST(FRAG_COARSE_GRAIN, f->flags)); if (remove) { ok = lookup_addr(&shared_data->areas, FRAG_PC(entry), &area); ASSERT(ok && area != NULL); if (TEST(FRAG_COARSE_GRAIN, area->frag_flags)) { /* Case 9806: do NOT remove the coarse area even if this * particular fragment is fine-grained. 
We also test f->flags * up front to avoid the lookup cost as an optimization. */ remove = false; } else { LOG(THREAD, LOG_VMAREAS, 4, "sole fragment in added shared area, removing\n"); } } else area = NULL; next = FRAG_ALSO(entry); pc = FRAG_PC(entry); remove_fraglist_entry(GLOBAL_DCONTEXT, entry, area /* ok to be NULL */); if (remove) { /* FIXME case 8629: lots of churn if frequent removals (e.g., coarse grain) */ remove_vm_area(&shared_data->areas, area->start, area->end, false); shared_data->last_area = NULL; } if (local_vmlist != NULL) { /* add area to local and add local heap also entry */ if (DYNAMO_OPTION(shared_bbs)) check_flags = f->flags | FRAG_SHARED; /*indicator to NOT use global*/ ok = check_thread_vm_area(dcontext, pc, f->tag, local_vmlist, &check_flags, NULL, false /*xfer should not matter now*/); ASSERT(ok); } entry = next; } SHARED_VECTOR_RWLOCK(&shared_data->areas, write, unlock); } void vm_area_add_fragment(dcontext_t *dcontext, fragment_t *f, void *vmlist) { thread_data_t *data; vm_area_t *area = NULL; fragment_t *entry = (fragment_t *) vmlist; fragment_t *prev = NULL; LOG(THREAD, LOG_VMAREAS, 4, "vm_area_add_fragment for F%d("PFX")\n", f->id, f->tag); if (TEST(FRAG_COARSE_GRAIN, f->flags)) { /* We went ahead and built up vmlist since we might decide later to not * make a fragment coarse-grain. If it is emitted as coarse-grain, * we need to clean up the vmlist as it is not needed. */ remove_shared_vmlist(dcontext, vmlist, f, NULL/*do not add local*/); return; } if (TEST(FRAG_SHARED, f->flags)) { data = shared_data; /* need write lock since writing area->frags */ SHARED_VECTOR_RWLOCK(&shared_data->areas, write, lock); } else if (!DYNAMO_OPTION(shared_bbs) || /* should already be in private vmareas */ TESTANY(FRAG_IS_TRACE | FRAG_TEMP_PRIVATE, f->flags)) data = (thread_data_t *) dcontext->vm_areas_field; else { void *local_vmlist = NULL; /* turns out bb isn't shared, so we have to transfer also entries * to local heap and vector. we do that by removing from global * and then calling check_thread_vm_area, telling it to add local. 
*/ ASSERT(dcontext != GLOBAL_DCONTEXT); /* only bbs do we build shared and then switch to private */ ASSERT(!TEST(FRAG_IS_TRACE, f->flags)); data = (thread_data_t *) dcontext->vm_areas_field; LOG(THREAD, LOG_VMAREAS, 4, "\tbb not shared, shifting vm data to thread-local\n"); remove_shared_vmlist(dcontext, vmlist, f, &local_vmlist); /* now proceed as though everything were local to begin with */ vmlist = local_vmlist; entry = (fragment_t *) vmlist; } /* swap f for the first multi_entry_t (the one in region of f->tag) */ ASSERT(entry != NULL); FRAG_NEXT_ASSIGN(f, FRAG_NEXT(entry)); FRAG_PREV_ASSIGN(f, FRAG_PREV(entry)); FRAG_ALSO_ASSIGN(f, FRAG_ALSO(entry)); prev = FRAG_PREV(f); ASSERT(prev != NULL); /* prev is never null */ if (FRAG_NEXT(prev) == NULL) { DEBUG_DECLARE(bool ok =) /* need to know area */ lookup_addr(&data->areas, FRAG_PC(entry), &area); ASSERT(ok); /* remember: prev wraps around, next does not */ ASSERT(area->custom.frags == entry); area->custom.frags = f; /* if single entry will be circular */ if (prev == entry) FRAG_PREV_ASSIGN(f, f); } else FRAG_NEXT_ASSIGN(prev, f); if (FRAG_NEXT(f) == NULL) { if (area == NULL) { DEBUG_DECLARE(bool ok =) /* need to know area for area->frags */ lookup_addr(&data->areas, FRAG_PC(entry), &area); ASSERT(ok); } if (area->custom.frags == f) { ASSERT(FRAG_PREV(area->custom.frags) == f); } else { ASSERT(FRAG_PREV(area->custom.frags) == entry); FRAG_PREV_ASSIGN(area->custom.frags, f); } } else { prev = FRAG_NEXT(f); FRAG_PREV_ASSIGN(prev, f); } ASSERT(area_contains_frag_pc(area, entry)); prev = FRAG_ALSO(entry); nonpersistent_heap_free(MULTI_ALLOC_DC(dcontext, entry->flags), entry, sizeof(multi_entry_t) HEAPACCT(ACCT_VMAREA_MULTI)); entry = prev; DOSTATS({ if (entry != NULL) STATS_INC(num_bb_also_vmarea); }); /* now put backpointers in */ while (entry != NULL) { ASSERT(FRAG_MULTI_INIT(entry)); ASSERT(FRAG_FRAG(entry) == (fragment_t *) f->tag); /* for this frag */ DOLOG(4, LOG_VMAREAS, { print_entry(dcontext, entry, "\talso "); }); FRAG_FRAG_ASSIGN(entry, f); /* remove the init flag now that the real fragment_t is in the f field * The vector lock protects this non-atomic flag change. */ entry->flags &= ~FRAG_IS_EXTRA_VMAREA_INIT; entry = FRAG_ALSO(entry); } DOLOG(6, LOG_VMAREAS, { print_frag_arealist(dcontext, f); }); DOLOG(7, LOG_VMAREAS, { print_fraglists(dcontext); }); /* can't release lock once done w/ prev/next values since alsos can * be changed as well by vm_area_clean_fraglist()! */ SHARED_VECTOR_RWLOCK(&data->areas, write, unlock); } void acquire_vm_areas_lock(dcontext_t *dcontext, uint flags) { thread_data_t *data = GET_DATA(dcontext, flags); SHARED_VECTOR_RWLOCK(&data->areas, write, lock); } bool acquire_vm_areas_lock_if_not_already(dcontext_t *dcontext, uint flags) { thread_data_t *data = GET_DATA(dcontext, flags); return writelock_if_not_already(&data->areas); } void release_vm_areas_lock(dcontext_t *dcontext, uint flags) { thread_data_t *data = GET_DATA(dcontext, flags); SHARED_VECTOR_RWLOCK(&data->areas, write, unlock); } #ifdef DEBUG /* i#942: Check that each also_vmarea entry in a multi-area fragment is in its * own vmarea. If a fragment is on a vmarea fragment list twice, we can end up * deleting that fragment twice while flushing. 
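 * (Each entry on the also chain is looked up in tgt_data's area vector; two
 * entries resolving to the same vm_area_t constitute a duplicate.)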
*/ static bool frag_also_list_areas_unique(dcontext_t *dcontext, thread_data_t *tgt_data, void **vmlist) { fragment_t *entry; fragment_t *already; vm_area_t *entry_area; vm_area_t *already_area; bool ok; for (entry = (fragment_t *) *vmlist; entry != NULL; entry = FRAG_ALSO(entry)) { ASSERT(FRAG_MULTI(entry)); ok = lookup_addr(&tgt_data->areas, FRAG_PC(entry), &entry_area); ASSERT(ok); /* Iterate the previous also entries and make sure they don't have the * same vmarea. * XXX: This is O(n^2) in the also list length, but these lists are * short and the O(n) impl would require a hashtable. */ for (already = (fragment_t *) *vmlist; already != entry; already = FRAG_ALSO(already)) { ASSERT(FRAG_MULTI(already)); ok = lookup_addr(&tgt_data->areas, FRAG_PC(already), &already_area); ASSERT(ok); if (entry_area == already_area) return false; } } return true; } /* i#942: Check that the per-thread list of executed areas doesn't cross any * executable_area boundaries. If this happens, we start adding fragments to the * wrong vmarea fragment lists. This check should be roughly O(n log n) in the * number of exec areas, so not too slow to run at the assertion check level. */ static void exec_area_bounds_match(dcontext_t *dcontext, thread_data_t *data) { vm_area_vector_t *v = &data->areas; int i; read_lock(&executable_areas->lock); for (i = 0; i < v->length; i++) { vm_area_t *thread_area = &v->buf[i]; vm_area_t *exec_area; bool ok = lookup_addr(executable_areas, thread_area->start, &exec_area); ASSERT(ok); /* It's OK if thread areas are more fragmented than executable_areas. */ if (!(thread_area->start >= exec_area->start && thread_area->end <= exec_area->end)) { DOLOG(1, LOG_VMAREAS, { LOG(THREAD, LOG_VMAREAS, 1, "%s: bounds mismatch on %s vmvector\n", __FUNCTION__, (TEST(VECTOR_SHARED, v->flags) ? "shared" : "private")); print_vm_area(v, thread_area, THREAD, "thread area: "); print_vm_area(v, exec_area, THREAD, "exec area: "); LOG(THREAD, 1, LOG_VMAREAS, "executable_areas:\n"); print_vm_areas(executable_areas, THREAD); LOG(THREAD, 1, LOG_VMAREAS, "thread areas:\n"); print_vm_areas(v, THREAD); ASSERT(false && "vmvector does not match exec area bounds"); }); } } read_unlock(&executable_areas->lock); } #endif /* DEBUG */ /* Creates a list of also entries for each vmarea touched by f and prepends it * to vmlist. * * Case 8419: this routine will fail and return false if f is marked as * FRAG_WAS_DELETED, since that means f's also entries have been deleted! * Caller can make an atomic no-fail region by holding f's vm area lock * and the change_linking_lock and passing true for have_locks. 
*/ bool vm_area_add_to_list(dcontext_t *dcontext, app_pc tag, void **vmlist, uint list_flags, fragment_t *f, bool have_locks) { thread_data_t *src_data = GET_DATA(dcontext, f->flags); thread_data_t *tgt_data = GET_DATA(dcontext, list_flags); vm_area_t *area = NULL; bool ok; fragment_t *prev = (fragment_t *) *vmlist; fragment_t *already; fragment_t *entry = f; bool success = true; bool lock; if (!have_locks) SHARED_FLAGS_RECURSIVE_LOCK(f->flags, acquire, change_linking_lock); else { ASSERT((!TEST(VECTOR_SHARED, tgt_data->areas.flags) && !TEST(VECTOR_SHARED, src_data->areas.flags)) || self_owns_recursive_lock(&change_linking_lock)); } /* support caller already owning write lock */ lock = writelock_if_not_already(&src_data->areas); if (src_data != tgt_data) { /* we assume only one of the two is shared, or that they are both the same, * and we thus grab only one lock in this routine: * otherwise we need to do more work to avoid deadlocks here! */ ASSERT(!TEST(VECTOR_SHARED, tgt_data->areas.flags) || !TEST(VECTOR_SHARED, src_data->areas.flags)); if (TEST(VECTOR_SHARED, tgt_data->areas.flags)) { ASSERT(!lock); lock = writelock_if_not_already(&tgt_data->areas); } } ASSERT((lock && !have_locks) || (!lock && have_locks) || (!TEST(VECTOR_SHARED, tgt_data->areas.flags) && !TEST(VECTOR_SHARED, src_data->areas.flags))); DOCHECK(CHKLVL_ASSERTS, { LOG(THREAD, 1, LOG_VMAREAS, "checking src_data\n"); exec_area_bounds_match(dcontext, src_data); LOG(THREAD, 1, LOG_VMAREAS, "checking tgt_data\n"); exec_area_bounds_match(dcontext, tgt_data); }); /* If deleted, the also field is invalid and we cannot handle that! */ if (TEST(FRAG_WAS_DELETED, f->flags)) { success = false; goto vm_area_add_to_list_done; } /* vmlist has to point to front, so must walk every time to find end */ while (prev != NULL && FRAG_ALSO(prev) != NULL) prev = FRAG_ALSO(prev); /* walk f's areas */ while (entry != NULL) { /* see if each of f's areas is already on trace's list */ ok = lookup_addr(&src_data->areas, FRAG_PC(entry), &area); ASSERT(ok); ok = false; /* whether found existing entry in area or not */ for (already = (fragment_t *) *vmlist; already != NULL; already = FRAG_ALSO(already)) { ASSERT(FRAG_MULTI(already)); if (FRAG_PC(already) >= area->start && FRAG_PC(already) < area->end) { ok = true; break; } } if (!ok) { /* found new area that trace is on */ /* src may be shared bb, its area may not be on tgt list (e.g., private trace) */ if (src_data != tgt_data) { /* else, have area already */ vm_area_t *tgt_area = NULL; if (lookup_addr(&tgt_data->areas, FRAG_PC(entry), &tgt_area)) { /* check target area for existing entry */ for (already = (fragment_t *) *vmlist; already != NULL; already = FRAG_ALSO(already)) { ASSERT(FRAG_MULTI(already)); if (FRAG_PC(already) >= tgt_area->start && FRAG_PC(already) < tgt_area->end) { ok = true; break; } } if (ok) break; } else { add_vm_area(&tgt_data->areas, area->start, area->end, area->vm_flags, area->frag_flags, NULL _IF_DEBUG(area->comment)); ok = lookup_addr(&tgt_data->areas, FRAG_PC(entry), &tgt_area); ASSERT(ok); /* modified vector, must clear last_area */ tgt_data->last_area = NULL; DOLOG(2, LOG_VMAREAS, { print_vm_area(&tgt_data->areas, tgt_area, THREAD, "new vm area for thread: "); }); DOLOG(5, LOG_VMAREAS, { print_vm_areas(&tgt_data->areas, THREAD); }); } area = tgt_area; } ASSERT(area != NULL); prev = prepend_fraglist(MULTI_ALLOC_DC(dcontext, list_flags), area, FRAG_PC(entry), tag, prev); if (*vmlist == NULL) { /* write back first */ *vmlist = (void *) prev; } } entry = FRAG_ALSO(entry); } 
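    /* Every vm area touched by f's also chain should now have a corresponding
     * multi entry on vmlist; verify that no area picked up duplicates (i#942).
     */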
ASSERT_MESSAGE(CHKLVL_DEFAULT, "fragment also list has duplicate entries", frag_also_list_areas_unique(dcontext, tgt_data, vmlist)); DOLOG(6, LOG_VMAREAS, { print_frag_arealist(dcontext, (fragment_t *) *vmlist); }); DOLOG(7, LOG_VMAREAS, { print_fraglists(dcontext); }); vm_area_add_to_list_done: if (lock) { if (src_data != tgt_data) SHARED_VECTOR_RWLOCK(&tgt_data->areas, write, unlock); SHARED_VECTOR_RWLOCK(&src_data->areas, write, unlock); } if (!have_locks) SHARED_FLAGS_RECURSIVE_LOCK(f->flags, release, change_linking_lock); return success; } /* Frees storage for any multi-entries in the list (NOT for any fragment_t). * FIXME: this is now used on bb abort, where we may want to remove a vmarea * that was added only for a unreadable region (if decode fault will have been * added already)! Yet we don't know whether any coarse fragments in area, * etc., so we go ahead and leave there: cached in last_area will lead to decode * fault rather than explicit detection in check_thread_vm_area but that's ok. * If we do want to remove should share code between this routine and * remove_shared_vmlist(). */ void vm_area_destroy_list(dcontext_t *dcontext, void *vmlist) { if (vmlist != NULL) vm_area_remove_fragment(dcontext, (fragment_t *)vmlist); } bool vm_list_overlaps(dcontext_t *dcontext, void *vmlist, app_pc start, app_pc end) { vm_area_vector_t *v = GET_VECTOR(dcontext, ((fragment_t *)vmlist)->flags); fragment_t *entry; bool ok; vm_area_t *area; bool result = false; LOG(THREAD, LOG_VMAREAS, 4, "vm_list_overlaps "PFX" vs "PFX"-"PFX"\n", vmlist, start, end); /* don't assert if can't find anything -- see usage in handle_modified_code() */ if (v == NULL) return false; SHARED_VECTOR_RWLOCK(v, read, lock); for (entry = vmlist; entry != NULL; entry = FRAG_ALSO(entry)) { ok = lookup_addr(v, FRAG_PC(entry), &area); if (!ok) break; if (start < area->end && end > area->start) { result = true; break; } } SHARED_VECTOR_RWLOCK(v, read, unlock); return result; } /* Removes an entry from the fraglist of area. * If area is NULL, looks it up based on dcontext->vm_areas_field->areas, * or the shared areas, depending on entry. * That lookup may need to be synchronized: this routine checks if the * caller holds the write lock before grabbing it. * If entry is a multi_entry_t, frees its heap * DOES NOT update the also chain! 
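 * Callers removing individual multi entries must splice them out of the
 * fragment's also chain themselves (as vm_area_clean_fraglist() does).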
*/ static void remove_fraglist_entry(dcontext_t *dcontext, fragment_t *entry, vm_area_t *area) { thread_data_t *data = GET_DATA(dcontext, entry->flags); fragment_t *prev; vm_area_vector_t *vector = &data->areas; /* need write lock since may modify area->frags */ bool lock = writelock_if_not_already(vector); /* entry is only in shared vector if still live -- if not * we shouldn't get here */ ASSERT(!TEST(VECTOR_SHARED, vector->flags) || !TEST(FRAG_WAS_DELETED, entry->flags)); ASSERT(area_contains_frag_pc(area, entry)); prev = FRAG_PREV(entry); if (FRAG_NEXT(prev) == NULL || FRAG_NEXT(entry) == NULL) { /* need to know area */ DEBUG_DECLARE(bool ok =) lookup_addr(vector, FRAG_PC(entry), &area); ASSERT(ok); ASSERT(area != NULL); } /* remember: prev wraps around, next does not */ if (FRAG_NEXT(prev) == NULL) { ASSERT(area->custom.frags == entry); area->custom.frags = FRAG_NEXT(entry); } else { FRAG_NEXT_ASSIGN(prev, FRAG_NEXT(entry)); } if (FRAG_NEXT(entry) == NULL) { if (area->custom.frags != NULL) { ASSERT(FRAG_PREV(area->custom.frags) == entry); FRAG_PREV_ASSIGN(area->custom.frags, FRAG_PREV(entry)); } } else { fragment_t *next = FRAG_NEXT(entry); FRAG_PREV_ASSIGN(next, FRAG_PREV(entry)); } /* next MUST be NULL-ed for fragment_remove_shared_no_flush() */ FRAG_NEXT_ASSIGN(entry, NULL); DODEBUG({ FRAG_PREV_ASSIGN(entry, NULL); FRAG_ALSO_ASSIGN(entry, NULL); }); if (FRAG_MULTI(entry)) { nonpersistent_heap_free(MULTI_ALLOC_DC(dcontext, entry->flags), entry, sizeof(multi_entry_t) HEAPACCT(ACCT_VMAREA_MULTI)); } if (lock) SHARED_VECTOR_RWLOCK(vector, write, unlock); } #ifdef DEBUG /* For every multi_entry_t fragment in the fraglist, make sure that neither the * real fragment nor any of the other also entries are in the same fraglist. * This should only ever happen after a merger, at which point we call * vm_area_clean_fraglist() to fix it. Any other occurrence is a bug. */ static void vm_area_check_clean_fraglist(vm_area_t *area) { fragment_t *entry; for (entry = area->custom.frags; entry != NULL; entry = FRAG_NEXT(entry)) { /* All entries and fragments should be from this area. */ ASSERT(area_contains_frag_pc(area, entry)); if (FRAG_MULTI(entry)) { fragment_t *f = FRAG_FRAG(entry); /* Ideally we'd take FRAG_ALSO(f) to start also iteration, but that * pointer isn't valid during bb building. */ fragment_t *also = FRAG_ALSO(entry); ASSERT(f != FRAG_NEXT(entry)); /* Iterate the also list. All elements should be outside the * current area, or they should be the multi_entry_t that we're * currently looking at. */ while (also != NULL) { ASSERT(FRAG_MULTI(also)); ASSERT(also == entry || !area_contains_frag_pc(area, also)); also = FRAG_ALSO(also); } /* This is a multi area entry, so the real fragment shouldn't start * in this area and therefore shouldn't be on this list. 
*/ ASSERT(FRAG_MULTI_INIT(entry) || !(f->tag >= area->start && f->tag < area->end)); } } } #endif /* DEBUG */ /* Removes redundant also entries in area's frags list * (viz., those also entries that are now in same area as frag) * Meant to be called after merging areas */ static void vm_area_clean_fraglist(dcontext_t *dcontext, vm_area_t *area) { fragment_t *entry, *next, *f; fragment_t *also, *also_prev, *also_next; LOG(THREAD, LOG_VMAREAS, 4, "vm_area_clean_fraglist for "PFX"-"PFX"\n", area->start, area->end); DOLOG(6, LOG_VMAREAS, { print_fraglist(dcontext, area, "before cleaning "); }); /* FIXME: would like to assert we hold write lock but only have area ptr */ for (entry = area->custom.frags; entry != NULL; entry = next) { next = FRAG_NEXT(entry); /* might delete entry */ /* Strategy: look at each multi, see if its fragment_t is here or if the next * multi in also chain is here. * This cleaning doesn't happen very often so this shouldn't be perf critical. */ if (FRAG_MULTI(entry)) { f = FRAG_FRAG(entry); ASSERT(f != next); /* Remove later also entries first */ also = FRAG_ALSO(entry); also_prev = entry; while (also != NULL) { app_pc pc = FRAG_PC(also); also_next = FRAG_ALSO(also); if (pc >= area->start && pc < area->end) { ASSERT(FRAG_FRAG(also) == f); DOLOG(5, LOG_VMAREAS, { print_entry(dcontext, also, "\tremoving "); }); /* we have to remove from also chain ourselves */ FRAG_ALSO_ASSIGN(also_prev, also_next); /* now remove from area frags list */ remove_fraglist_entry(dcontext, also, area); } else also_prev = also; also = also_next; } /* fragment_t itself is always in area of its tag */ if (!FRAG_MULTI_INIT(entry) && f->tag >= area->start && f->tag < area->end) { /* Remove this multi entry */ DOLOG(5, LOG_VMAREAS, { print_entry(dcontext, entry, "\tremoving "); }); /* we have to remove from also chain ourselves */ for (also_prev = f; FRAG_ALSO(also_prev) != entry; also_prev = FRAG_ALSO(also_prev)) ; FRAG_ALSO_ASSIGN(also_prev, FRAG_ALSO(entry)); /* now remove from area frags list */ remove_fraglist_entry(dcontext, entry, area); } } } DOCHECK(CHKLVL_DEFAULT, { vm_area_check_clean_fraglist(area); }); DOLOG(6, LOG_VMAREAS, { print_fraglist(dcontext, area, "after cleaning "); }); } void vm_area_remove_fragment(dcontext_t *dcontext, fragment_t *f) { fragment_t *entry, *next, *match; /* must grab lock across whole thing since alsos can be changed * by vm_area_clean_fraglist() */ vm_area_vector_t *vector = &(GET_DATA(dcontext, f->flags))->areas; bool multi = FRAG_MULTI(f); bool lock = writelock_if_not_already(vector); if (!multi) { LOG(THREAD, LOG_VMAREAS, 4, "vm_area_remove_fragment: F%d tag="PFX"\n", f->id, f->tag); match = f; } else { /* we do get called for multi-entries from vm_area_destroy_list */ LOG(THREAD, LOG_VMAREAS, 4, "vm_area_remove_fragment: entry "PFX"\n", f); match = FRAG_FRAG(f); } ASSERT(FRAG_PREV(f) != NULL); /* prev wraps around, should never be null */ entry = f; while (entry != NULL) { DOLOG(5, LOG_VMAREAS, { print_entry(dcontext, entry, "\tremoving "); }); /* from vm_area_destroy_list we can end up deleting a multi-init */ ASSERT(FRAG_FRAG(entry) == match); next = FRAG_ALSO(entry); remove_fraglist_entry(dcontext, entry, NULL); entry = next; } if (!multi) /* else f may have been freed */ FRAG_ALSO_ASSIGN(f, NULL); DOLOG(7, LOG_VMAREAS, { print_fraglists(dcontext); }); /* f may no longer exist if it is FRAG_MULTI */ if (lock) SHARED_VECTOR_RWLOCK(vector, write, unlock); } /* adds the fragment list chained by next_vmarea starting at f to a new * pending deletion entry */ 
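/* Rough sketch of the shared-deletion bookkeeping set up below: each
 * pending_delete_t is stamped with the current flushtime and with a ref count
 * of the threads that still need to check the deletion queue.  E.g., if 3 such
 * threads exist at flush time, ref_count starts at 3; each thread whose
 * flushtime_last_update predates the stamp decrements it once in
 * vm_area_check_shared_pending(), and at 0 the entry's fragments are freed.
 */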
static void add_to_pending_list(dcontext_t *dcontext, fragment_t *f, uint refcount, uint flushtime _IF_DEBUG(app_pc start) _IF_DEBUG(app_pc end)) { pending_delete_t *pend; ASSERT_OWN_MUTEX(true, &shared_delete_lock); pend = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, pending_delete_t, ACCT_VMAREAS, PROTECTED); DODEBUG({ pend->start = start; pend->end = end; }); pend->frags = f; if (DYNAMO_OPTION(shared_deletion)) { /* Set up ref count and timestamp for delayed deletion */ pend->ref_count = refcount; pend->flushtime_deleted = flushtime; LOG(GLOBAL, LOG_VMAREAS, 2, "deleted area ref count=%d timestamp=%u start="PFX" end="PFX"\n", pend->ref_count, pend->flushtime_deleted, start, end); } /* add to front of list */ pend->next = todelete->shared_delete; todelete->shared_delete = pend; todelete->shared_delete_count++; if (pend->next == NULL) { ASSERT(todelete->shared_delete_tail == NULL); todelete->shared_delete_tail = pend; } if (DYNAMO_OPTION(reset_every_nth_pending) > 0 && DYNAMO_OPTION(reset_every_nth_pending) == todelete->shared_delete_count) { /* if too many pending entries are piling up, suspend all threads * in order to free them immediately. * we can get here multiple times before we actually do the reset * (can dec and then re-inc shared_delete_count), * but that's not a problem, except we have to move our stats inc * into the reset routine itself. */ schedule_reset(RESET_PENDING_DELETION/*NYI: currently this is ignored and we * do a full reset*/); } STATS_INC(num_shared_flush_regions); LOG(GLOBAL, LOG_VMAREAS, 3, "Pending list after adding deleted vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_pending_list(GLOBAL); }); } #if defined(DEBUG) && defined(INTERNAL) static void print_lazy_deletion_list(dcontext_t *dcontext, const char *msg) { uint i = 0; fragment_t *f; ASSERT_OWN_MUTEX(true, &lazy_delete_lock); LOG(THREAD, LOG_VMAREAS, 1, "%s", msg); for (f = todelete->lazy_delete_list; f != NULL; f = f->next_vmarea) { LOG(THREAD, LOG_VMAREAS, 1, "\t%d: F%d ("PFX")\n", i, f->id, f->tag); i++; } } #endif #ifdef DEBUG static void check_lazy_deletion_list_consistency() { uint i =0; fragment_t *f; ASSERT_OWN_MUTEX(true, &lazy_delete_lock); for (f = todelete->lazy_delete_list; f != NULL; f = f->next_vmarea) { i++; } ASSERT(i == todelete->lazy_delete_count); } #endif bool remove_from_lazy_deletion_list(dcontext_t *dcontext, fragment_t *remove) { fragment_t *f, *prev_f = NULL; mutex_lock(&lazy_delete_lock); /* FIXME: start using prev_vmarea?!? (case 7165) */ for (f = todelete->lazy_delete_list; f != NULL; prev_f = f, f = f->next_vmarea) { if (f == remove) { if (prev_f == NULL) todelete->lazy_delete_list = f->next_vmarea; else prev_f->next_vmarea = f->next_vmarea; if (f == todelete->lazy_delete_tail) todelete->lazy_delete_tail = prev_f; todelete->lazy_delete_count--; mutex_unlock(&lazy_delete_lock); return true; } } mutex_unlock(&lazy_delete_lock); return false; } /* Moves all lazy list entries into a real pending deletion entry. * Can only be called when !couldbelinking. */ static void move_lazy_list_to_pending_delete(dcontext_t *dcontext) { ASSERT_OWN_NO_LOCKS(); ASSERT(is_self_couldbelinking()); /* to properly set up ref count we MUST get a flushtime synched with a * thread count (otherwise we may have too many threads decrementing * the ref count, or vice versa, causing either premature or * never-occurring freeing), so we must grab thread_initexit_lock, * meaning we must be nolinking, meaning the caller must accept loss * of locals. 
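     * Note the lock acquisition order used below: thread_initexit_lock, then
     * shared_cache_flush_lock, then shared_delete_lock, then lazy_delete_lock.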
* FIXME: should switch to a flag-triggered addition in dispatch * to avoid this nolinking trouble. */ enter_nolinking(dcontext, NULL, false/*not a cache transition*/); mutex_lock(&thread_initexit_lock); /* to ensure no deletion queue checks happen in the middle of our update */ mutex_lock(&shared_cache_flush_lock); mutex_lock(&shared_delete_lock); mutex_lock(&lazy_delete_lock); if (todelete->move_pending) { /* it's possible for remove_from_lazy_deletion_list to drop the count */ #ifdef X86 DODEBUG({ fragment_t *f; /* Raise SIGILL if a deleted fragment gets executed again */ for (f = todelete->lazy_delete_list; f != NULL; f = f->next_vmarea) { *(ushort *) f->start_pc = RAW_OPCODE_SIGILL; } }); #endif DODEBUG({ if (todelete->lazy_delete_count <= DYNAMO_OPTION(lazy_deletion_max_pending)) { SYSLOG_INTERNAL_WARNING_ONCE("lazy_delete_count dropped below " "threshold before move to pending"); } }); LOG(THREAD, LOG_VMAREAS, 3, "moving lazy list to a pending deletion entry\n"); STATS_INC(num_lazy_del_to_pending); STATS_ADD(num_lazy_del_frags_to_pending, todelete->lazy_delete_count); /* ensure all threads in ref count will actually check the queue */ increment_global_flushtime(); add_to_pending_list(dcontext, todelete->lazy_delete_list, /* we do count this thread, as we aren't checking the * pending list here or inc-ing our flushtime */ get_num_threads(), flushtime_global _IF_DEBUG(NULL) _IF_DEBUG(NULL)); todelete->lazy_delete_list = NULL; todelete->lazy_delete_tail = NULL; todelete->lazy_delete_count = 0; todelete->move_pending = false; } else /* should not happen */ ASSERT(false && "race in move_lazy_list_to_pending_delete"); DODEBUG({ check_lazy_deletion_list_consistency(); }); mutex_unlock(&lazy_delete_lock); mutex_unlock(&shared_delete_lock); mutex_unlock(&shared_cache_flush_lock); mutex_unlock(&thread_initexit_lock); enter_couldbelinking(dcontext, NULL, false/*not a cache transition*/); } /* adds the list of fragments beginning with f and chained by {next,prev}_vmarea * to a new pending-lazy-deletion entry. * This routine may become nolinking, meaning that fragments may be freed * before this routine returns, so the caller should invalidate all pointers. * It also means that no locks may be held by the caller! */ void add_to_lazy_deletion_list(dcontext_t *dcontext, fragment_t *f) { /* rather than allocate memory for a pending operation to save memory, * we re-use f->incoming_stubs's slot (via a union), which is no longer needed * (caller should have already called incoming_remove_fragment()), to store our * timestamp, and next_vmarea to chain. */ fragment_t *tail, *prev = NULL; uint flushtime; bool perform_move = false; ASSERT_OWN_NO_LOCKS(); ASSERT(is_self_couldbelinking()); mutex_lock(&shared_cache_flush_lock); /* for consistent flushtime */ mutex_lock(&lazy_delete_lock); /* We need a flushtime as we are compared to shared deletion pending * entries, but we don't need to inc flushtime_global. We need a value * larger than any thread has already signed off on, and thus larger than * the current flushtime_global. We hold shared_cache_flush_lock to ensure * our flushtime retains that property until the lazy list is updated. * * (Optimization to allow lazy adds to proceed concurrently with deletion * list checks: don't grab the shared_cache_flush_lock. Since we're * couldbelinking, the flusher won't inc flushtime until we're done here, * and the lazy lock prevents other lazy adders from incing flushtime global * for a shift to pending deletion list (in code below). 
Then non-flusher * must hold lazy lock in general to inc flushtime.). */ ASSERT(flushtime_global < UINT_MAX); /* currently we reset if flushtime hits a threshold -- in which case we * may never reach this flushtime, but the reset if we hit threshold again, * moving lazy entries to pending delete (below), and -reset_every_nth_pending * combined should ensure we delete these fragments */ flushtime = flushtime_global + 1; /* we support adding a string of fragments at once * FIXME: if a string is common, move to a data structure w/ a * single timestamp for a group of fragments -- though lazy_deletion_max_pending * sort of does that for us. */ /* must append to keep the list reverse-sorted by flushtime */ if (todelete->lazy_delete_list == NULL) { ASSERT(todelete->lazy_delete_tail == NULL); todelete->lazy_delete_list = f; } else { ASSERT(todelete->lazy_delete_tail->next_vmarea == NULL); todelete->lazy_delete_tail->next_vmarea = f; } for (tail = f; tail != NULL; prev = tail, tail = tail->next_vmarea) { ASSERT(tail->also.also_vmarea == NULL); ASSERT(TEST(FRAG_SHARED, tail->flags)); tail->also.flushtime = flushtime; todelete->lazy_delete_count++; } todelete->lazy_delete_tail = prev; ASSERT(todelete->lazy_delete_tail != NULL); LOG(THREAD, LOG_VMAREAS, 3, "adding F%d to lazy deletion list @ timestamp %u\n", f->id, flushtime); STATS_INC(num_lazy_deletion_appends); DOLOG(5, LOG_VMAREAS, { print_lazy_deletion_list(dcontext, "Lazy deletion list after adding deleted fragment:\n"); }); DODEBUG({ check_lazy_deletion_list_consistency(); }); /* case 9115: ensure only one thread calls move_lazy_list_to_pending_delete, * to reduce thread_initexit_lock contention and subsequent synch_with_all_threads * performance issues */ if (!todelete->move_pending && todelete->lazy_delete_count > DYNAMO_OPTION(lazy_deletion_max_pending)) { perform_move = true; todelete->move_pending = true; } mutex_unlock(&lazy_delete_lock); mutex_unlock(&shared_cache_flush_lock); if (perform_move) { /* hit threshold -- move to real pending deletion entry */ /* had to release lazy_delete_lock and re-grab for proper rank order */ move_lazy_list_to_pending_delete(dcontext); } } /* frees all fragments on the lazy list with flushtimes less than flushtime */ static void check_lazy_deletion_list(dcontext_t *dcontext, uint flushtime) { fragment_t *f, *next_f; mutex_lock(&lazy_delete_lock); LOG(THREAD, LOG_VMAREAS, 3, "checking lazy list @ timestamp %u\n", flushtime); for (f = todelete->lazy_delete_list; f != NULL; f = next_f) { next_f = f->next_vmarea; /* may be freed so cache now */ LOG(THREAD, LOG_VMAREAS, 4, "\tf->id %u vs %u\n", f->id, f->also.flushtime, flushtime); if (f->also.flushtime <= flushtime) { /* it is safe to free! 
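             * (the caller only passes a flushtime once every thread that needed
             * to check the deletion queue has done so, or passes
             * flushtime_global+1 when freeing everything at exit/reset)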
*/ LOG(THREAD, LOG_VMAREAS, 3, "freeing F%d on lazy deletion list @ timestamp %u\n", f->id, flushtime); DOSTATS({ if (dcontext == GLOBAL_DCONTEXT) /* at exit */ STATS_INC(num_lazy_deletion_frees_atexit); else STATS_INC(num_lazy_deletion_frees); }); /* FIXME: separate stats for frees at exit time */ ASSERT(TEST(FRAG_SHARED, f->flags)); /* we assume we're freeing the entire head of the list */ todelete->lazy_delete_count--; todelete->lazy_delete_list = next_f; if (f == todelete->lazy_delete_tail) { ASSERT(todelete->lazy_delete_list == NULL); todelete->lazy_delete_tail = NULL; } #ifdef X86 DODEBUG({ /* Raise SIGILL if a deleted fragment gets executed again */ *(ushort *) f->start_pc = RAW_OPCODE_SIGILL; }); #endif fragment_delete(dcontext, f, FRAGDEL_NO_OUTPUT | FRAGDEL_NO_UNLINK | FRAGDEL_NO_HTABLE | FRAGDEL_NO_VMAREA); } else { /* the lazy list is appended to and thus reverse-sorted, so * we can stop now as the oldest items are at the front */ break; } } DOLOG(5, LOG_VMAREAS, { print_lazy_deletion_list(dcontext, "Lazy deletion list after freeing fragments:\n"); }); DODEBUG({ check_lazy_deletion_list_consistency(); }); mutex_unlock(&lazy_delete_lock); } /* Prepares a list of shared fragments for deletion.. * Caller should have already called vm_area_remove_fragment() on * each and chained them together via next_vmarea. * Caller must hold the shared_cache_flush_lock. * Returns the number of fragments unlinked */ int unlink_fragments_for_deletion(dcontext_t *dcontext, fragment_t *list, int pending_delete_threads) { fragment_t *f, *next; uint num = 0; /* only applies to lists of shared fragments -- we check the head now */ ASSERT(TEST(FRAG_SHARED, list->flags)); /* for shared_deletion we have to protect this whole walk w/ a lock so * that the flushtime_global value remains higher than any thread's * flushtime. */ ASSERT_OWN_MUTEX(DYNAMO_OPTION(shared_deletion), &shared_cache_flush_lock); acquire_recursive_lock(&change_linking_lock); for (f = list; f != NULL; f = next) { ASSERT(!FRAG_MULTI(f)); next = f->next_vmarea; if (SHARED_IB_TARGETS()) { /* Invalidate shared targets from all threads' ibl tables * (if private) or from shared ibl tables. Right now this * routine is only called mid-flush so it's safe to do * this here. */ flush_invalidate_ibl_shared_target(dcontext, f); } fragment_unlink_for_deletion(dcontext, f); num++; } release_recursive_lock(&change_linking_lock); mutex_lock(&shared_delete_lock); /* add area's fragments as a new entry in the pending deletion list */ add_to_pending_list(dcontext, list, pending_delete_threads, flushtime_global _IF_DEBUG(NULL) _IF_DEBUG(NULL)); mutex_unlock(&shared_delete_lock); STATS_ADD(list_entries_unlinked_for_deletion, num); return num; } /* returns the number of fragments unlinked */ int vm_area_unlink_fragments(dcontext_t *dcontext, app_pc start, app_pc end, int pending_delete_threads _IF_DGCDIAG(app_pc written_pc)) { /* dcontext is for another thread, so don't use THREAD to log. Cache the * logfile instead of repeatedly calling THREAD_GET. 
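    /* The walk below marks each overlapping vm area VM_DELETE_ME, unlinks every
     * fragment on its frags list and, for shared areas, moves those fragments to
     * the pending-deletion list before removing the area itself.
     */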
*/ LOG_DECLARE(file_t thread_log = get_thread_private_logfile();) thread_data_t *data = GET_DATA(dcontext, 0); fragment_t *entry, *next; int num = 0, i; if (data == shared_data) { /* we also need to add to the deletion list */ mutex_lock(&shared_delete_lock); acquire_recursive_lock(&change_linking_lock); /* we do not need the bb building lock, only the vm lock and * the fragment hashtable write lock, which is grabbed by fragment_remove */ SHARED_VECTOR_RWLOCK(&data->areas, write, lock); /* clear shared last_area now, don't want a new bb in flushed area * thought to be ok b/c of a last_area hit */ shared_data->last_area = NULL; /* for shared_deletion we have to protect this whole walk w/ a lock so * that the flushtime_global value remains higher than any thread's * flushtime. */ ASSERT_OWN_MUTEX(DYNAMO_OPTION(shared_deletion), &shared_cache_flush_lock); } LOG(thread_log, LOG_FRAGMENT|LOG_VMAREAS, 2, "vm_area_unlink_fragments "PFX".."PFX"\n", start, end); /* walk backwards to avoid O(n^2) * FIXME case 9819: could use executable_area_overlap_bounds() to avoid linear walk */ for (i = data->areas.length - 1; i >= 0; i--) { /* look for overlap */ if (start < data->areas.buf[i].end && end > data->areas.buf[i].start) { LOG(thread_log, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tmarking region "PFX".."PFX" for deletion & unlinking all its frags\n", data->areas.buf[i].start, data->areas.buf[i].end); data->areas.buf[i].vm_flags |= VM_DELETE_ME; if (data->areas.buf[i].start < start || data->areas.buf[i].end > end) { /* FIXME: best to only delete within asked-for flush area * however, checking every fragment's bounds is way too expensive * (surprisingly). we've gone through several different schemes, * including keeping a min_page and max_page in fragment_t, or * various multi-page flags, to make checking every fragment faster, * but keeping vm area lists is the most efficient. * HOWEVER, deleting outside the flush bounds can cause problems * if the caller holds fragment_t pointers and expects them not * to be flushed (e.g., a faulting write on a read-only code region). */ LOG(thread_log, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tWARNING: region "PFX".."PFX" is larger than " "flush area "PFX".."PFX"\n", data->areas.buf[i].start, data->areas.buf[i].end, start, end); } /* i#942: We can't flush a fragment list with multiple also entries * from the same fragment on it, or our iteration gets derailed. */ DOCHECK(CHKLVL_DEFAULT, { vm_area_check_clean_fraglist(&data->areas.buf[i]); }); ASSERT(!TEST(FRAG_COARSE_GRAIN, data->areas.buf[i].frag_flags)); for (entry = data->areas.buf[i].custom.frags; entry != NULL; entry = next) { fragment_t *f = FRAG_FRAG(entry); next = FRAG_NEXT(entry); ASSERT(f != next && "i#942: changing f's fraglist derails iteration"); /* case 9381: this shouldn't happen but we handle it to avoid crash */ if (FRAG_MULTI_INIT(entry)) { ASSERT(false && "stale multi-init entry on frags list"); /* stale init entry, just remove it */ vm_area_remove_fragment(dcontext, entry); continue; } /* case 9118: call fragment_unlink_for_deletion() even if fragment * is already unlinked */ if (!TEST(FRAG_WAS_DELETED, f->flags) || data == shared_data) { LOG(thread_log, LOG_FRAGMENT|LOG_VMAREAS, 5, "\tunlinking "PFX"%s F%d("PFX")\n", entry, FRAG_MULTI(entry) ? 
" multi": "", FRAG_ID(entry), FRAG_PC(entry)); /* need to remove also entries from other vm lists * thread-private doesn't have to do this b/c only unlinking, * so ok if encounter an also in same flush, except we * now do incoming_remove_fragment() for thread-private for * use of fragment_t.incoming_stubs as a union. so we * do this for all fragments. */ if (FRAG_ALSO(entry) != NULL || FRAG_MULTI(entry)) { if (FRAG_MULTI(entry)) { vm_area_remove_fragment(dcontext, f); /* move to this area's frags list so will get * transferred to deletion list if shared, or * freed from this marked-vmarea if private */ prepend_entry_to_fraglist(&data->areas.buf[i], f); } else { /* entry is the fragment, remove all its alsos */ vm_area_remove_fragment(dcontext, FRAG_ALSO(entry)); } FRAG_ALSO_ASSIGN(f, NULL); } if (data == shared_data && SHARED_IB_TARGETS()) { /* Invalidate shared targets from all threads' ibl * tables (if private) or from shared ibl tables */ flush_invalidate_ibl_shared_target(dcontext, f); } fragment_unlink_for_deletion(dcontext, f); #ifdef DGC_DIAGNOSTICS /* try to find out exactly which fragment contained written_pc */ if (written_pc != NULL) { app_pc bb; DOLOG(2, LOG_VMAREAS, { LOG(thread_log, LOG_VMAREAS, 1, "Flushing F%d "PFX":\n", FRAG_ID(entry), FRAG_PC(entry)); disassemble_fragment(dcontext, entry, false); LOG(thread_log, LOG_VMAREAS, 1, "First app bb for frag:\n"); disassemble_app_bb(dcontext, FRAG_PC(entry), thread_log); }); if (fragment_overlaps(dcontext, entry, written_pc, written_pc+1, false, NULL, &bb)) { LOG(thread_log, LOG_VMAREAS, 1, "Write target is actually inside app bb @"PFX":\n", written_pc); disassemble_app_bb(dcontext, bb, thread_log); } } #endif num++; } else { LOG(thread_log, LOG_FRAGMENT|LOG_VMAREAS, 5, "\tnot unlinking "PFX"%s F%d("PFX") (already unlinked)\n", entry, FRAG_MULTI(entry) ? " multi": "", FRAG_ID(entry), FRAG_PC(entry)); } /* let recreate_fragment_ilist() know that this fragment * is pending deletion and might no longer match the app's * state. note that if we called fragment_unlink_for_deletion() * then we already set this flag above. */ f->flags |= FRAG_WAS_DELETED; } DOLOG(6, LOG_VMAREAS, { print_fraglist(dcontext, &data->areas.buf[i], "Fragments after unlinking\n"); }); if (data == shared_data) { if (data->areas.buf[i].custom.frags != NULL) { /* add area's fragments as a new entry in the pending deletion list */ add_to_pending_list(dcontext, data->areas.buf[i].custom.frags, pending_delete_threads, flushtime_global _IF_DEBUG(data->areas.buf[i].start) _IF_DEBUG(data->areas.buf[i].end)); /* frags are moved over completely */ data->areas.buf[i].custom.frags = NULL; STATS_INC(num_shared_flush_regions); } /* ASSUMPTION: remove_vm_area, given exact bounds, simply shifts later * areas down in vector! 
*/ LOG(thread_log, LOG_VMAREAS, 3, "Before removing vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(&data->areas, thread_log); }); LOG(thread_log, LOG_VMAREAS, 2, "Removing shared vm area "PFX"-"PFX"\n", data->areas.buf[i].start, data->areas.buf[i].end); remove_vm_area(&data->areas, data->areas.buf[i].start, data->areas.buf[i].end, false); LOG(thread_log, LOG_VMAREAS, 3, "After removing vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(&data->areas, thread_log); }); } } } if (data == shared_data) { SHARED_VECTOR_RWLOCK(&data->areas, write, unlock); release_recursive_lock(&change_linking_lock); mutex_unlock(&shared_delete_lock); } LOG(thread_log, LOG_FRAGMENT|LOG_VMAREAS, 2, " Unlinked %d frags\n", num); return num; } /* removes incoming links for all private fragments in the dcontext * thread that contain 'pc' */ void vm_area_unlink_incoming(dcontext_t *dcontext, app_pc pc) { int i; thread_data_t *data; ASSERT(dcontext != GLOBAL_DCONTEXT); data = GET_DATA(dcontext, 0); for (i = data->areas.length - 1; i >= 0; i--) { if (pc >= data->areas.buf[i].start && pc < data->areas.buf[i].end) { fragment_t *entry; for (entry = data->areas.buf[i].custom.frags; entry != NULL; entry = FRAG_NEXT(entry)) { fragment_t *f = FRAG_FRAG(entry); ASSERT(!TEST(FRAG_SHARED, f->flags)); /* Note that we aren't unlinking or ibl-invalidating * (i.e., making unreachable) any fragments in other * threads containing pc. */ if ((f->flags & FRAG_LINKED_INCOMING) != 0) unlink_fragment_incoming(dcontext, f); fragment_remove_from_ibt_tables(dcontext, f, false); } } } } /* Decrements ref counts for thread-shared pending-deletion fragments, * and deletes those whose count has reached 0. * If dcontext==GLOBAL_DCONTEXT, does NOT check the ref counts and assumes it's * safe to free EVERYTHING. * Returns false iff was_I_flushed has been flushed (not necessarily * fully freed yet though, but may be at any time after this call * returns, so caller should drop its ref to it). */ bool vm_area_check_shared_pending(dcontext_t *dcontext, fragment_t *was_I_flushed) { pending_delete_t *pend; pending_delete_t *pend_prev = NULL; pending_delete_t *pend_nxt; /* a local list used to arrange in reverse order of flushtime */ pending_delete_t *tofree = NULL; fragment_t *entry, *next; int num = 0; DEBUG_DECLARE(int i = 0;) bool not_flushed = true; ASSERT(DYNAMO_OPTION(shared_deletion) || dynamo_exited); /* must pass in real dcontext, unless exiting or resetting */ ASSERT(dcontext != GLOBAL_DCONTEXT || dynamo_exited || dynamo_resetting); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "thread "TIDFMT" (flushtime %d) walking pending deletion list (was_I_flushed==F%d)\n", get_thread_id(), dcontext == GLOBAL_DCONTEXT ? flushtime_global : get_flushtime_last_update(dcontext), (was_I_flushed==NULL) ? -1 : was_I_flushed->id); STATS_INC(num_shared_flush_walks); /* synch w/ anyone incrementing flushtime_global and using its * value when adding to the shared deletion list (currently flushers * and lazy list transfers). 
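     * While holding it, the walk below decrements an entry's ref count only when
     * this thread's flushtime_last_update predates the entry's flushtime_deleted;
     * entries whose count reaches 0 are moved to a local tofree list and freed
     * afterward.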
*/ mutex_lock(&shared_cache_flush_lock); /* check if was_I_flushed has been flushed, prior to dec ref count and * allowing anyone to be fully freed */ if (was_I_flushed != NULL && TESTALL(FRAG_SHARED|FRAG_WAS_DELETED, was_I_flushed->flags)) { not_flushed = false; if (was_I_flushed == dcontext->last_fragment) last_exit_deleted(dcontext); } /* we can hit check points before we re-enter the cache, so we cannot * rely on the enter_couldbelinking of exiting the cache for invalidating * last_fragment -- we must check here as well (case 7453) (and case 7666, * where a non-null was_I_flushed prevented this check from executing). */ if (dcontext != GLOBAL_DCONTEXT && dcontext->last_fragment != NULL && TESTALL(FRAG_SHARED|FRAG_WAS_DELETED, dcontext->last_fragment->flags)) { last_exit_deleted(dcontext); } mutex_lock(&shared_delete_lock); for (pend = todelete->shared_delete; pend != NULL; pend = pend_nxt) { bool delete_area = false; pend_nxt = pend->next; LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, " Considering #%d: "PFX".."PFX" flushtime %d\n", i, pend->start, pend->end, pend->flushtime_deleted); if (dcontext == GLOBAL_DCONTEXT) { /* indication that it's safe to free everything */ delete_area = true; if (dynamo_exited) STATS_INC(num_shared_flush_atexit); else STATS_INC(num_shared_flush_atreset); } else if (get_flushtime_last_update(dcontext) < pend->flushtime_deleted) { ASSERT(pend->ref_count > 0); pend->ref_count--; STATS_INC(num_shared_flush_refdec); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tdec => ref_count is now %d, flushtime diff is %d\n", pend->ref_count, flushtime_global - pend->flushtime_deleted); delete_area = (pend->ref_count == 0); DODEBUG({ if (INTERNAL_OPTION(detect_dangling_fcache) && delete_area) { /* don't actually free fragments until exit so we can catch any * lingering links or ibt entries */ delete_area = false; for (entry = pend->frags; entry != NULL; entry = FRAG_NEXT(entry)) { /* we do have to notify caller of flushing held ptrs */ if (FRAG_FRAG(entry) == was_I_flushed) ASSERT(!not_flushed); /* should have been caught up top */ /* catch any links or ibt entries allowing access to deleted * fragments by filling w/ int3 instead of reusing the cache * space. this will show up as a pc translation assert, * typically. 
*/ /* should only get fragment_t here */ ASSERT(!FRAG_MULTI(entry)); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 4, "\tfilling F%d "PFX"-"PFX" with 0x%x\n", entry->id, entry->start_pc, entry->start_pc+entry->size, DEBUGGER_INTERRUPT_BYTE); memset(entry->start_pc, DEBUGGER_INTERRUPT_BYTE, entry->size); } } }); DOSTATS({ if (delete_area) STATS_INC(num_shared_flush_refzero); }); } else { /* optimization: since we always pre-pend, can skip all the rest, as * they are guaranteed to have been ok-ed by us already */ LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\t(aborting now since rest have already been ok-ed)\n"); break; } if (delete_area) { /* we want to delete in increasing order of flushtime so that * fcache unit flushing will not occur before all lazily deleted * fragments in a unit are freed */ if (pend_prev == NULL) todelete->shared_delete = pend->next; else pend_prev->next = pend->next; if (pend == todelete->shared_delete_tail) { ASSERT(pend->next == NULL); todelete->shared_delete_tail = pend_prev; } pend->next = tofree; tofree = pend; } else pend_prev = pend; DODEBUG({ i++; }); } for (pend = tofree; pend != NULL; pend = pend_nxt) { pend_nxt = pend->next; /* we now know that any objects unlinked at or before this entry's * timestamp are safe to be freed (although not all earlier objects have * yet been freed, so containers cannot necessarily be freed: case 8242). * free these before this entry's fragments as they are older * (fcache unit flushing relies on this order). */ check_lazy_deletion_list(dcontext, pend->flushtime_deleted); STATS_TRACK_MAX(num_shared_flush_maxdiff, flushtime_global - pend->flushtime_deleted); DOSTATS({ /* metric: # times flushtime diff is > #threads */ if (flushtime_global - pend->flushtime_deleted > (uint) get_num_threads()) STATS_INC(num_shared_flush_diffthreads); }); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tdeleting all fragments in region "PFX".."PFX" flushtime %u\n", pend->start, pend->end, pend->flushtime_deleted); ASSERT(pend->frags != NULL); for (entry = pend->frags; entry != NULL; entry = next) { next = FRAG_NEXT(entry); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 5, "\tremoving "PFX"%s F%d("PFX")\n", entry, FRAG_MULTI(entry) ? " multi": "", FRAG_ID(entry), FRAG_PC(entry)); if (FRAG_FRAG(entry) == was_I_flushed) ASSERT(!not_flushed); /* should have been caught up top */ /* vm_area_unlink_fragments should have removed all multis/alsos */ ASSERT(!FRAG_MULTI(entry)); /* FRAG_ALSO is used by lazy list so it may not be NULL */ ASSERT(TEST(FRAG_WAS_DELETED, FRAG_FRAG(entry)->flags)); /* do NOT call vm_area_remove_fragment, as it will freak out trying * to look up the area this fragment is in */ fragment_delete(dcontext, FRAG_FRAG(entry), FRAGDEL_NO_OUTPUT | FRAGDEL_NO_UNLINK | FRAGDEL_NO_HTABLE | FRAGDEL_NO_VMAREA); STATS_INC(num_fragments_deleted_consistency); num++; } ASSERT(todelete->shared_delete_count > 0); todelete->shared_delete_count--; HEAP_TYPE_FREE(GLOBAL_DCONTEXT, pend, pending_delete_t, ACCT_VMAREAS, PROTECTED); } if (tofree != NULL) { /* if we freed something (careful: tofree is dangling) */ /* case 8242: due to -syscalls_synch_flush, a later entry can * reach refcount 0 before an earlier entry, so we cannot free * units until all earlier entries have been freed. 
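         * Thus we only free units up through one less than the flushtime of the
         * oldest entry still pending; if nothing remains pending, everything up
         * through the current flushtime_global is safe.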
*/ if (todelete->shared_delete_tail == NULL) fcache_free_pending_units(dcontext, flushtime_global); else { fcache_free_pending_units(dcontext, todelete->shared_delete_tail->flushtime_deleted - 1); } } if (dcontext == GLOBAL_DCONTEXT) { /* need to free everything */ check_lazy_deletion_list(dcontext, flushtime_global+1); fcache_free_pending_units(dcontext, flushtime_global+1); /* reset_every_nth_pending relies on this */ ASSERT(todelete->shared_delete_count == 0); } mutex_unlock(&shared_delete_lock); STATS_TRACK_MAX(num_shared_flush_maxpending, i); /* last_area cleared in vm_area_unlink_fragments */ LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "thread "TIDFMT" done walking pending list @flushtime %d\n", get_thread_id(), flushtime_global); if (dcontext != GLOBAL_DCONTEXT) { /* update thread timestamp */ set_flushtime_last_update(dcontext, flushtime_global); } mutex_unlock(&shared_cache_flush_lock); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, " Flushed %d frags\n", num); return not_flushed; } /* Deletes all pending-delete thread-private vm areas belonging to dcontext. * Returns false iff was_I_flushed ends up being deleted. */ bool vm_area_flush_fragments(dcontext_t *dcontext, fragment_t *was_I_flushed) { thread_data_t *data = GET_DATA(dcontext, 0); vm_area_vector_t *v = &data->areas; fragment_t *entry, *next; int i, num = 0; bool not_flushed = true; /* should call vm_area_check_shared_pending for shared flushing */ ASSERT(data != shared_data); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "vm_area_flush_fragments\n"); /* walk backwards to avoid O(n^2) */ for (i = v->length - 1; i >= 0; i--) { LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, " Considering %d == "PFX".."PFX"\n", i, v->buf[i].start, v->buf[i].end); if (TEST(VM_DELETE_ME, v->buf[i].vm_flags)) { LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tdeleting all fragments in region "PFX".."PFX"\n", v->buf[i].start, v->buf[i].end); for (entry = v->buf[i].custom.frags; entry != NULL; entry = next) { next = FRAG_NEXT(entry); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 5, "\tremoving "PFX"%s F%d("PFX")\n", entry, FRAG_MULTI(entry) ? " multi": "", FRAG_ID(entry), FRAG_PC(entry)); if (FRAG_FRAG(entry) == was_I_flushed) { not_flushed = false; if (was_I_flushed == dcontext->last_fragment) last_exit_deleted(dcontext); } ASSERT(TEST(FRAG_WAS_DELETED, FRAG_FRAG(entry)->flags)); ASSERT(FRAG_ALSO_DEL_OK(entry) == NULL); fragment_delete(dcontext, FRAG_FRAG(entry), /* We used to leave link, vmarea, and htable removal * until here for private fragments, but for case * 3559 we wanted link removal at unlink time, and * the 3 of them must go together, so we now do all 3 * at unlink time just like for shared fragments. */ FRAGDEL_NO_OUTPUT | FRAGDEL_NO_UNLINK | FRAGDEL_NO_HTABLE | FRAGDEL_NO_VMAREA); STATS_INC(num_fragments_deleted_consistency); num++; } v->buf[i].custom.frags = NULL; /* could just remove flush region...but we flushed entire vm region * ASSUMPTION: remove_vm_area, given exact bounds, simply shifts later * areas down in vector! */ LOG(THREAD, LOG_VMAREAS, 3, "Before removing vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(v, THREAD); }); remove_vm_area(v, v->buf[i].start, v->buf[i].end, false); LOG(THREAD, LOG_VMAREAS, 3, "After removing vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(v, THREAD); }); } } #ifdef WINDOWS /* The relink needs a real thread dcontext, so don't pass a GLOBAL_DCONTEXT * in. This can occur when flushing shared fragments. 
Functionally, this is * fine since only private fragments are routed thru shared syscall, and * flush requests for such fragments are provided with a real thread * context. */ if (DYNAMO_OPTION(shared_syscalls) && dcontext != GLOBAL_DCONTEXT && !IS_SHARED_SYSCALL_THREAD_SHARED) { /* re-link shared syscall */ link_shared_syscall(dcontext); } #endif /* i#849: re-link private xfer */ if (dcontext != GLOBAL_DCONTEXT && special_ibl_xfer_is_thread_private()) link_special_ibl_xfer(dcontext); data->last_area = NULL; DOSTATS({ if (num == 0) STATS_INC(num_flushq_actually_empty); }); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, " Flushed %d frags\n", num); DOLOG(7, LOG_VMAREAS, { SHARED_VECTOR_RWLOCK(&data->areas, read, lock); print_fraglists(dcontext); SHARED_VECTOR_RWLOCK(&data->areas, read, unlock); }); return not_flushed; } /* Flushes all units grouped with info. * Caller must hold change_linking_lock, read lock hotp_get_lock(), and * executable_areas lock. */ static void vm_area_flush_coarse_unit(dcontext_t *dcontext, coarse_info_t *info_in, vm_area_t *area, bool all_synched, bool entire) { coarse_info_t *info = info_in, *next_info; ASSERT(info != NULL); ASSERT_OWN_RECURSIVE_LOCK(true, &change_linking_lock); #ifdef HOT_PATCHING_INTERFACE ASSERT_OWN_READWRITE_LOCK(DYNAMO_OPTION(hot_patching), hotp_get_lock()); #endif ASSERT(READ_LOCK_HELD(&executable_areas->lock)); /* Need a real dcontext for persisting rac */ if (dcontext == GLOBAL_DCONTEXT) dcontext = get_thread_private_dcontext(); if (DYNAMO_OPTION(coarse_freeze_at_unload)) { /* we do not try to freeze if we've failed to suspend the world */ if (all_synched) { /* in-place builds a separate unit anyway so no savings that way */ vm_area_coarse_region_freeze(dcontext, info, area, false/*!in place*/); STATS_INC(persist_unload_try); } else { SYSLOG_INTERNAL_WARNING_ONCE("not freezing due to synch failure"); STATS_INC(persist_unload_suspend_failure); } } while (info != NULL) { /* loop over primary and secondary unit */ next_info = info->non_frozen; ASSERT(info->frozen || info->non_frozen == NULL); if (!entire && TEST(PERSCACHE_CODE_INVALID, info->flags)) { /* Do not reset yet as it may become valid again. * Assumption: if !entire, we will leave this info there. */ /* Should only mark invalid if no or empty secondary unit */ ASSERT(next_info == NULL || next_info->cache == NULL); break; } DOSTATS({ if (info->persisted) { STATS_INC(flush_persisted_units); if (os_module_get_flag(info->base_pc, MODULE_BEING_UNLOADED)) STATS_INC(flush_persisted_unload); } STATS_INC(flush_coarse_units); }); coarse_unit_reset_free(dcontext, info, false/*no locks*/, true/*unlink*/, true/*give up primary*/); /* We only want one non-frozen unit per region; we keep the 1st unit */ if (info != info_in) { coarse_unit_free(GLOBAL_DCONTEXT, info); info = NULL; } else coarse_unit_mark_in_use(info); /* still in-use if re-used */ /* The remaining info itself is freed from exec list in remove_vm_area, * though may remain if only part of this region is removed * and will be lazily re-initialized if we execute from there again. * FIXME: case 8640: better to remove it all here? */ info = next_info; ASSERT(info == NULL || !info->frozen); } } /* Assumes that all threads are suspended at safe synch points. * Flushes fragments in the region [start, end) in the vmarea * list for del_dcontext. * If dcontext == del_dcontext == GLOBAL_DCONTEXT, * removes shared fine fragments and coarse units in the region. 
* If dcontext == thread and del_dcontext == GLOBAL_DCONTEXT, * removes any ibl table entries for shared fragments in the region. * WARNING: this routine will not remove coarse ibl entries! * Else (both dcontexts are the local thread's), deletes private fragments * in the region. * FIXME: share code w/ vm_area_unlink_fragments() and vm_area_flush_fragments()! * all_synched is ignored unless dcontext == GLOBAL_DCONTEXT */ void vm_area_allsynch_flush_fragments(dcontext_t *dcontext, dcontext_t *del_dcontext, app_pc start, app_pc end, bool exec_invalid, bool all_synched) { thread_data_t *data = GET_DATA(del_dcontext, 0); vm_area_vector_t *v = &data->areas; fragment_t *entry, *next; int i; bool remove_shared_vm_area = true; DEBUG_DECLARE(int num_fine = 0;) DEBUG_DECLARE(int num_coarse = 0;) LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "vm_area_allsynch_flush_fragments "PFX" "PFX"\n", dcontext, del_dcontext); ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock)); ASSERT(is_self_allsynch_flushing()); /* change_linking_lock is higher ranked than shared_vm_areas lock and is * acquired for fragment_delete()'s unlinking as well as fcache removal to * add to free list, so we must grab it up front. * coarse_unit_persist and coarse_unit_freeze also require it to be held. */ acquire_recursive_lock(&change_linking_lock); if (dcontext == GLOBAL_DCONTEXT && del_dcontext == GLOBAL_DCONTEXT) { /* We can't add persisted units to shared vector at load time due to * lock rank orders, so we normally add on first access -- but we can * flush before any access, so we must walk exec areas here. * While we're at it we do our coarse unit freeing here, so don't have * to do lookups in exec areas while walking shared vmarea vector below. */ #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) read_lock(hotp_get_lock()); /* case 9970: rank hotp < exec_areas */ #endif read_lock(&executable_areas->lock); /* no need to write */ for (i = 0; i < executable_areas->length; i++) { if (TEST(FRAG_COARSE_GRAIN, executable_areas->buf[i].frag_flags) && start < executable_areas->buf[i].end && end > executable_areas->buf[i].start) { coarse_info_t *coarse = (coarse_info_t *) executable_areas->buf[i].custom.client; bool do_flush = (coarse != NULL); #ifdef HOT_PATCHING_INTERFACE /* Case 9995: do not flush for 1-byte (mostly hotp) regions that are * still valid execution regions and that are recorded as not being * present in persistent caches. */ if (do_flush && !exec_invalid && start + 1 == end && coarse->hotp_ppoint_vec != NULL) { app_pc modbase = get_module_base(coarse->base_pc); ASSERT(modbase <= start); /* Only persisted units store vec, though we could store for * frozen but not persisted if we had frequent nudges throwing * them out. 
*/ ASSERT(coarse->persisted); if (hotp_ppoint_on_list((app_rva_t)(start - modbase), coarse->hotp_ppoint_vec, coarse->hotp_ppoint_vec_num)) { do_flush = false; STATS_INC(perscache_hotp_flush_avoided); remove_shared_vm_area = false; } } #endif if (do_flush) { vm_area_flush_coarse_unit(dcontext, coarse, &executable_areas->buf[i], all_synched, start <= executable_areas->buf[i].start && end >= executable_areas->buf[i].end); DODEBUG({ num_coarse++; }); if (TEST(VM_ADD_TO_SHARED_DATA, executable_areas->buf[i].vm_flags)) { LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tdeleting coarse unit not yet in shared vector " PFX".."PFX"\n", executable_areas->buf[i].start, executable_areas->buf[i].end); /* This flag is only relevant for persisted units, so we clear it * here since this same coarse_info_t may be re-used */ executable_areas->buf[i].vm_flags &= ~VM_ADD_TO_SHARED_DATA; } } } } read_unlock(&executable_areas->lock); #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) read_unlock(hotp_get_lock()); #endif } SHARED_VECTOR_RWLOCK(v, write, lock); /* walk backwards to avoid O(n^2) * FIXME case 9819: could use executable_area_overlap_bounds() to avoid linear walk */ for (i = v->length - 1; i >= 0; i--) { if (start < v->buf[i].end && end > v->buf[i].start) { if (v->buf[i].start < start || v->buf[i].end > end) { /* see comments in vm_area_unlink_fragments() */ LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tWARNING: region "PFX".."PFX" is larger than flush area" " "PFX".."PFX"\n", v->buf[i].start, v->buf[i].end, start, end); } LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tdeleting all fragments in region "PFX".."PFX"\n", v->buf[i].start, v->buf[i].end); /* We flush coarse units in executable_areas walk down below */ /* We can have fine fragments here as well */ if (v->buf[i].custom.frags != NULL) { for (entry = v->buf[i].custom.frags; entry != NULL; entry = next) { next = FRAG_NEXT(entry); if (dcontext == del_dcontext) { LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 5, "\tremoving "PFX"%s F%d("PFX")\n", entry, FRAG_MULTI(entry) ? " multi": "", FRAG_ID(entry), FRAG_PC(entry)); if (SHARED_IBT_TABLES_ENABLED()) { /* fragment_remove() won't remove from shared ibt tables, * b/c assuming we didn't do the synch for it, so we * have to explicitly remove */ fragment_remove_from_ibt_tables(dcontext, FRAG_FRAG(entry), true/*rm from shared*/); } fragment_delete(dcontext, FRAG_FRAG(entry), FRAGDEL_ALL); STATS_INC(num_fragments_deleted_consistency); DODEBUG({ num_fine++; }); } else { ASSERT(dcontext != GLOBAL_DCONTEXT && del_dcontext == GLOBAL_DCONTEXT); fragment_remove_from_ibt_tables(dcontext, FRAG_FRAG(entry), false/*shouldn't be in shared*/); } } if (dcontext == del_dcontext) v->buf[i].custom.frags = NULL; } if (dcontext == del_dcontext && remove_shared_vm_area) { /* could just remove flush region...but we flushed entire vm region * ASSUMPTION: remove_vm_area, given exact bounds, simply shifts later * areas down in vector! 
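 * (That assumption also keeps the backward walk valid: removing entry i only
 * shifts the already-visited higher-indexed areas down, leaving the lower
 * indices we have yet to visit untouched.)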
*/ LOG(THREAD, LOG_VMAREAS, 3, "Before removing vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(v, THREAD); }); remove_vm_area(v, v->buf[i].start, v->buf[i].end, false); LOG(THREAD, LOG_VMAREAS, 3, "After removing vm area:\n"); DOLOG(3, LOG_VMAREAS, { print_vm_areas(v, THREAD); }); } else { ASSERT(dcontext != del_dcontext || /* should only not flush for special hotp case 9995 */ start + 1 == end); } } } if (dcontext == del_dcontext) data->last_area = NULL; SHARED_VECTOR_RWLOCK(v, write, unlock); release_recursive_lock(&change_linking_lock); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, " Flushed %d fine frags & %d coarse units\n", num_fine, num_coarse); DOLOG(7, LOG_VMAREAS, { SHARED_VECTOR_RWLOCK(v, read, lock); print_fraglists(dcontext); SHARED_VECTOR_RWLOCK(v, read, unlock); }); } /* Deletes all coarse units */ void vm_area_coarse_units_reset_free() { vm_area_vector_t *v = executable_areas; int i; ASSERT(DYNAMO_OPTION(coarse_units)); LOG(GLOBAL, LOG_FRAGMENT|LOG_VMAREAS, 2, "vm_area_coarse_units_reset_free\n"); ASSERT(dynamo_exited || dynamo_resetting); DOLOG(1, LOG_VMAREAS, { LOG(GLOBAL, LOG_VMAREAS, 1, "\nexecutable_areas before reset:\n"); print_executable_areas(GLOBAL); }); /* We would grab executable_areas_lock but coarse_unit_reset_free() grabs * change_linking_lock and coarse_info_lock, both of higher rank. We could * grab change_linking_lock first here and raise executable_areas_lock above * coarse_info_lock's rank, but executable_areas_lock can be acquired during * coarse_unit_unlink after special_heap_lock -- so the best solution is to * not grab executable_areas_lock here and rely on reset synch. */ for (i = 0; i < v->length; i++) { if (TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags)) { coarse_info_t *info_start = (coarse_info_t *) v->buf[i].custom.client; coarse_info_t *info = info_start, *next_info; ASSERT(info != NULL); while (info != NULL) { /* loop over primary and secondary unit */ next_info = info->non_frozen; ASSERT(info->frozen || info->non_frozen == NULL); LOG(GLOBAL, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tdeleting all fragments in region "PFX".."PFX"\n", v->buf[i].start, v->buf[i].end); coarse_unit_reset_free(GLOBAL_DCONTEXT, info, false/*no locks*/, true/*unlink*/, true/*give up primary*/); /* We only want one non-frozen unit per region; we keep the 1st one */ if (info != info_start) { coarse_unit_free(GLOBAL_DCONTEXT, info); info = NULL; } else coarse_unit_mark_in_use(info); /* still in-use if re-used */ /* The start info itself is freed in remove_vm_area, if exiting */ /* XXX i#1051: should re-load persisted caches after reset */ info = next_info; ASSERT(info == NULL || !info->frozen); } } } } /* Returns true if info && info->non_frozen meet the size requirements * for persisting. */ static bool coarse_region_should_persist(dcontext_t *dcontext, coarse_info_t *info) { bool cache_large_enough = false; size_t cache_size = 0; /* Must hold lock to get size but ok for size to change afterward; * normal usage has all threads synched */ if (!info->persisted) { mutex_lock(&info->lock); cache_size += coarse_frozen_cache_size(dcontext, info); mutex_unlock(&info->lock); } if (info->non_frozen != NULL) { mutex_lock(&info->non_frozen->lock); cache_size += coarse_frozen_cache_size(dcontext, info->non_frozen); mutex_unlock(&info->non_frozen->lock); } LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tconsidering persisting coarse unit %s with cache size %d\n", info->module, cache_size); /* case 10107: check for disk space before freezing, if persisting. 
* A crude estimate is all we need up front (we'll do a precise check at file * write time): estimate that hashtables, stubs, etc. double cache size. */ if (!coarse_unit_check_persist_space(INVALID_FILE, cache_size * 2)) { LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tnot enough disk space for %s\n", info->module); STATS_INC(coarse_units_persist_nospace); return false; } cache_large_enough = (cache_size > DYNAMO_OPTION(coarse_freeze_min_size) || (info->persisted && /* FIXME: should use append size if merging only w/ disk as well */ cache_size > DYNAMO_OPTION(coarse_freeze_append_size))); #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) /* Real cost is in pages touched while walking reloc, which is * typically 80% of module. */ if (rct_module_live_entries(dcontext, info->base_pc, RCT_RCT) > DYNAMO_OPTION(coarse_freeze_rct_min)) { DOSTATS({ if (!cache_large_enough) STATS_INC(persist_code_small); }); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tRCT entries are over threshold so persisting %s\n", info->module); return true; } #endif /* defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) */ DOSTATS({ if (!cache_large_enough) { LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tnot persisting %s since too small\n", info->module); STATS_INC(persist_too_small); } }); return cache_large_enough; } /* FIXME case 9975: we should provide separate control over persistence * (today we assume !in_place==persist) so we can persist and use in_place * rather than having to wait until next run to get the benefit. */ /* FIXME: if we map in a newly persisted unit we need to set * VM_PERSISTED_CACHE, but we only care about it in executable_areas. */ /* Caller must hold change_linking_lock, read lock hotp_get_lock(), and * either executable_areas lock or dynamo_all_threads_synched. */ static void vm_area_coarse_region_freeze(dcontext_t *dcontext, coarse_info_t *info, vm_area_t *area, bool in_place) { coarse_info_t *frozen_info = NULL; /* the already-frozen info */ coarse_info_t *unfrozen_info = NULL; /* the un-frozen info */ if (!DYNAMO_OPTION(coarse_enable_freeze) || RUNNING_WITHOUT_CODE_CACHE()) return; ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); ASSERT(info != NULL); ASSERT_OWN_RECURSIVE_LOCK(true, &change_linking_lock); #ifdef HOT_PATCHING_INTERFACE ASSERT_OWN_READWRITE_LOCK(DYNAMO_OPTION(hot_patching), hotp_get_lock()); #endif ASSERT(READ_LOCK_HELD(&executable_areas->lock) || dynamo_all_threads_synched); /* Note that freezing in place will call mark_executable_area_coarse_frozen and * add a new unit, so next_info should not be traversed after freezing. 
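 * (A coarse region has at most one frozen and one non-frozen unit, linked via
 * info->non_frozen with the frozen unit first; the cases below rely on that
 * invariant.)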
*/ if (info->frozen) { frozen_info = info; unfrozen_info = info->non_frozen; } else { unfrozen_info = info; ASSERT(info->non_frozen == NULL); } if (unfrozen_info != NULL && unfrozen_info->cache != NULL /*skip empty units*/ && !TEST(PERSCACHE_CODE_INVALID, unfrozen_info->flags) && /* we only freeze a unit in presence of a frozen unit if we're merging * (we don't support side-by-side frozen units) */ (DYNAMO_OPTION(coarse_freeze_merge) || frozen_info == NULL)) { if (in_place || coarse_region_should_persist(dcontext, info)) { coarse_info_t *frozen; coarse_info_t *premerge; LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "\tfreezing coarse unit for region "PFX".."PFX" %s\n", info->base_pc, info->end_pc, info->module); if (frozen_info != NULL && in_place) { /* We're freezing unfrozen_info, merging frozen_info into it, and * then deleting frozen_info, so we need to replace it with just * unfrozen_info (soon to be frozen); we do it this way since * mark_executable_area_coarse_frozen assumes being-frozen info is * the 1st info. */ area->custom.client = (void *) unfrozen_info; } frozen = coarse_unit_freeze(dcontext, unfrozen_info, in_place); ASSERT(frozen != NULL && frozen->frozen); /* mark_executable_area_coarse_frozen creates new non_frozen for in_place */ ASSERT(!in_place || frozen->non_frozen != NULL); premerge = frozen; if (frozen_info != NULL) { ASSERT(DYNAMO_OPTION(coarse_freeze_merge)); /* case 9701: more efficient to merge while freezing, but * this way we share code w/ offline merger */ /* I would put most-likely-larger unit as first source since more * efficient to merge into, but we need frozen first in case * we are in_place. */ frozen = coarse_unit_merge(dcontext, frozen, frozen_info, in_place); ASSERT(frozen != NULL); ASSERT(!in_place || frozen->non_frozen != NULL); if (frozen == NULL && in_place) { /* Shouldn't happen w/ online units; if it does we end up * tossing frozen_info w/o merging it */ frozen = premerge; } /* for !in_place we free premerge after persisting, so clients don't * get deletion events that remove data from hashtables too early * (xref https://github.com/DynamoRIO/drmemory/issues/869) */ if (in_place) { coarse_unit_reset_free(dcontext, frozen_info, false/*no locks*/, true/*need to unlink*/, false/*keep primary*/); coarse_unit_free(dcontext, frozen_info); frozen_info = NULL; } } if (!in_place && frozen != NULL) { coarse_unit_persist(dcontext, frozen); coarse_unit_reset_free(dcontext, frozen, false/*no locks*/, false/*already unlinked*/, false/*not in use anyway*/); coarse_unit_free(dcontext, frozen); frozen = NULL; } else ASSERT(frozen == unfrozen_info); if (frozen_info != NULL && !in_place && premerge != NULL) { /* see comment above: delayed until after persist */ coarse_unit_reset_free(dcontext, premerge, false/*no locks*/, false/*already unlinked*/, false/*not in use anyway*/); ASSERT(frozen != premerge); coarse_unit_free(dcontext, premerge); premerge = NULL; } } } else if (frozen_info != NULL && frozen_info->cache != NULL && !in_place && !frozen_info->persisted) { ASSERT(!TEST(PERSCACHE_CODE_INVALID, frozen_info->flags)); if (coarse_region_should_persist(dcontext, frozen_info)) coarse_unit_persist(dcontext, frozen_info); } } /* FIXME: could create iterator and move this and vm_area_coarse_units_reset_free() * into callers * If !in_place this routine freezes (if not already) and persists. 
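 * in_place keeps the frozen unit live in this process; !in_place persists the
 * frozen unit to disk and then frees the in-memory copy, as done in
 * vm_area_coarse_region_freeze() above.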
*/ void vm_area_coarse_units_freeze(bool in_place) { vm_area_vector_t *v = executable_areas; int i; dcontext_t *dcontext = get_thread_private_dcontext(); if (!DYNAMO_OPTION(coarse_units) || !DYNAMO_OPTION(coarse_enable_freeze) || RUNNING_WITHOUT_CODE_CACHE()) return; ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); ASSERT(dcontext != NULL); LOG(THREAD, LOG_FRAGMENT|LOG_VMAREAS, 2, "vm_area_coarse_units_freeze\n"); ASSERT(dynamo_all_threads_synched); acquire_recursive_lock(&change_linking_lock); #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) read_lock(hotp_get_lock()); #endif /* We would grab executable_areas_lock but coarse_unit_freeze() grabs * change_linking_lock and coarse_info_lock, both of higher rank. We could * grab change_linking_lock first here and raise executable_areas_lock above * coarse_info_lock's rank, but executable_areas_lock can be acquired during * coarse_unit_unlink after special_heap_lock -- so the best solution is to * not grab executable_areas_lock here and rely on all_threads_synched. * Could make executable_areas_lock recursive and grab all locks here? */ for (i = 0; i < v->length; i++) { if (TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags)) { coarse_info_t *info = (coarse_info_t *) v->buf[i].custom.client; ASSERT(info != NULL); if (info != NULL) vm_area_coarse_region_freeze(dcontext, info, &v->buf[i], in_place); } } #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) read_unlock(hotp_get_lock()); #endif release_recursive_lock(&change_linking_lock); } #if 0 /* not used */ /* remove a thread's vm area */ static bool remove_thread_vm_area(dcontext_t *dcontext, app_pc start, app_pc end) { thread_data_t *data = GET_DATA(dcontext, 0); bool ok; LOG(THREAD, LOG_VMAREAS, 2, "removing thread "TIDFMT" vm area: "PFX"-"PFX"\n", dcontext->owning_thread, start, end); /* no lock needed, this is thread-private */ ok = remove_vm_area(&data->areas, start, end, false); /* due to re-sorting, areas move around...not worth trying to shift, * just clear the cache area */ data->last_area = NULL; return ok; } #endif /* returns true if the passed in area overlaps any thread executable areas */ bool thread_vm_area_overlap(dcontext_t *dcontext, app_pc start, app_pc end) { thread_data_t *data = GET_DATA(dcontext, 0); bool res; if (data == shared_data) { ASSERT(!self_owns_write_lock(&shared_data->areas.lock)); SHARED_VECTOR_RWLOCK(&data->areas, write, lock); } res = vm_area_overlap(&data->areas, start, end); if (data == shared_data) { SHARED_VECTOR_RWLOCK(&data->areas, write, unlock); } return res; } /* Returns NULL if should re-execute the faulting write * Else returns the target pc for a new basic block -- caller should * return to dispatch rather than the code cache * If instr_cache_pc==NULL, assumes the cache is unavailable (due to reset). */ app_pc handle_modified_code(dcontext_t *dcontext, cache_pc instr_cache_pc, app_pc instr_app_pc, app_pc target, fragment_t *f) { /* FIXME: for Linux, this is all happening inside signal handler... * flushing could take a while, and signals are blocked the entire time! 
*/ app_pc base_pc, flush_start = NULL, next_pc; app_pc instr_size_pc; size_t size, flush_size = 0, instr_size; uint opnd_size = 0; uint prot; overlap_info_t info = {0,/* init to 0 so info.overlap is false */}; app_pc bb_start = NULL; app_pc bb_end = NULL; app_pc bb_pstart = NULL, bb_pend = NULL; /* pages occupied by instr's bb */ vm_area_t *a = NULL; fragment_t wrapper; /* get the "region" size (don't use exec list, it merges regions), * the os merges regions too, and we might have changed the protections * on the region and caused it do so, so below we take the intersection * with the enclosing executable_areas region if it exists */ bool ok = get_memory_info(target, &base_pc, &size, &prot); if (f == NULL && instr_cache_pc != NULL) f = fragment_pclookup(dcontext, instr_cache_pc, &wrapper); /* FIXME: what if seg fault is b/c target is unreadable? then should have * app die, not us trigger assertion! */ /* In the absence of reset, f MUST still be in the cache since we're still * nolinking, and pclookup will find it even if it's no longer in htables. * But, a reset can result in not having the fragment available at all. In * that case we just flush the whole region and hope that in the future * we'll eventually identify the writer, but there's a possibility of no * forward progress if another thread keeps flushing writing fragment * (ro2sandbox_threshold would alleviate that). */ DOLOG(1, LOG_VMAREAS, { if (instr_cache_pc == NULL) { LOG(THREAD, LOG_VMAREAS, 1, "WARNING: cache unavailable for processing code mod @ app pc "PFX"\n", instr_app_pc); } else if (f == NULL) { LOG(THREAD, LOG_VMAREAS, 1, "WARNING: cannot find fragment @ writer pc "PFX" -- was deleted, " "or native\n", instr_cache_pc); } }); ASSERT(ok); SYSLOG_INTERNAL_WARNING_ONCE("writing to executable region."); STATS_INC(num_write_faults); read_lock(&executable_areas->lock); lookup_addr(executable_areas, (app_pc)target, &a); if (a == NULL) { LOG(THREAD, LOG_VMAREAS, 1, "\tRegion for "PFX" not exec, probably data on same page\n", target); DOLOG(2, LOG_VMAREAS, { print_vm_areas(executable_areas, THREAD); }); } else { /* The os may have merged regions because we made a region read * only! (ref case 2803), thus we should take the intersection of * the region on our list and the os region */ /* make sure to handle sub-page regions, pad to page boundary */ app_pc a_pstart = (app_pc)ALIGN_BACKWARD(a->start, PAGE_SIZE); app_pc a_pend = (app_pc)ALIGN_FORWARD(a->end, PAGE_SIZE); if (a_pstart > base_pc) { size -= a_pstart - base_pc; base_pc = a_pstart; } if (a_pend < base_pc + size) { size = a_pend - base_pc; } LOG(THREAD, LOG_VMAREAS, 1, "WARNING: Exec "PFX"-"PFX" %s%s written @"PFX" by "PFX" == app "PFX"\n", base_pc, base_pc+size, ((a->vm_flags & VM_WRITABLE) != 0) ? "W" : "", ((prot & MEMPROT_EXEC) != 0) ? 
"E" : "", target, instr_cache_pc, instr_app_pc); } read_unlock(&executable_areas->lock); #ifdef DGC_DIAGNOSTICS DOLOG(1, LOG_VMAREAS, { /* it's hard to locate frag owning an app pc in the cache, so we wait until * we flush and only check the flushed frags */ char buf[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(instr_app_pc, buf, sizeof(buf), false); LOG(THREAD, LOG_VMAREAS, 1, "code written by app pc "PFX" from bb %s:\n", instr_app_pc, buf); disassemble_app_bb(dcontext, instr_app_pc, THREAD); }); #endif if (TEST(MEMPROT_WRITE, prot)) { LOG(THREAD, LOG_VMAREAS, 1, "\tWARNING: region now writable: assuming another thread already flushed it\n" "\tgoing to flush again just to make sure\n"); /* we could just bail here, but could have no forward progress if repeated * races between selfmod writer and out-of-region writer */ STATS_INC(num_write_fault_races); } /* see if writer is inside our region * need instr size and opnd size to check for page boundary overlaps! * For reset when the cache is not present, we decode from the app code, * though that's racy! solution is to have reset store a copy of the app instr * (FIXME case 7393). */ instr_size_pc = (instr_cache_pc == NULL) ? instr_app_pc : instr_cache_pc; next_pc = decode_memory_reference_size(dcontext, instr_size_pc, &opnd_size); ASSERT(next_pc != NULL); ASSERT(opnd_size != 0); instr_size = next_pc - instr_size_pc; /* FIXME case 7492: if write crosses page boundary, the reported faulting * target for win32 will be in the middle of the instr's target (win32 * reports the first unwritable byte). (On Linux we're fine as we calculate * the target ourselves.) */ if (target + opnd_size > base_pc + size) { /* must expand to cover entire target, even if crosses OS regions */ app_pc t_pend = (app_pc)ALIGN_FORWARD(target + opnd_size, PAGE_SIZE); size = t_pend - base_pc; } /* see if instr's bb is in region * not good enough to only check instr! * will end up in infinite loop if any part of bb overlaps the executable * region removed! * if f was deleted, we threw away its also info, so we have to do a full * overlaps lookup. f cannot have been removed completely since we * count as being in the shared cache and could be inside f. */ if (f != NULL && /* faster check up front if frag not deleted -- BUT, we are in * a race w/ any flusher marking as deleted! * so, we make vm_list_overlaps not assert on a not-there fragment, * and only if it finds it and it's STILL not marked do we trust the * return value. */ (vm_list_overlaps(dcontext, (void *)f, base_pc, base_pc+size) || TEST(FRAG_WAS_DELETED, f->flags))) { fragment_overlaps(dcontext, f, instr_app_pc, instr_app_pc+1, false /* fine-grain! */, &info, &bb_start); /* if did fast check and it said overlap, slow check should too */ ASSERT(TEST(FRAG_WAS_DELETED, f->flags) || info.overlap); } if (info.overlap) { /* instr_t may be in region, but could also be from a different region * included in a trace. Determine if instr bb overlaps with target * region. * Move to page boundaries, with inclusive end pages. 
* We must look at entire bb containing instr, not just instr * itself (can't isolate write from its bb -- will always * enter from top of bb, even across direct cti) */ ASSERT(info.overlap && bb_start != NULL); if (info.contiguous) bb_end = info.bb_end; else { /* FIXME: could be smart and have info include list of all pages, * handle situations like start outside of region and jmp/call in, * but this is going to be rare -- let's just take min and max of * entire bb, even if that includes huge area (in which case we'll * consider it self-modifying code, even if jumped over middle) */ bb_start = info.min_pc; bb_end = info.max_pc; ASSERT(bb_start != NULL && bb_end != NULL); } bb_pstart = (app_pc) PAGE_START(bb_start); bb_pend = (app_pc) PAGE_START(bb_end); ASSERT(instr_app_pc >= bb_pstart && instr_app_pc+instr_size <= bb_pend+PAGE_SIZE); ASSERT(f != NULL); /* else info.overlap should not be set */ } /* Now we can check if source bb overlaps target region. */ if (info.overlap && base_pc < (bb_pend + PAGE_SIZE) && (base_pc + size) > bb_pstart) { /* bb pages overlap target region - * We want to split up region to keep instr exec but target writable. * All pages touched by target will become writable. * All pages in instr's bb must remain executable (can't isolate * write from its bb -- will always enter from top of bb) */ /* pages occupied by target */ app_pc tgt_pstart = (app_pc) PAGE_START(target); app_pc tgt_pend = (app_pc) PAGE_START(target+opnd_size); DOSTATS({ /* race condition case of another thread flushing 1st */ if (TEST(MEMPROT_WRITE, prot)) STATS_INC(num_write_fault_races_selfmod); }); LOG(THREAD, LOG_VMAREAS, 2, "Write instr is inside F%d "PFX"\n", f->id, f->tag); LOG(THREAD, LOG_VMAREAS, 1, "\tinstr's bb src "PFX"-"PFX" overlaps target "PFX"-"PFX"\n", bb_start, bb_end, target, target+opnd_size); /* look for selfmod overlap */ if (bb_pstart <= tgt_pend && bb_pend >= tgt_pstart) { vm_area_t *execarea; app_pc nxt_on_page; LOG(THREAD, LOG_VMAREAS, 1, "WARNING: self-modifying code: instr @"PFX" (in bb "PFX"-"PFX")\n" "\twrote to "PFX"-"PFX"\n", instr_app_pc, bb_start, bb_end, target, target+opnd_size); SYSLOG_INTERNAL_WARNING_ONCE("self-modifying code."); /* can leave non-intersection part of instr pages as executable, * no need to flush them */ /* DGC_DIAGNOSTICS: have flusher pass target to * vm_area_unlink_fragments to check if code was actually overwritten */ flush_fragments_in_region_start(dcontext, (app_pc)tgt_pstart, (tgt_pend+PAGE_SIZE-tgt_pstart), false /* don't own initexit_lock */, false /* keep futures */, true /* exec invalid */, false /* don't force synchall */ _IF_DGCDIAG(target)); /* flush_* grabbed exec areas lock for us, to make following sequence atomic */ /* need to change all exec areas on these pages to be selfmod */ for (ok = true, nxt_on_page = (app_pc) tgt_pstart; ok && nxt_on_page < (app_pc)tgt_pend + PAGE_SIZE; ) { ok = binary_search(executable_areas, nxt_on_page, (app_pc)tgt_pend+PAGE_SIZE, &execarea, NULL, true /* want 1st match! */); if (ok) { nxt_on_page = execarea->end; if (TESTANY(FRAG_SELFMOD_SANDBOXED, execarea->frag_flags)) { /* not calling remove_vm_area so we have to vm_make_writable * FIXME: why do we have to do anything if already selfmod? 
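 * (DR_MADE_READONLY indicates we removed write permission ourselves to detect
 * code writes, so we restore the app's writability now that the area will be
 * sandboxed rather than kept read-only.)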
*/ if (DR_MADE_READONLY(execarea->vm_flags)) vm_make_writable(execarea->start, execarea->end - execarea->start); continue; } if (execarea->start < (app_pc)tgt_pstart || execarea->end > (app_pc)tgt_pend + PAGE_SIZE) { /* this area sticks out from our target area, so we split it * by removing and then re-adding (as selfmod) the overlap portion */ uint old_vmf = execarea->vm_flags; uint old_ff = execarea->frag_flags; app_pc old_start = (execarea->start < tgt_pstart) ? tgt_pstart : execarea->start; app_pc old_end = (execarea->end > tgt_pend + PAGE_SIZE) ? tgt_pend + PAGE_SIZE : execarea->end; LOG(GLOBAL, LOG_VMAREAS, 2, "removing executable vm area to mark selfmod: "PFX"-"PFX"\n", old_start, old_end); remove_vm_area(executable_areas, old_start, old_end, true); /* now re-add */ add_executable_vm_area(old_start, old_end, old_vmf, old_ff | FRAG_SELFMOD_SANDBOXED, true /*own lock */ _IF_DEBUG("selfmod replacement")); STATS_INC(num_selfmod_vm_areas); /* this won't hurt our iteration since it's stateless except for * nxt_on_page */ } else { LOG(THREAD, LOG_VMAREAS, 2, "\tmarking "PFX"-"PFX" as selfmod\n", execarea->start, execarea->end); execarea->frag_flags |= SANDBOX_FLAG(); STATS_INC(num_selfmod_vm_areas); /* not calling remove_vm_area so we have to vm_make_writable */ if (DR_MADE_READONLY(execarea->vm_flags)) vm_make_writable(execarea->start, execarea->end - execarea->start); } } } LOG(GLOBAL, LOG_VMAREAS, 3, "After marking all areas in "PFX"-"PFX" as selfmod:\n", tgt_pstart, tgt_pend+PAGE_SIZE); DOLOG(3, LOG_VMAREAS, { print_vm_areas(executable_areas, GLOBAL); }); flush_fragments_in_region_finish(dcontext, false /*don't keep initexit_lock*/); if (DYNAMO_OPTION(opt_jit) && !TEST(MEMPROT_WRITE, prot) && is_jit_managed_area((app_pc)tgt_pstart)) { jitopt_clear_span((app_pc) tgt_pstart, (app_pc) (tgt_pend+PAGE_SIZE-tgt_pstart)); } /* must execute instr_app_pc next, even though that new bb will be * useless afterward (will most likely re-enter from bb_start) */ return instr_app_pc; } else { /* Not selfmod, but target and bb region may still overlap - * heuristic: split the region up -- assume will keep writing * to higher addresses and keep executing at higher addresses. */ if (tgt_pend < bb_pstart) { /* make all pages from tgt_pstart up to bb_pstart or * region end (which ever is first) non-exec */ /* FIXME - CHECK - should we really be starting at * base_pc instead? Not clear why we shouldn't start at * region start (like we would if we didn't have an * overlap). 
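 * Below: if the written target lies entirely below the bb's pages we flush
 * [tgt_pstart, bb_pstart); if it lies entirely above them we flush from
 * tgt_pstart to the end of the region.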
*/ flush_start = tgt_pstart; ASSERT(bb_pstart < (base_pc + size) && bb_pstart > tgt_pstart); flush_size = bb_pstart - tgt_pstart; } else if (tgt_pstart > bb_pend) { /* make all pages from tgt_pstart to end of region non-exec */ flush_start = tgt_pstart; flush_size = (base_pc + size) - tgt_pstart; } else { /* should never get here -- all cases covered above */ ASSERT_NOT_REACHED(); } LOG(THREAD, LOG_VMAREAS, 2, "splitting region up, flushing just "PFX"-"PFX"\n", flush_start, flush_start+flush_size); } } else { ASSERT(!info.overlap || (f != NULL && TEST(FRAG_IS_TRACE, f->flags))); /* instr not in region, so move entire region off the executable list */ flush_start = base_pc; flush_size = size; LOG(THREAD, LOG_VMAREAS, 2, "instr not in region, flushing entire "PFX"-"PFX"\n", flush_start, flush_start+flush_size); } /* DGC_DIAGNOSTICS: have flusher pass target to * vm_area_unlink_fragments to check if code was actually overwritten */ flush_fragments_in_region_start(dcontext, flush_start, flush_size, false /* don't own initexit_lock */, false /* keep futures */, true /* exec invalid */, false /* don't force synchall */ _IF_DGCDIAG(target)); f = NULL; /* after the flush we don't know if it's safe to deref f */ if (DYNAMO_OPTION(ro2sandbox_threshold) > 0) { /* add removed region to written list to track # of times this has happened * actually, we only track by the written-to page * FIXME case 8161: should we add more than just the page? * we'll keep adding the whole region until it hits the ro2sandbox threshold, * at which point we'll just add the page */ ro_vs_sandbox_data_t *ro2s; write_lock(&written_areas->lock); /* use the add routine to lookup if present, add if not */ add_written_area(written_areas, target, (app_pc) PAGE_START(target), (app_pc) PAGE_START(target+opnd_size) + PAGE_SIZE, &a); ASSERT(a != NULL); ro2s = (ro_vs_sandbox_data_t *) a->custom.client; ro2s->written_count++; LOG(GLOBAL, LOG_VMAREAS, 2, "written area "PFX"-"PFX" now written %d X\n", a->start, a->end, ro2s->written_count); DOLOG(3, LOG_VMAREAS, { LOG(GLOBAL, LOG_VMAREAS, 2, "\nwritten areas:\n"); print_vm_areas(written_areas, GLOBAL); }); write_unlock(&written_areas->lock); } if ( #ifdef PROGRAM_SHEPHERDING !DYNAMO_OPTION(selfmod_futureexec) && #endif is_executable_area_on_all_selfmod_pages(target, target+opnd_size)) { /* We can be in various races with another thread in handling write * faults to this same region. We check at the start of this routine, * but in practice (case 7911) I've seen the race more often show up * here, after the flush synch. If another thread has already switched * the target region to selfmod, then we shouldn't remove it from * executable_areas here. In fact if we were to remove it we would foil * the selfmod->remove future optimizations (case 280) (once-only at * NtFlush, selfmod when used to validate exec area, and remove * overlapping futures w/ new selfmod exec area). */ /* FIXME: is it worth checking this selfmod overlap in earlier places, * like the start of this routine, or at the start of the flush synch, * which could save some synch work and perhaps avoid the flush * altogether? 
*/ STATS_INC(flush_selfmod_race_no_remove); LOG(THREAD, LOG_VMAREAS, 2, "Target "PFX" is already selfmod, race, no reason to remove\n", target); } else { /* flush_* grabbed exec areas lock for us, to make vm_make_writable, * remove global vm area, and lookup an atomic sequence */ LOG(GLOBAL, LOG_VMAREAS, 2, "removing executable vm area since written: "PFX"-"PFX"\n", flush_start, flush_start+flush_size); /* FIXME : are we removing regions that might not get re-added here? * what about things that came from once only future or mem prot changes, * the region removed here can be much larger then just the page written */ /* FIXME (part of case 3744): should remove only non-selfmod regions here! * Then can eliminate the if above. Could pass filter flag to remove_vm_area, * but better to just split code origins from consistency and not have * sub-page regions on the consistency list (case 3744). */ remove_vm_area(executable_areas, flush_start, flush_start+flush_size, true/*restore writability!*/); LOG(THREAD, LOG_VMAREAS, 2, "Removed "PFX"-"PFX" from exec list, continuing @ write\n", flush_start, flush_start+flush_size); } DOLOG(3, LOG_VMAREAS, { thread_data_t *data = GET_DATA(dcontext, 0); LOG(THREAD, LOG_VMAREAS, 2, "\nexecutable areas:\n"); print_vm_areas(executable_areas, THREAD); LOG(THREAD, LOG_VMAREAS, 2, "\nthread areas:\n"); print_vm_areas(&data->areas, THREAD); }); /* There is no good way to tell if we flushed f or not, so need to start * interpreting at instr_app_pc. If f was a trace could overlap flushed * region even if the src bb didn't and anyways flushing can end up * flushing outside the requested region (entire vm_area_t). If we could tell * we could return NULL instead (which is a special flag that says redo the * write instead of going to dispatch) if f wasn't flushed. * FIXME - Redoing the write would be more efficient then going back to * dispatch and should be the common case. */ flush_fragments_in_region_finish(dcontext, false /*don't keep initexit_lock*/); if (DYNAMO_OPTION(opt_jit) && !TEST(MEMPROT_WRITE, prot) && is_jit_managed_area(flush_start)) jitopt_clear_span(flush_start, flush_start+flush_size); return instr_app_pc; } /* Returns the counter a selfmod fragment should execute for -sandbox2ro_threshold */ uint * get_selfmod_exec_counter(app_pc tag) { vm_area_t *area = NULL; ro_vs_sandbox_data_t *ro2s; uint *counter; bool ok; read_lock(&written_areas->lock); ok = lookup_addr(written_areas, tag, &area); if (!ok) { read_unlock(&written_areas->lock); read_lock(&executable_areas->lock); write_lock(&written_areas->lock); ok = lookup_addr(executable_areas, tag, &area); ASSERT(ok && area != NULL); /* FIXME: do this addition whenever add new exec area marked as * selfmod? * FIXME case 8161: add only add one page? since never split written_areas? * For now we add the whole region, reasoning that as a selfmod * region it's probably not very big anyway. * In Sun's JVM 1.4.2 we actually never get here b/c we always * have an executable region already present before we make it selfmod, * so we're only adding to written_areas when we get a write fault, * at which point we only use the surrounding page. */ STATS_INC(num_sandbox_before_ro); add_written_area(written_areas, tag, area->start, area->end, &area); ASSERT(area != NULL); ro2s = (ro_vs_sandbox_data_t *) area->custom.client; counter = &ro2s->selfmod_execs; /* Inc of selfmod_execs from cache can have problems if it crosses a * cache line, so we assert on the 32-bit alignment we should get from * the heap. 
add_written_area already asserts but we double-check here. */ ASSERT(ALIGNED(counter, sizeof(uint))); write_unlock(&written_areas->lock); read_unlock(&executable_areas->lock); } else { ASSERT(ok && area != NULL); ro2s = (ro_vs_sandbox_data_t *) area->custom.client; counter = &ro2s->selfmod_execs; read_unlock(&written_areas->lock); } /* ref to counter will be accessed in-cache w/o read lock but * written_areas is never merged and counter won't be freed until * exit time. */ return counter; } /* Returns true if f has been flushed */ bool vm_area_selfmod_check_clear_exec_count(dcontext_t *dcontext, fragment_t *f) { ro_vs_sandbox_data_t *ro2s = NULL; vm_area_t *exec_area = NULL, *written_area; app_pc start, end; bool ok; bool convert_s2ro = true; if (DYNAMO_OPTION(sandbox2ro_threshold) == 0) return false; /* NOTE - we could only grab the readlock here. Even though we're going to * write to selfmod_execs count, it's not really protected by the written_areas * lock since we read and write to it from the cache. Should change to read lock * if contention ever becomes an issue. Note that we would then have to later * grab the write lock if we need to write to ro2s->written_count below. */ write_lock(&written_areas->lock); ok = lookup_addr(written_areas, f->tag, &written_area); if (ok) { ro2s = (ro_vs_sandbox_data_t *) written_area->custom.client; } else { /* never had instrumentation */ write_unlock(&written_areas->lock); return false; } if (ro2s->selfmod_execs < DYNAMO_OPTION(sandbox2ro_threshold)) { /* must be a real fragment modification, reset the selfmod_execs count * xref case 9908 */ LOG(THREAD, LOG_VMAREAS, 3, "Fragment "PFX" self-write -> "PFX"-"PFX" selfmod exec counter reset, old" " count=%d\n", f->tag, written_area->start, written_area->end, ro2s->selfmod_execs); /* Write must be atomic since we access this field from the cache, an aligned * 4 byte write is atomic on the architectures we support. */ ASSERT(sizeof(ro2s->selfmod_execs) == 4 && ALIGNED(&(ro2s->selfmod_execs), 4)); ro2s->selfmod_execs = 0; write_unlock(&written_areas->lock); return false; } LOG(THREAD, LOG_VMAREAS, 1, "Fragment "PFX" caused "PFX"-"PFX" to cross sandbox2ro threshold %d vs %d\n", f->tag, written_area->start, written_area->end, ro2s->selfmod_execs, DYNAMO_OPTION(sandbox2ro_threshold)); start = written_area->start; end = written_area->end; /* reset to avoid immediate re-trigger */ ro2s->selfmod_execs = 0; if (is_on_stack(dcontext, f->tag, NULL)) { /* Naturally we cannot make the stack ro. We checked when we built f, * but esp must now point elsewhere. We go ahead and flush and assume * that when we rebuild f we won't put the instrumentation in. */ convert_s2ro = false; STATS_INC(num_sandbox2ro_onstack); LOG(THREAD, LOG_VMAREAS, 1, "Fragment "PFX" is on stack now!\n", f->tag); ASSERT_CURIOSITY(false && "on-stack selfmod bb w/ counter inc"); } if (convert_s2ro && DYNAMO_OPTION(ro2sandbox_threshold) > 0) { /* We'll listen to -sandbox2ro_threshold even if a selfmod region * didn't become that way via -ro2sandbox_threshold, to avoid perf * problems w/ other code in the same region, and to take advantage of * patterns of write at init time and then never selfmod again. * FIXME: have a different threshold for regions made selfmod for actual * self-writes versus -ro2sandbox_threshold regions? * If there is a written_count, we reset it so it can trigger again. 
* We reset here rather than when ro2sandbox_threshold is triggered as * ro2sandbox only does a page at a time and if keeping a count for * multiple pages doesn't want to clear that count too early. */ LOG(THREAD, LOG_VMAREAS, 2, "re-setting written executable vm area: "PFX"-"PFX" written %d X\n", written_area->start, written_area->end, ro2s->written_count); ro2s->written_count = 0; } DOLOG(3, LOG_VMAREAS, { LOG(THREAD, LOG_VMAREAS, 2, "\nwritten areas:\n"); print_vm_areas(written_areas, THREAD); }); write_unlock(&written_areas->lock); /* Convert the selfmod region to a ro region. * FIXME case 8161: should we flush and make ro the executable area, * or the written area? Written area may only be a page if made * selfmod due to a code write, but then it should match the executable area * in the common case, though written area may be larger if executable area * is from a tiny NtFlush. If we make a sub-piece of the executable area ro, * the rest will remain selfmod and will eventually come here anyway. */ flush_fragments_in_region_start(dcontext, start, end - start, false /* don't own initexit_lock */, false /* keep futures */, true /* exec invalid */, false /* don't force synchall */ _IF_DGCDIAG(NULL)); if (convert_s2ro) { DODEBUG(ro2s->s2ro_xfers++;); /* flush_* grabbed executable_areas lock for us */ ok = lookup_addr(executable_areas, f->tag, &exec_area); if (ok) { if (TEST(FRAG_SELFMOD_SANDBOXED, exec_area->frag_flags)) { /* FIXME: if exec area is larger than flush area, it's * ok since marking fragments in a ro region as selfmod * is not a correctness problem. Current flush impl, though, * will flush whole region. */ vm_area_t area_copy = *exec_area; /* copy since we remove it */ exec_area = &area_copy; LOG(THREAD, LOG_VMAREAS, 1, "\tconverting "PFX"-"PFX" from sandbox to ro\n", exec_area->start, exec_area->end); exec_area->frag_flags &= ~FRAG_SELFMOD_SANDBOXED; /* can't ASSERT(!TEST(VM_MADE_READONLY, area->vm_flags)) (case 7877) */ vm_make_unwritable(exec_area->start, exec_area->end - exec_area->start); exec_area->vm_flags |= VM_MADE_READONLY; /* i#942: Remove the sandboxed area and re-add it to merge it * back with any areas it used to be a part of. */ remove_vm_area(executable_areas, exec_area->start, exec_area->end, false /* !restore_prot */); ok = add_executable_vm_area(exec_area->start, exec_area->end, exec_area->vm_flags, exec_area->frag_flags, true /*own lock */ _IF_DEBUG("selfmod replacement")); ASSERT(ok); /* Re-do the lookup in case of merger. */ ok = lookup_addr(executable_areas, f->tag, &exec_area); ASSERT(ok); LOG(THREAD, LOG_VMAREAS, 3, "After marking "PFX"-"PFX" as NOT selfmod:\n", exec_area->start, exec_area->end); DOLOG(3, LOG_VMAREAS, { print_vm_areas(executable_areas, THREAD); }); STATS_INC(num_sandbox2ro); } else { /* must be a race! 
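 * another thread must have already converted this area from selfmod back to ro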
*/ LOG(THREAD, LOG_VMAREAS, 3, "Area "PFX"-"PFX" is ALREADY not selfmod!\n", exec_area->start, exec_area->end); STATS_INC(num_sandbox2ro_race); } } else { /* must be a flushing race */ LOG(THREAD, LOG_VMAREAS, 3, "Area "PFX"-"PFX" is no longer there!\n", start, end); STATS_INC(num_sandbox2ro_flush_race); } } ASSERT(exec_area == NULL || /* never looked up */ (start < exec_area->end && end > exec_area->start)); flush_fragments_in_region_finish(dcontext, false /*don't keep initexit_lock*/); if (DYNAMO_OPTION(opt_jit) && is_jit_managed_area(start)) jitopt_clear_span(start, end); return true; } void mark_unload_start(app_pc module_base, size_t module_size) { /* in thin client mode we don't allocate this, * but we do track unloads in -client mode */ if (last_deallocated == NULL) return; ASSERT(DYNAMO_OPTION(unloaded_target_exception)); ASSERT_CURIOSITY(!last_deallocated->unload_in_progress); /* we may have a race, or a thread killed during unload syscall, * either way we just mark our last region on top of the old one */ mutex_lock(&last_deallocated_lock); last_deallocated->last_unload_base = module_base; last_deallocated->last_unload_size = module_size; last_deallocated->unload_in_progress = true; mutex_unlock(&last_deallocated_lock); } void mark_unload_future_added(app_pc module_base, size_t size) { /* case 9371: if a thread gets preempted before returning from * unmapviewofsection and in the mean time another has a _future_ exec * area allocated at the same place and executes from it, we should not * throw exception mistakenly if the area would have been allowed */ if (last_deallocated == NULL) return; ASSERT(DYNAMO_OPTION(unloaded_target_exception)); ASSERT_CURIOSITY(!last_deallocated->unload_in_progress && "future while unload"); /* FIXME: more preciselly we should only remove our intersection * with the last module, otherwise don't need to, but it is never * expected to happen, so not optimizing at all */ last_deallocated->unload_in_progress = false; } void mark_unload_end(app_pc module_base) { if (last_deallocated == NULL) return; ASSERT(DYNAMO_OPTION(unloaded_target_exception)); /* We're trying to avoid a spurious security violation while we * are flushing our security policies, but before the address is * actually fully unloaded. So if we don't have an entry in our * executable_areas or RAC or RCT policies then we should either * find the address unreadable with query_virtual_memory(), or we * should make sure that we find it as is_currently_unloaded_region(). */ /* The fact that we have reached this routine already guarantees * that the memory was made unreadable (whether the memory is * still unreadable is not guaranteed, see below). Yet if we do * checks in proper order is_currently_unloaded_region() _before_ * is_readable_without_exception(), as we do in the convenience * routine is_unreadable_or_currently_unloaded_region(), we can * get away without a barrier here. */ /* FIXME: Otherwise we'd need a barrier, such that until a security * policy reader is done, we cannot mark the module as unloaded, * and if they start doing their check after this - then they * should get a policy consistent with the memory already being * unreadable. (For example, we can synchronize with * check_thread_vm_area() via * {executable_areas_lock();executable_areas_unlock()} but since * all other policies have sufficient information from unreadable * memory, we're OK with a DLL being completely unloaded. 
*/ /* FIXME: note we may want to grab the appropriate policy locks so * that we can thus delay our declaring we're no longer unloading * a module until the policy processing is done, e.g. if one has * started querying a security policy while we are unloading, we * should preserve the marker until they are done. * for .B we hold a writable executable_areas_lock(), * watch out here if for case 9371 we want to also mark_unload_end() * on any new allocations * FIXME: the RCT policies however we don't hold a lock. */ /* FIXME: case 9372 Note that we may still have a problem primarily if a DLL * gets subsequently reloaded at the same location, (so we have * lost our flag) so after a time in which we make our checks * whether the target is unreadable, the new version will show up * and may not yet be fully processed in postsys_MapViewOfSection * (and even if it is, we may have already checked our * policies). I assume this should be less frequent than the * unload side (although it still shows up in our * win32/reload-race.c). At least not a problem if the DLL gets * reloaded at a different address, like case 9121 or with -aslr 1 */ /* note grabbing this lock is only useful for the ASSERTs, setting * the flag is atomic even without it. * is_unreadable_or_currently_unloaded_region() when used in * proper order doesn't need to synchronize with this lock either */ mutex_lock(&last_deallocated_lock); /* note, we mark_unload_start on MEM_IMAGE but mark_unload_end on * MEM_MAPPED as well. Note base doesn't have to match as long as * it is within the module */ ASSERT_CURIOSITY(!last_deallocated->unload_in_progress || ((last_deallocated->last_unload_base <= module_base && module_base < (last_deallocated->last_unload_base + last_deallocated->last_unload_size)) && "race - multiple unmaps")); DOLOG(1, LOG_VMAREAS, { /* there are a few cases where DLLs aren't unloaded by real * base uxtheme.dll, but I haven't seen them */ ASSERT_CURIOSITY(!last_deallocated->unload_in_progress || (last_deallocated->last_unload_base == module_base && "not base")); }); /* multiple racy unmaps can't be handled simultaneously anyways */ last_deallocated->unload_in_progress = false; mutex_unlock(&last_deallocated_lock); } bool is_in_last_unloaded_region(app_pc pc) { bool in_last = true; if (last_deallocated == NULL) return false; ASSERT(DYNAMO_OPTION(unloaded_target_exception)); mutex_lock(&last_deallocated_lock); /* if we are in such a tight race that we're no longer * last_deallocated->unload_in_progress we can still use the * already unloaded module */ if ((pc < last_deallocated->last_unload_base) || (pc >= (last_deallocated->last_unload_base + last_deallocated->last_unload_size))) in_last = false; mutex_unlock(&last_deallocated_lock); return in_last; } static bool is_currently_unloaded_region(app_pc pc) { if (last_deallocated == NULL) return false; ASSERT(DYNAMO_OPTION(unloaded_target_exception)); if (!last_deallocated->unload_in_progress) return false; return is_in_last_unloaded_region(pc); } bool is_unreadable_or_currently_unloaded_region(app_pc pc) { /* we want one atomic query - so if we are before the completion * of the UnMap system call we should be * is_currently_unloaded_region(), but afterwards the address * should be !is_readable_without_exception */ /* order of execution is important - so that we don't have to grab * a lock to synchronize with mark_unload_end(). 
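 * Checking the unload-in-progress flag before the readability probe is what lets
 * us skip a barrier here: the flag is only cleared in mark_unload_end(), by which
 * point the target had already been made unreadable by the unmap.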
*/ if (is_currently_unloaded_region(pc)) { STATS_INC(num_unloaded_race); return true; } /* If we are not in a currently unloaded module then target is * either not being unloaded or we are beyond system call. */ if (!is_readable_without_exception(pc, 1)) { return true; } return false; } void print_last_deallocated(file_t outf) { if (last_deallocated == NULL) return; ASSERT(DYNAMO_OPTION(unloaded_target_exception)); if (last_deallocated->last_unload_base == NULL) { print_file(outf, "never unloaded\n"); return; } print_file(outf, "last unload: "PFX"-"PFX"%s\n", last_deallocated->last_unload_base, last_deallocated->last_unload_base + last_deallocated->last_unload_size, last_deallocated->unload_in_progress ? " being unloaded": ""); } #ifdef PROGRAM_SHEPHERDING /* Note that rerouting an APC to this target should safely popup the arguments * and continue. * * Since ThreadProc and APCProc have the same signature, we handle a * remote thread in a similar way, instead of letting attack handling * decide its fate - which may be an exception instead of killing the * thread. * * FIXME: we're interpreting dynamorio.dll code here */ /* FIXME clean up: safe_apc_or_thread_target, apc_thread_policy_helper and * aslr_report_violation should all be ifdef WINDOWS, and may be in a * different file */ /* could do naked to get a single RET 4 emitted with no prologue */ void APC_API safe_apc_or_thread_target(reg_t arg) { /* NOTHING */ } /* FIXME: case 9023: this is WRONG for NATIVE APCs! * kernel32!BaseDispatchAPC+0x33: * 7c82c13a c20c00 ret 0xc * FIXME: add safe_native_apc(PVOID context, PAPCFUNC func, reg_t arg) */ /* a helper procedure for DYNAMO_OPTION(apc_policy) or DYNAMO_OPTION(thread_policy) * * FIXME: currently relevant only on WINDOWS */ void apc_thread_policy_helper(app_pc *apc_target_location, /* IN/OUT */ security_option_t target_policy, apc_thread_type_t target_type) { bool is_apc = (target_type == APC_TARGET_NATIVE) || (target_type == APC_TARGET_WINDOWS); /* if is_win32api we're evaluating the Win32 API targets of * QueueUserAPC/CreateThreadEx, otherwise it is the native * NtQueueApcThread/NtCreateThreadEx targets */ bool is_win32api = (target_type == THREAD_TARGET_WINDOWS) || (target_type == APC_TARGET_WINDOWS); bool match = false; /* FIXME: note taking the risk here of reading from either the * word on the stack, or from a Cxt. While the app would fail in * either case this should be safer. I don't want the extra * is_readable_without_exception() here though. */ app_pc injected_target = *apc_target_location; uint injected_code = 0; /* first bytes of shellcode */ /* match PIC shellcode header, for example * 0013004c 53 push ebx * 0013004d e800000000 call 00130052 */ enum {PIC_SHELLCODE_MATCH = 0x0000e853}; /* Now we quickly check a stipped down code origins policy instead * of letting the bb builder do this. ALTERNATIVE design: We could save * the target and have this extra work done only after a code * origins violations. Then we would not modify application state * unnecessarily. The problem however is that we need to make * sure we do that only _immediately_ after an APC. 
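 * (PIC_SHELLCODE_MATCH above is the first four bytes of that push-ebx/call
 * sequence, 53 e8 00 00, read as a little-endian uint.)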
     */
    /* using only executable area - assuming areas added by
     * -executable_if_x are only added to futureexec_areas, so that
     * this test can be done and acted upon independently of us
     * running in NX compatibility
     */
    if (is_executable_address(injected_target)) {
        return;                 /* not a match */
    }

    if (safe_read(injected_target, sizeof(injected_code), &injected_code)) {
        LOG(GLOBAL, LOG_ASYNCH, 2,
            "ASYNCH intercepted APC: APC pc="PFX", APC code="PFX" %s\n",
            injected_target, injected_code,
            injected_code == PIC_SHELLCODE_MATCH ? "MATCH" : "");
    } else {
        ASSERT_NOT_TESTED();
    }

    /* target is a non-executable area, but we may want to be more specific */
    if (TEST(OPTION_CUSTOM, target_policy)) {
        match = true;           /* no matter what is in the shellcode */
    } else {
        if (injected_code == PIC_SHELLCODE_MATCH)
            match = true;
    }

    if (match) {
        bool squashed = false;
        char injected_threat_buf[MAXIMUM_VIOLATION_NAME_LENGTH] = "APCS.XXXX.B";
        const char *name = injected_threat_buf;
        bool block = TEST(OPTION_BLOCK, target_policy);

        /* we need the constructed name before deciding to really
         * block, in case we exempt by ID
         */
        if (TEST(OPTION_REPORT, target_policy)) {
            /* mangle injected_code into a name */
            if (injected_code == PIC_SHELLCODE_MATCH) {
                /* keeping the well known hardcoded ones for VSE */
                name = is_apc ? "VVPP.3200.B" : "YCRP.3200.B";
            } else {
                /* FIXME: native vs non-native could get a different prefix as well */
                if (!is_apc) {
                    const char *INJT = "INJT"; /* (injected) shellcode thread */
                    ASSERT_NOT_TESTED();
                    /* gcc warns if we use the string "INJT" directly */
                    strncpy(injected_threat_buf, INJT, 4);
                }
                fill_security_violation_target(injected_threat_buf,
                                               (const byte*)&injected_code);
            }

            /* we allow -exempt_threat_list to override our action */
            if (!IS_STRING_OPTION_EMPTY(exempt_threat_list)) {
                if (is_exempt_threat_name(name)) {
                    /* we want to ALLOW unconditionally so we don't
                     * immediately get a regular .B violation after we
                     * let it through the APC check
                     */
                    block = false;
                }
                /* FIXME: we don't have a good way to express allow
                 * everyone except for the ones on this list while we
                 * could say block = !block that doesn't match the
                 * general meaning of exempt_threat_list
                 */
            }
        }

        if (block) {
            /* always using custom attack handling */
            /* We cannot let default attack handling take care
             * of this because a main thread may get affected very early.
             *
             * It is also hard to reuse security_violation() call here
             * (since we are not under dispatch()).  If we want to see a
             * code origins failure, we can just disable this policy.
             */
            ASSERT(!TEST(OPTION_HANDLING, target_policy) &&
                   "handling cannot be modified");
            SYSLOG_INTERNAL_WARNING("squashed %s %s at bad target pc="PFX" %s",
                                    is_apc ? "APC" : "thread",
                                    is_win32api ? "win32" : "native",
                                    injected_target, name);
            /* FIXME: case 9023: should squash appropriately native
             * vs non-native since the number of arguments may be
             * different, hence stdcall RET size
             */
            *apc_target_location = is_win32api ?
                (app_pc)safe_apc_or_thread_target :
                (app_pc)safe_apc_or_thread_target;
            squashed = true;
        } else { /* allow */
            app_pc base = (app_pc)PAGE_START(injected_target);
            SYSLOG_INTERNAL_WARNING("allowing %s %s at bad target pc="PFX" %s",
                                    is_apc ? "APC" : "thread",
                                    is_win32api ? "win32" : "native",
                                    injected_target, name);
            /* FIXME: for HIGH mode, unfortunately the target code
             * may be selfmod, so adding a hook-style policy is hard.
             */
            /* FIXME: It looks like in VirusScan (case 2871) they
             * eventually free this memory, so not that bad a hole.
             * Although I haven't found how they would properly
             * synchronize that entapi.dll is loaded.
             */
            /* we can't safely determine a subpage region so adding whole page */
            add_futureexec_vm_area(base, base + PAGE_SIZE,
                                   false/*permanent*/
                                   _IF_DEBUG(is_apc ? "apc_helper" : "thread_policy"));
        }

        if (TEST(OPTION_REPORT, target_policy)) {
            /* report a violation adjusted for appropriate action */
            /* FIXME: should come up with a new name for this
             * violation, otherwise it is pretty inconsistent to say
             * we are running in detect mode with -B policies
             */
            /* note that we may not actually report if silent_block_threat_list */
            security_violation_report(injected_target,
                                      APC_THREAD_SHELLCODE_VIOLATION,
                                      name,
                                      squashed ?
                                      ACTION_TERMINATE_THREAD : ACTION_CONTINUE);
        }
        DOSTATS({
            if (is_apc)
                STATS_INC(num_used_apc_policy);
            else
                STATS_INC(num_used_thread_policy);
        });
    }
}

/* a helper procedure for reporting ASLR violations */
void
aslr_report_violation(app_pc execution_fault_pc,
                      security_option_t handling_policy)
{
    STATS_INC(aslr_wouldbe_exec);

    /* note OPTION_BLOCK has to be set since there is nothing we can
     * do to not block the attack, there is no detect mode here, yet
     * we let the original exception be passed.  For default
     * applications where ASLR can be hit natively, the attack
     * handling policy is to throw an exception.
     */
    ASSERT(TEST(OPTION_BLOCK, handling_policy));
    /* FIXME: yet we should have a choice whether to override the
     * exception that would normally be delivered to the application,
     * with a -kill_thread or -kill_process in case the SEH chain is
     * corrupt, and to allow the attack handling thresholds to take
     * effect.
     */
    ASSERT(!TEST(OPTION_HANDLING, handling_policy));
    /* FIXME: if using security_violation() to report and provide attack
     * handling decisions, should make sure it prefers exceptions,
     * FIXME: make sure not trying to release locks, FIXME: also clean
     * kstats (currently hotp_only is already broken)
     */
    ASSERT(!TEST(OPTION_CUSTOM, handling_policy));

    if (TEST(OPTION_REPORT, handling_policy)) {
        /* report a violation, adjusted for appropriate action */
        char aslr_threat_id[MAXIMUM_VIOLATION_NAME_LENGTH];
        /* in -hotp_only mode cannot have the regular distinction
         * between stack and heap targets (usually marked as .A and
         * .B), instead marking all as the same .R violation.
*/ security_violation_t aslr_violation_type = ASLR_TARGET_VIOLATION; /* source cannot be obtained */ /* FIXME: case 8160 on possibly setting the source to something useful */ /* FIXME: target is currently unreadable, forensic and Threat * ID generation will adjust to a likely current mapping to * print its contents */ dcontext_t *dcontext = get_thread_private_dcontext(); /* should be in hotp_only */ ASSERT(dcontext != NULL && dcontext->last_fragment != NULL && dcontext->last_fragment->tag == NULL); /* note we clobber next_tag here, not bothering to preserve */ /* report_dcontext_info() uses next_tag for target (and * preferred target) diagnostics */ dcontext->next_tag = execution_fault_pc; /* if likely_target_pc is unreadable (and it should be) * get_security_violation_name will use as target the contents * of a likely would be target */ get_security_violation_name(dcontext, execution_fault_pc, aslr_threat_id, MAXIMUM_VIOLATION_NAME_LENGTH, aslr_violation_type, NULL); security_violation_report(execution_fault_pc, aslr_violation_type, aslr_threat_id, ACTION_THROW_EXCEPTION); } } #endif /* PROGRAM_SHEPHERDING */ #ifdef STANDALONE_UNIT_TEST # define INT_TO_PC(x) ((app_pc)(ptr_uint_t)(x)) static void print_vector_msg(vm_area_vector_t *v, file_t f, const char *msg) { print_file(f, "%s:\n", msg); print_vm_areas(v, f); } static void check_vec(vm_area_vector_t *v, int i, app_pc start, app_pc end, uint vm_flags, uint frag_flags, void *data) { ASSERT(i < v->length); ASSERT(v->buf[i].start == start); ASSERT(v->buf[i].end == end); ASSERT(v->buf[i].vm_flags == vm_flags); ASSERT(v->buf[i].frag_flags == frag_flags); ASSERT(v->buf[i].custom.client == data); } void vmvector_tests() { vm_area_vector_t v = {0, 0, 0, VECTOR_SHARED | VECTOR_NEVER_MERGE, INIT_READWRITE_LOCK(thread_vm_areas)}; bool res; app_pc start = NULL, end = NULL; print_file(STDERR, "\nvm_area_vector_t tests\n"); /* FIXME: not tested */ vmvector_add(&v, INT_TO_PC(0x100), INT_TO_PC(0x103), NULL); vmvector_add(&v, INT_TO_PC(0x200), INT_TO_PC(0x203), NULL); vmvector_print(&v, STDERR); #if 0 /* this raises no-merge assert: no mechanism to test that it fires though */ vmvector_add(&v, INT_TO_PC(0x202), INT_TO_PC(0x210), NULL); /* should complain */ #endif vmvector_add(&v, INT_TO_PC(0x203), INT_TO_PC(0x221), NULL); vmvector_print(&v, STDERR); check_vec(&v, 2, INT_TO_PC(0x203), INT_TO_PC(0x221), 0, 0, NULL); res = vmvector_remove_containing_area(&v, INT_TO_PC(0x103), NULL, NULL); /* not in */ EXPECT(res, false); check_vec(&v, 0, INT_TO_PC(0x100), INT_TO_PC(0x103), 0, 0, NULL); res = vmvector_remove_containing_area(&v, INT_TO_PC(0x100), NULL, &end); EXPECT(end, 0x103); EXPECT(res, true); vmvector_print(&v, STDERR); check_vec(&v, 0, INT_TO_PC(0x200), INT_TO_PC(0x203), 0, 0, NULL); res = vmvector_remove_containing_area(&v, INT_TO_PC(0x100), NULL, NULL); /* not in */ EXPECT(res, false); vmvector_print(&v, STDERR); res = vmvector_remove_containing_area(&v, INT_TO_PC(0x202), &start, NULL); EXPECT(res, true); EXPECT(start, 0x200); vmvector_print(&v, STDERR); res = vmvector_remove(&v, INT_TO_PC(0x20), INT_TO_PC(0x210)); /* truncation allowed? */ EXPECT(res, true); vmvector_print(&v, STDERR); } /* initial vector tests * FIXME: should add a lot more, esp. 
wrt other flags -- these only * test no flags or interactions w/ selfmod flag */ void unit_test_vmareas(void) { vm_area_vector_t v = {0,0,0,false}; /* not needed yet: dcontext_t *dcontext = */ ASSIGN_INIT_READWRITE_LOCK_FREE(v.lock, thread_vm_areas); /* TEST 1: merge a bunch of areas */ add_vm_area(&v, INT_TO_PC(1), INT_TO_PC(3), 0, 0, NULL _IF_DEBUG("A")); add_vm_area(&v, INT_TO_PC(5), INT_TO_PC(7), 0, 0, NULL _IF_DEBUG("B")); add_vm_area(&v, INT_TO_PC(9), INT_TO_PC(11), 0, 0, NULL _IF_DEBUG("C")); print_vector_msg(&v, STDERR, "after adding areas"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(3), 0, 0, NULL); check_vec(&v, 1, INT_TO_PC(5), INT_TO_PC(7), 0, 0, NULL); check_vec(&v, 2, INT_TO_PC(9), INT_TO_PC(11), 0, 0, NULL); add_vm_area(&v, INT_TO_PC(0), INT_TO_PC(12), 0, 0, NULL _IF_DEBUG("D")); print_vector_msg(&v, STDERR, "after merging with D"); check_vec(&v, 0, INT_TO_PC(0), INT_TO_PC(12), 0, 0, NULL); /* clear for next test */ remove_vm_area(&v, INT_TO_PC(0), UNIVERSAL_REGION_END, false); print_file(STDERR, "\n"); /* TEST 2: add an area that covers several smaller ones, including one * that cannot be merged */ add_vm_area(&v, INT_TO_PC(1), INT_TO_PC(3), 0, 0, NULL _IF_DEBUG("A")); add_vm_area(&v, INT_TO_PC(5), INT_TO_PC(7), 0, FRAG_SELFMOD_SANDBOXED, NULL _IF_DEBUG("B")); add_vm_area(&v, INT_TO_PC(9), INT_TO_PC(11), 0, 0, NULL _IF_DEBUG("C")); print_vector_msg(&v, STDERR, "after adding areas"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(3), 0, 0, NULL); check_vec(&v, 1, INT_TO_PC(5), INT_TO_PC(7), 0, FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 2, INT_TO_PC(9), INT_TO_PC(11), 0, 0, NULL); add_vm_area(&v, INT_TO_PC(2), INT_TO_PC(10), 0, 0, NULL _IF_DEBUG("D")); print_vector_msg(&v, STDERR, "after merging with D"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(5), 0, 0, NULL); check_vec(&v, 1, INT_TO_PC(5), INT_TO_PC(7), 0, FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 2, INT_TO_PC(7), INT_TO_PC(11), 0, 0, NULL); remove_vm_area(&v, INT_TO_PC(6), INT_TO_PC(8), false); print_vector_msg(&v, STDERR, "after removing 6-8"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(5), 0, 0, NULL); check_vec(&v, 1, INT_TO_PC(5), INT_TO_PC(6), 0, FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 2, INT_TO_PC(8), INT_TO_PC(11), 0, 0, NULL); /* clear for next test */ remove_vm_area(&v, INT_TO_PC(0), UNIVERSAL_REGION_END, false); print_file(STDERR, "\n"); /* TEST 3: add an area that covers several smaller ones, including two * that cannot be merged */ add_vm_area(&v, INT_TO_PC(1), INT_TO_PC(3), 0, FRAG_SELFMOD_SANDBOXED, NULL _IF_DEBUG("A")); add_vm_area(&v, INT_TO_PC(5), INT_TO_PC(7), 0, FRAG_SELFMOD_SANDBOXED, NULL _IF_DEBUG("B")); add_vm_area(&v, INT_TO_PC(9), INT_TO_PC(11), 0, 0, NULL _IF_DEBUG("C")); print_vector_msg(&v, STDERR, "after adding areas"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(3), 0, FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 1, INT_TO_PC(5), INT_TO_PC(7), 0, FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 2, INT_TO_PC(9), INT_TO_PC(11), 0, 0, NULL); add_vm_area(&v, INT_TO_PC(2), INT_TO_PC(12), 0, 0, NULL _IF_DEBUG("D")); print_vector_msg(&v, STDERR, "after merging with D"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(3), 0, FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 1, INT_TO_PC(3), INT_TO_PC(5), 0, 0, NULL); check_vec(&v, 2, INT_TO_PC(5), INT_TO_PC(7), 0, FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 3, INT_TO_PC(7), INT_TO_PC(12), 0, 0, NULL); remove_vm_area(&v, INT_TO_PC(2), INT_TO_PC(11), false); print_vector_msg(&v, STDERR, "after removing 2-11"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(2), 0, 
FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 1, INT_TO_PC(11), INT_TO_PC(12), 0, 0, NULL); /* FIXME: would be nice to be able to test that an assert is generated... * say, for this: * add_vm_area(&v, INT_TO_PC(7), INT_TO_PC(12), 0, FRAG_SELFMOD_SANDBOXED, NULL _IF_DEBUG("E")); */ /* clear for next test */ remove_vm_area(&v, INT_TO_PC(0), UNIVERSAL_REGION_END, false); print_file(STDERR, "\n"); /* TEST 4: add an area completely inside one that cannot be merged */ add_vm_area(&v, INT_TO_PC(1), INT_TO_PC(5), 0, FRAG_SELFMOD_SANDBOXED, NULL _IF_DEBUG("A")); print_vector_msg(&v, STDERR, "after adding areas"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(5), 0, FRAG_SELFMOD_SANDBOXED, NULL); add_vm_area(&v, INT_TO_PC(3), INT_TO_PC(4), 0, 0, NULL _IF_DEBUG("B")); print_vector_msg(&v, STDERR, "after merging with B"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(5), 0, FRAG_SELFMOD_SANDBOXED, NULL); /* clear for next test */ remove_vm_area(&v, INT_TO_PC(0), UNIVERSAL_REGION_END, false); print_file(STDERR, "\n"); /* TEST 5: Test merging adjacent areas. */ add_vm_area(&v, INT_TO_PC(1), INT_TO_PC(2), 0, 0, NULL _IF_DEBUG("A")); add_vm_area(&v, INT_TO_PC(2), INT_TO_PC(3), 0, 0, NULL _IF_DEBUG("B")); add_vm_area(&v, INT_TO_PC(3), INT_TO_PC(4), 0, 0, NULL _IF_DEBUG("C")); print_vector_msg(&v, STDERR, "do areas merge"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(4), 0, 0, NULL); remove_vm_area(&v, INT_TO_PC(1), INT_TO_PC(4), false); add_vm_area(&v, INT_TO_PC(1), INT_TO_PC(2), 0, 0, NULL _IF_DEBUG("A")); add_vm_area(&v, INT_TO_PC(2), INT_TO_PC(3), 0, FRAG_SELFMOD_SANDBOXED, NULL _IF_DEBUG("B")); add_vm_area(&v, INT_TO_PC(3), INT_TO_PC(4), 0, 0, NULL _IF_DEBUG("C")); print_vector_msg(&v, STDERR, "do areas merge with flags"); check_vec(&v, 0, INT_TO_PC(1), INT_TO_PC(2), 0, 0, NULL); check_vec(&v, 1, INT_TO_PC(2), INT_TO_PC(3), 0, FRAG_SELFMOD_SANDBOXED, NULL); check_vec(&v, 2, INT_TO_PC(3), INT_TO_PC(4), 0, 0, NULL); vmvector_tests(); } #endif /* STANDALONE_UNIT_TEST */
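
#ifdef STANDALONE_UNIT_TEST
/* Illustrative sketch only, not part of the original test suite:
 * vmvector_query_sketch() is a hypothetical helper showing how the query
 * side of vm_area_vector_t is typically exercised, assuming
 * vmvector_overlap() and vmvector_lookup() keep their usual semantics
 * (half-open [start,end) ranges; lookup returns the stored client data or
 * NULL).  It is not wired into any test driver.
 */
static void
vmvector_query_sketch(void)
{
    vm_area_vector_t v = {0, 0, 0, VECTOR_SHARED | VECTOR_NEVER_MERGE,
                          INIT_READWRITE_LOCK(thread_vm_areas)};
    /* one area [0x1000, 0x2000) carrying client data (void *)1 */
    vmvector_add(&v, INT_TO_PC(0x1000), INT_TO_PC(0x2000), (void *)1);
    /* overlap is computed on half-open ranges */
    EXPECT(vmvector_overlap(&v, INT_TO_PC(0x1800), INT_TO_PC(0x2800)), true);
    EXPECT(vmvector_overlap(&v, INT_TO_PC(0x2000), INT_TO_PC(0x3000)), false);
    /* lookup returns the data stored for the containing area, NULL otherwise */
    EXPECT((ptr_uint_t)vmvector_lookup(&v, INT_TO_PC(0x1234)), 1);
    EXPECT(vmvector_lookup(&v, INT_TO_PC(0x2000)) == NULL, true);
    vmvector_remove(&v, INT_TO_PC(0x1000), INT_TO_PC(0x2000));
}
#endif /* STANDALONE_UNIT_TEST */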
idx: 1
id: 10598
msg: This is declared in the wrong block
proj: DynamoRIO-dynamorio
lang: c